From 7108df0d2cb323d9ae2f0df59edacccdb6f3d62a Mon Sep 17 00:00:00 2001
From: chenjianping
Date: Sun, 7 Mar 2021 17:05:31 +0800
Subject: [PATCH] IR Unify

---
 mindspore/lite/CMakeLists.txt | 1 +
 mindspore/lite/include/model.h | 6 +-
 mindspore/lite/include/version.h | 2 +-
 mindspore/lite/micro/CMakeLists.txt | 12 +-
 mindspore/lite/micro/cmake/file_list.cmake | 200 ++-
 .../lite/micro/cmake/package_cmsis.cmake | 21 +
 .../lite/micro/cmake/package_micro_ops.cmake | 32 -
 .../lite/micro/cmake/package_nnacl.cmake | 20 +
 .../lite/micro/cmake/package_wrapper.cmake | 25 +
 mindspore/lite/micro/cmake/wrapper.cmake | 12 -
 mindspore/lite/micro/coder/CMakeLists.txt | 23 +-
 .../lite/micro/coder/allocator/allocator.cc | 8 +-
 .../lite/micro/coder/allocator/allocator.h | 6 +-
 mindspore/lite/micro/coder/coder.cc | 19 +-
 mindspore/lite/micro/coder/coder_config.h | 12 +-
 mindspore/lite/micro/coder/context.cc | 6 +-
 .../component/benchmark_component.cc | 16 +-
 .../generator/component/benchmark_component.h | 2 -
 .../generator/component/cmake_component.cc | 9 +-
 .../generator/component/common_component.cc | 10 +-
 .../component/const_blocks/cmake_lists.h | 10 +-
 .../component/const_blocks/debug_utils.h | 12 +-
 .../component/const_blocks/license.h | 8 +-
 .../component/const_blocks/load_input.h | 12 +-
 .../component/const_blocks/micro_tensor.h | 24 +-
 .../component/const_blocks/thread_pool.h | 99 ++
 .../generator/component/parallel_component.cc | 61 +
 .../generator/component/parallel_component.h | 35 +
 .../generator/component/weight_component.cc | 16 +-
 .../lite/micro/coder/generator/generator.cc | 20 +-
 .../inference/inference_generator.cc | 19 +-
 .../generator/inference/inference_generator.h | 2 +-
 .../coder/generator/train/train_generator.cc | 2 +-
 .../coder/generator/train/train_generator.h | 2 +-
 mindspore/lite/micro/coder/graph.cc | 10 +-
 .../coder/opcoders/base/conv2d_base_coder.cc | 45 +-
 .../coder/opcoders/base/conv2d_base_coder.h | 22 +-
 .../base/detection_post_process_base_coder.cc | 153 ++
 .../base/detection_post_process_base_coder.h | 54 +
 .../coder/opcoders/base/dtype_cast_coder.cc | 2 +-
 .../coder/opcoders/base/dtype_cast_coder.h | 2 +-
 .../base/full_connection_base_coder.cc | 8 +-
 .../base/full_connection_base_coder.h | 5 +-
 .../opcoders/base/quant_dtype_cast_coder.cc | 81 +-
 .../opcoders/base/quant_dtype_cast_coder.h | 8 +-
 .../coder/opcoders/base/reduce_base_coder.cc | 6 +-
 .../coder/opcoders/base/reduce_base_coder.h | 8 +-
 .../coder/opcoders/base/resize_base_coder.cc | 104 ++
 .../coder/opcoders/base/resize_base_coder.h | 49 +
 .../coder/opcoders/base/softmax_base_coder.cc | 2 +-
 .../coder/opcoders/base/softmax_base_coder.h | 4 +-
 .../opcoders/cmsis-nn/int8/add_int8_coder.cc | 6 +-
 .../cmsis-nn/int8/conv2d_int8_coder.cc | 36 +-
 .../cmsis-nn/int8/dwconv_int8_coder.cc | 4 -
 .../cmsis-nn/int8/fullconnection_int8_coder.h | 4 +-
 .../opcoders/cmsis-nn/int8/mul_int8_coder.cc | 4 +-
 .../cmsis-nn/int8/pooling_int8_coder.cc | 12 +-
 .../cmsis-nn/int8/softmax_int8_coder.cc | 4 +-
 .../coder/opcoders/nnacl/dequant/de_quant.cc | 143 ++
 .../coder/opcoders/nnacl/dequant/de_quant.h | 63 +
 .../nnacl/fp32/activation_fp32_coder.cc | 10 +-
 .../opcoders/nnacl/fp32/addn_fp32_coder.cc | 15 +-
 .../nnacl/fp32/arithmetic_fp32_coder.cc | 35 +-
 .../nnacl/fp32/arithmetic_fp32_coder.h | 8 +-
 .../nnacl/fp32/arithmetic_self_fp32_coder.h | 8 +-
 .../nnacl/fp32/assign_add_fp32_coder.cc | 4 +-
 .../nnacl/fp32/batchnorm_fp32_coder.cc | 12 +-
 .../nnacl/fp32/batchnorm_fp32_coder.h | 6 +
 .../opcoders/nnacl/fp32/biasadd_fp32_coder.cc | 77 +
 .../opcoders/nnacl/fp32/biasadd_fp32_coder.h | 43 +
 .../fp32/convolution_depthwise_fp32_coder.cc | 11 +-
 .../fp32/convolution_depthwise_fp32_coder.h | 2 +-
 .../nnacl/fp32/convolution_fp32_coder.cc | 123 +-
 .../nnacl/fp32/convolution_fp32_coder.h | 14 +-
 .../fp32/convolution_winograd_fp32_coder.cc | 17 +-
 .../fp32/convolution_winograd_fp32_coder.h | 2 +-
 .../nnacl/fp32/matmul_fp32_base_coder.cc | 76 +-
 .../nnacl/fp32/matmul_fp32_base_coder.h | 1 +
 .../opcoders/nnacl/fp32/matmul_fp32_coder.cc | 7 +-
 .../opcoders/nnacl/fp32/pad_fp32_coder.cc | 12 +-
 .../opcoders/nnacl/fp32/pooling_fp32_coder.cc | 24 +-
 .../opcoders/nnacl/fp32/pooling_fp32_coder.h | 2 +-
 .../opcoders/nnacl/fp32/power_fp32_coder.cc | 4 +-
 .../opcoders/nnacl/fp32/reduce_fp32_coder.cc | 4 +-
 .../opcoders/nnacl/fp32/scale_fp32_coder.cc | 43 +-
 .../opcoders/nnacl/fp32/slice_fp32_coder.cc | 74 -
 .../opcoders/nnacl/fp32/slice_fp32_coder.h | 37 -
 .../opcoders/nnacl/fp32/softmax_fp32_coder.cc | 6 +-
 .../opcoders/nnacl/fp32/softmax_fp32_coder.h | 2 +-
 .../opcoders/nnacl/fp32/splice_fp32_coder.cc | 57 +
 .../opcoders/nnacl/fp32/splice_fp32_coder.h | 35 +
 .../opcoders/nnacl/fp32/tile_fp32_coder.cc | 4 +-
 .../nnacl/fp32/transpose_fp32_coder.cc | 10 +-
 .../nnacl/fp32/transpose_fp32_coder.h | 16 +-
 .../nnacl/int8/activation_int8_coder.cc | 74 +
 .../opcoders/nnacl/int8/add_int8_coder.cc | 64 +-
 .../opcoders/nnacl/int8/add_int8_coder.h | 12 +-
 .../nnacl/int8/batchnorm_int8_coder.cc | 162 ++
 .../nnacl/int8/batchnorm_int8_coder.h | 49 +
 .../opcoders/nnacl/int8/concat_int8_coder.cc | 28 +-
 .../opcoders/nnacl/int8/concat_int8_coder.h | 2 +-
 .../nnacl/int8/conv2d_1x1_int8_coder.cc | 20 +-
 .../nnacl/int8/conv2d_1x1_int8_coder.h | 2 +-
 .../nnacl/int8/conv2d_3x3_int8_coder.cc | 20 +-
 .../nnacl/int8/conv2d_3x3_int8_coder.h | 7 +-
 .../opcoders/nnacl/int8/conv2d_int8_coder.cc | 92 +-
 .../opcoders/nnacl/int8/conv2d_int8_coder.h | 15 +-
 .../int8/convolution_depthwise_int8_coder.cc | 110 ++
 .../int8/convolution_depthwise_int8_coder.h | 52 +
 .../nnacl/int8/deconvolution_int8_coder.cc | 15 +-
 .../nnacl/int8/deconvolution_int8_coder.h | 2 +-
 .../int8/detection_post_process_int8_coder.cc | 56 +
 .../int8/detection_post_process_int8_coder.h | 39 +
 .../opcoders/nnacl/int8/div_int8_coder.cc | 78 +
 .../opcoders/nnacl/int8/div_int8_coder.h | 49 +
 .../nnacl/int8/fullconnection_int8_coder.cc | 11 +-
 .../opcoders/nnacl/int8/pooling_int8_coder.cc | 29 +-
 .../opcoders/nnacl/int8/reduce_int8_coder.cc | 4 +-
 .../opcoders/nnacl/int8/reduce_int8_coder.h | 3 +-
 .../opcoders/nnacl/int8/relux_int8_coder.cc | 57 +
 .../opcoders/nnacl/int8/relux_int8_coder.h | 81 +
 .../opcoders/nnacl/int8/reshape_int8_coder.cc | 7 +-
 .../opcoders/nnacl/int8/resize_int8_coder.cc | 109 ++
 .../opcoders/nnacl/int8/resize_int8_coder.h | 60 +
 .../opcoders/nnacl/int8/sigmoid_int8_coder.cc | 74 +
 .../opcoders/nnacl/int8/sigmoid_int8_coder.h | 43 +
 .../opcoders/nnacl/int8/softmax_int8_coder.cc | 28 +-
 .../opcoders/nnacl/int8/sub_int8_coder.cc | 105 ++
 .../opcoders/nnacl/int8/sub_int8_coder.h | 49 +
 .../lite/micro/coder/opcoders/op_coder.cc | 13 +-
 .../lite/micro/coder/opcoders/op_coder.h | 15 +-
 .../micro/coder/opcoders/op_coder_builder.cc | 31 +-
 .../micro/coder/opcoders/op_coder_builder.h | 10 +-
 .../micro/coder/opcoders/op_coder_register.h | 5 +-
 .../lite/micro/coder/opcoders/parallel.cc | 33 +
 .../lite/micro/coder/opcoders/parallel.h | 41 +
 .../nnacl_serializer/nnacl_fp32_serializer.cc | 16 +-
 .../nnacl_serializer/nnacl_fp32_serializer.h | 8 +-
 .../nnacl_serializer/nnacl_int8_serializer.cc | 80 +-
 .../nnacl_serializer/nnacl_int8_serializer.h | 13 +-
 .../nnacl_serializer/nnacl_stream_utils.cc | 90 +
 .../nnacl_serializer/nnacl_stream_utils.h | 58 +-
 .../coder/opcoders/serializers/serializer.h | 8 +-
 .../coder/operator_library/CMakeLists.txt | 49 +
 .../detection_post_process_base_wrapper.c | 71 +
 .../detection_post_process_base_wrapper.h | 24 +
 .../fp32/dequant_int8_to_fp32_wrapper.c | 69 +
 .../fp32/dequant_int8_to_fp32_wrapper.h | 48 +
 .../wrapper/fp32/matmul_fp32_wrapper.c | 0
 .../wrapper/fp32/matmul_fp32_wrapper.h | 33 +
 .../wrapper/int8/add_int8_wrapper.c | 69 +
 .../wrapper/int8/add_int8_wrapper.h | 49 +
 .../wrapper/int8/batchnorm_int8_wrapper.c | 25 +
 .../wrapper/int8/batchnorm_int8_wrapper.h | 32 +
 .../wrapper/int8/concat_int8_wrapper.c | 27 +
 .../wrapper/int8/concat_int8_wrapper.h | 35 +
 .../wrapper/int8/conv1x1_init_int8_wrapper.c | 90 +
 .../wrapper/int8/conv1x1_init_int8_wrapper.h | 28 +
 .../wrapper/int8/conv1x1_run_int8_wrapper.c | 225 +++
 .../wrapper/int8/conv1x1_run_int8_wrapper.h | 48 +
 .../wrapper/int8/conv_init_int8_wrapper.c | 88 +
 .../wrapper/int8/conv_init_int8_wrapper.h | 26 +
 .../int8/convolution_depthwise_int8_wrapper.c | 25 +
 .../int8/convolution_depthwise_int8_wrapper.h | 35 +
 .../wrapper/int8/convolution_int8_wrapper.c | 43 +
 .../wrapper/int8/convolution_int8_wrapper.h | 43 +
 .../wrapper/int8/matmul_int8_wrapper.c | 0
 .../wrapper/int8/matmul_int8_wrapper.h | 35 +
 .../wrapper/int8/resize_int8_wrapper.c | 25 +
 .../wrapper/int8/resize_int8_wrapper.h | 41 +
 .../wrapper/int8/slice_int8_wrapper.c | 24 +
 .../wrapper/int8/slice_int8_wrapper.h | 31 +
 mindspore/lite/micro/coder/session.cc | 53 +-
 mindspore/lite/micro/coder/train.cc | 23 +-
 .../lite/micro/coder/utils/coder_utils.cc | 5 +-
 mindspore/lite/micro/coder/utils/type_cast.cc | 22 +-
 mindspore/lite/micro/coder/utils/type_cast.h | 3 +
 .../micro/example/micro_speech/Softmax-3.out | 8 +-
 .../example/mobilenetv2_quant/1_224_224_3.bin | Bin 602112 -> 0 bytes
 .../example/mobilenetv2_quant/Reshape-64.out | 96 +-
 .../input_1_224_224_3_uint8.bin | Bin 0 -> 150528 bytes
 mindspore/lite/micro/test/CMakeLists.txt | 13 +-
 .../micro/wrapper/fp32/matmul_fp32_wrapper.h | 33 -
 .../micro/wrapper/int8/add_int8_wrapper.c | 69 -
 .../micro/wrapper/int8/add_int8_wrapper.h | 50 -
 .../wrapper/int8/conv1x1_init_int8_wrapper.c | 90 -
 .../wrapper/int8/conv1x1_init_int8_wrapper.h | 28 -
 .../wrapper/int8/conv1x1_run_int8_wrapper.c | 224 ---
 .../wrapper/int8/conv1x1_run_int8_wrapper.h | 49 -
 .../wrapper/int8/conv_init_int8_wrapper.c | 88 -
 .../wrapper/int8/conv_init_int8_wrapper.h | 26 -
 .../micro/wrapper/int8/matmul_int8_wrapper.h | 35 -
 mindspore/lite/nnacl/CMakeLists.txt | 1 +
 mindspore/lite/nnacl/arithmetic.h | 1 +
 mindspore/lite/nnacl/base/tile_base.h | 2 +
 mindspore/lite/nnacl/concat_parameter.h | 2 +-
 mindspore/lite/nnacl/conv_parameter.h | 5 +-
 mindspore/lite/nnacl/crop_parameter.h | 2 +-
 mindspore/lite/nnacl/errorcode.h | 2 +
 mindspore/lite/nnacl/fp16/activation_fp16.h | 2 +-
 mindspore/lite/nnacl/fp16/lstm_fp16.c | 43 +-
 mindspore/lite/nnacl/fp16/lstm_fp16.h | 2 +-
 .../lite/nnacl/fp16_grad/activation_grad.c | 17 -
 .../lite/nnacl/fp16_grad/activation_grad.h | 3 +-
 .../nnacl/fp16_grad/arithmetic_self_grad.c | 37 +
 .../nnacl/fp16_grad/arithmetic_self_grad.h | 39 +
 mindspore/lite/nnacl/fp32/activation_fp32.h | 2 +-
 mindspore/lite/nnacl/fp32/layer_norm_fp32.c | 10 +-
 mindspore/lite/nnacl/fp32/lstm_fp32.c | 45 +-
 mindspore/lite/nnacl/fp32/lstm_fp32.h | 2 +-
 mindspore/lite/nnacl/fp32/pooling_fp32.h | 2 +-
 mindspore/lite/nnacl/fp32/splice_fp32.c | 30 +
 mindspore/lite/nnacl/fp32/splice_fp32.h | 31 +
 mindspore/lite/nnacl/fp32_grad/batch_norm.h | 1 -
 mindspore/lite/nnacl/fp32_grad/softmax_grad.h | 2 +-
 mindspore/lite/nnacl/gather_parameter.h | 2 +-
 mindspore/lite/nnacl/infer/adam_infer.c | 42 +
 mindspore/lite/nnacl/infer/adam_infer.h | 31 +
 .../lite/nnacl/infer/add_sub_grad_infer.c | 63 +
 .../lite/nnacl/infer/add_sub_grad_infer.h | 31 +
 mindspore/lite/nnacl/infer/addn_infer.c | 70 +
 mindspore/lite/nnacl/infer/addn_infer.h | 31 +
 .../lite/nnacl/infer/apply_momentum_infer.c | 45 +
 .../lite/nnacl/infer/apply_momentum_infer.h | 31 +
 mindspore/lite/nnacl/infer/argmin_max_infer.c | 70 +
 mindspore/lite/nnacl/infer/argmin_max_infer.h | 32 +
 .../nnacl/infer/arithmetic_compare_infer.c | 28 +
 .../nnacl/infer/arithmetic_compare_infer.h | 31 +
 .../lite/nnacl/infer/arithmetic_grad_infer.c | 97 +
 .../lite/nnacl/infer/arithmetic_grad_infer.h | 45 +
 mindspore/lite/nnacl/infer/arithmetic_infer.c | 117 ++
 mindspore/lite/nnacl/infer/arithmetic_infer.h | 32 +
 mindspore/lite/nnacl/infer/assert_op_infer.c | 22 +
 mindspore/lite/nnacl/infer/assert_op_infer.h | 31 +
 mindspore/lite/nnacl/infer/assign_add_infer.c | 34 +
 mindspore/lite/nnacl/infer/assign_add_infer.h | 31 +
 mindspore/lite/nnacl/infer/assign_infer.c | 37 +
 mindspore/lite/nnacl/infer/assign_infer.h | 31 +
 .../nnacl/infer/audio_spectrogram_infer.c | 71 +
 .../nnacl/infer/audio_spectrogram_infer.h | 37 +
 .../lite/nnacl/infer/batch_to_space_infer.c | 137 ++
 .../lite/nnacl/infer/batch_to_space_infer.h | 32 +
 mindspore/lite/nnacl/infer/bias_grad_infer.c | 39 +
 mindspore/lite/nnacl/infer/bias_grad_infer.h | 31 +
 .../nnacl/infer/binary_cross_entropy_infer.c | 33 +
 .../nnacl/infer/binary_cross_entropy_infer.h | 32 +
 mindspore/lite/nnacl/infer/bn_grad_infer.c | 38 +
 mindspore/lite/nnacl/infer/bn_grad_infer.h | 31 +
 .../lite/nnacl/infer/broadcast_to_infer.c | 68 +
 .../lite/nnacl/infer/broadcast_to_infer.h | 32 +
 mindspore/lite/nnacl/infer/cast_infer.c | 41 +
 mindspore/lite/nnacl/infer/cast_infer.h | 31 +
 mindspore/lite/nnacl/infer/common_infer.c | 455 +++++
 mindspore/lite/nnacl/infer/common_infer.h | 209 +++
 mindspore/lite/nnacl/infer/concat_infer.c | 73 +
 mindspore/lite/nnacl/infer/concat_infer.h | 32 +
 .../nnacl/infer/constant_of_shape_infer.c | 64 +
 .../nnacl/infer/constant_of_shape_infer.h | 32 +
 .../nnacl/infer/conv2d_grad_filter_infer.c | 35 +
 .../nnacl/infer/conv2d_grad_filter_infer.h | 32 +
 .../nnacl/infer/conv2d_grad_input_infer.c | 41 +
 .../nnacl/infer/conv2d_grad_input_infer.h | 32 +
 mindspore/lite/nnacl/infer/conv2d_infer.c | 99 ++
 mindspore/lite/nnacl/infer/conv2d_infer.h | 32 +
 .../lite/nnacl/infer/crop_and_resize_infer.c | 56 +
 .../lite/nnacl/infer/crop_and_resize_infer.h | 31 +
 mindspore/lite/nnacl/infer/crop_infer.c | 31 +
 mindspore/lite/nnacl/infer/crop_infer.h | 32 +
 .../infer/custom_extract_features_infer.c | 46 +
 .../infer/custom_extract_features_infer.h | 31 +
 .../lite/nnacl/infer/custom_normalize_infer.c | 39 +
 .../lite/nnacl/infer/custom_normalize_infer.h | 32 +
 .../lite/nnacl/infer/custom_predict_infer.c | 39 +
 .../lite/nnacl/infer/custom_predict_infer.h | 36 +
 mindspore/lite/nnacl/infer/deconv2d_infer.c | 97 +
 mindspore/lite/nnacl/infer/deconv2d_infer.h | 32 +
 .../nnacl/infer/dedepthwise_conv2d_infer.c | 57 +
 .../nnacl/infer/dedepthwise_conv2d_infer.h | 32 +
 .../lite/nnacl/infer/depth_to_space_infer.c | 54 +
 .../lite/nnacl/infer/depth_to_space_infer.h | 32 +
 .../lite/nnacl/infer/depthwise_conv2d_infer.c | 71 +
 .../lite/nnacl/infer/depthwise_conv2d_infer.h | 32 +
 .../infer/detection_post_process_infer.c | 76 +
 .../infer/detection_post_process_infer.h | 32 +
 .../lite/nnacl/infer/dropout_grad_infer.c | 33 +
 .../lite/nnacl/infer/dropout_grad_infer.h | 31 +
 mindspore/lite/nnacl/infer/dropout_infer.c | 39 +
 mindspore/lite/nnacl/infer/dropout_infer.h | 31 +
 .../lite/nnacl/infer/embedding_lookup_infer.c | 58 +
 .../lite/nnacl/infer/embedding_lookup_infer.h | 31 +
 .../lite/nnacl/infer/expand_dims_infer.c | 44 +
 .../lite/nnacl/infer/expand_dims_infer.h | 31 +
 mindspore/lite/nnacl/infer/fft_imag_infer.c | 22 +
 mindspore/lite/nnacl/infer/fft_imag_infer.h | 31 +
 mindspore/lite/nnacl/infer/fft_real_infer.c | 22 +
 mindspore/lite/nnacl/infer/fft_real_infer.h | 31 +
 mindspore/lite/nnacl/infer/fill_infer.c | 45 +
 mindspore/lite/nnacl/infer/fill_infer.h | 31 +
 .../lite/nnacl/infer/flatten_grad_infer.c | 42 +
 .../lite/nnacl/infer/flatten_grad_infer.h | 31 +
 mindspore/lite/nnacl/infer/flatten_infer.c | 44 +
 mindspore/lite/nnacl/infer/flatten_infer.h | 31 +
 .../lite/nnacl/infer/full_connection_infer.c | 74 +
 .../lite/nnacl/infer/full_connection_infer.h | 32 +
 .../lite/nnacl/infer/fused_batchnorm_infer.c | 34 +
 .../lite/nnacl/infer/fused_batchnorm_infer.h | 31 +
 mindspore/lite/nnacl/infer/gather_infer.c | 60 +
 mindspore/lite/nnacl/infer/gather_infer.h | 32 +
 mindspore/lite/nnacl/infer/gather_nd_infer.c | 49 +
 mindspore/lite/nnacl/infer/gather_nd_infer.h | 32 +
 .../infer/group_conv2d_grad_input_infer.c | 38 +
 .../infer/group_conv2d_grad_input_infer.h | 32 +
 mindspore/lite/nnacl/infer/gru_infer.c | 82 +
 mindspore/lite/nnacl/infer/gru_infer.h | 32 +
 .../lite/nnacl/infer/hashtable_lookup_infer.c | 41 +
 .../lite/nnacl/infer/hashtable_lookup_infer.h | 31 +
 .../nnacl/infer/invert_permutation_infer.c | 39 +
 .../nnacl/infer/invert_permutation_infer.h | 31 +
 mindspore/lite/nnacl/infer/layer_norm_infer.c | 35 +
 mindspore/lite/nnacl/infer/layer_norm_infer.h | 32 +
 mindspore/lite/nnacl/infer/lin_space_infer.c | 40 +
 mindspore/lite/nnacl/infer/lin_space_infer.h | 31 +
 .../lite/nnacl/infer/lsh_projection_infer.c | 49 +
 .../lite/nnacl/infer/lsh_projection_infer.h | 32 +
 mindspore/lite/nnacl/infer/lstm_infer.c | 62 +
 mindspore/lite/nnacl/infer/lstm_infer.h | 32 +
 mindspore/lite/nnacl/infer/matmul_infer.c | 83 +
 mindspore/lite/nnacl/infer/matmul_infer.h | 32 +
 .../lite/nnacl/infer/maximum_grad_infer.c | 56 +
 .../lite/nnacl/infer/maximum_grad_infer.h | 42 +
 mindspore/lite/nnacl/infer/mean_infer.c | 67 +
 mindspore/lite/nnacl/infer/mean_infer.h | 32 +
 mindspore/lite/nnacl/infer/merge_infer.c | 93 +
 mindspore/lite/nnacl/infer/merge_infer.h | 34 +
 mindspore/lite/nnacl/infer/mfcc_infer.c | 43 +
 mindspore/lite/nnacl/infer/mfcc_infer.h | 36 +
 .../nnacl/infer/non_max_suppression_infer.c | 30 +
 .../nnacl/infer/non_max_suppression_infer.h | 31 +
 mindspore/lite/nnacl/infer/one_hot_infer.c | 54 +
 mindspore/lite/nnacl/infer/one_hot_infer.h | 32 +
 mindspore/lite/nnacl/infer/pad_infer.c | 58 +
 mindspore/lite/nnacl/infer/pad_infer.h | 32 +
 mindspore/lite/nnacl/infer/partial_infer.c | 22 +
 mindspore/lite/nnacl/infer/partial_infer.h | 32 +
 .../lite/nnacl/infer/pooling_grad_infer.c | 59 +
 .../lite/nnacl/infer/pooling_grad_infer.h | 32 +
 mindspore/lite/nnacl/infer/pooling_infer.c | 80 +
 mindspore/lite/nnacl/infer/pooling_infer.h | 32 +
 mindspore/lite/nnacl/infer/power_infer.c | 51 +
 mindspore/lite/nnacl/infer/power_infer.h | 32 +
 mindspore/lite/nnacl/infer/prior_box_infer.c | 74 +
 mindspore/lite/nnacl/infer/prior_box_infer.h | 32 +
 .../lite/nnacl/infer/quant_dtype_cast_infer.c | 39 +
 .../lite/nnacl/infer/quant_dtype_cast_infer.h | 37 +
 .../infer/random_standard_normal_infer.c | 43 +
 .../infer/random_standard_normal_infer.h | 31 +
 mindspore/lite/nnacl/infer/range_infer.c | 74 +
 mindspore/lite/nnacl/infer/range_infer.h | 32 +
 mindspore/lite/nnacl/infer/rank_infer.c | 34 +
 mindspore/lite/nnacl/infer/rank_infer.h | 31 +
 mindspore/lite/nnacl/infer/reduce_infer.c | 101 ++
 mindspore/lite/nnacl/infer/reduce_infer.h | 32 +
 mindspore/lite/nnacl/infer/reshape_infer.c | 176 ++
 mindspore/lite/nnacl/infer/reshape_infer.h | 32 +
 mindspore/lite/nnacl/infer/resize_infer.c | 137 ++
 mindspore/lite/nnacl/infer/resize_infer.h | 32 +
 mindspore/lite/nnacl/infer/rfft_infer.c | 36 +
 mindspore/lite/nnacl/infer/rfft_infer.h | 36 +
 .../lite/nnacl/infer/roi_pooling_infer.c | 43 +
 .../lite/nnacl/infer/roi_pooling_infer.h | 32 +
 mindspore/lite/nnacl/infer/scatter_nd_infer.c | 40 +
 mindspore/lite/nnacl/infer/scatter_nd_infer.h | 32 +
 mindspore/lite/nnacl/infer/select_infer.c | 53 +
 mindspore/lite/nnacl/infer/select_infer.h | 31 +
 mindspore/lite/nnacl/infer/sgd_infer.c | 38 +
 mindspore/lite/nnacl/infer/sgd_infer.h | 31 +
 mindspore/lite/nnacl/infer/shape_infer.c | 37 +
 mindspore/lite/nnacl/infer/shape_infer.h | 31 +
 mindspore/lite/nnacl/infer/size_infer.c | 37 +
 mindspore/lite/nnacl/infer/size_infer.h | 31 +
 mindspore/lite/nnacl/infer/skip_gram_infer.c | 34 +
 mindspore/lite/nnacl/infer/skip_gram_infer.h | 31 +
 mindspore/lite/nnacl/infer/slice_infer.c | 81 +
 mindspore/lite/nnacl/infer/slice_infer.h | 32 +
 .../nnacl/infer/softmax_cross_entropy_infer.c | 42 +
 .../nnacl/infer/softmax_cross_entropy_infer.h | 31 +
 mindspore/lite/nnacl/infer/softmax_infer.c | 38 +
 mindspore/lite/nnacl/infer/softmax_infer.h | 32 +
 .../lite/nnacl/infer/space_to_batch_infer.c | 57 +
 .../lite/nnacl/infer/space_to_batch_infer.h | 32 +
 .../nnacl/infer/space_to_batch_nd_infer.c | 132 ++
 .../nnacl/infer/space_to_batch_nd_infer.h | 32 +
 .../lite/nnacl/infer/space_to_depth_infer.c | 56 +
 .../lite/nnacl/infer/space_to_depth_infer.h | 32 +
 .../sparse_softmax_cross_entropy_infer.c | 39 +
 .../sparse_softmax_cross_entropy_infer.h | 37 +
 .../lite/nnacl/infer/sparse_to_dense_infer.c | 40 +
 .../lite/nnacl/infer/sparse_to_dense_infer.h | 31 +
 mindspore/lite/nnacl/infer/splice_infer.c | 58 +
 mindspore/lite/nnacl/infer/splice_infer.h | 32 +
 mindspore/lite/nnacl/infer/split_infer.c | 77 +
 mindspore/lite/nnacl/infer/split_infer.h | 32 +
 mindspore/lite/nnacl/infer/squeeze_infer.c | 59 +
 mindspore/lite/nnacl/infer/squeeze_infer.h | 32 +
 mindspore/lite/nnacl/infer/stack_infer.c | 57 +
 mindspore/lite/nnacl/infer/stack_infer.h | 32 +
 .../nnacl/infer/strided_slice_grad_infer.c | 140 ++
 .../nnacl/infer/strided_slice_grad_infer.h | 32 +
 .../lite/nnacl/infer/strided_slice_infer.c | 328 ++++
 .../lite/nnacl/infer/strided_slice_infer.h | 32 +
 mindspore/lite/nnacl/infer/switch_infer.c | 103 ++
 mindspore/lite/nnacl/infer/switch_infer.h | 32 +
 .../nnacl/infer/tensorlist_fromtensor_infer.c | 75 +
 .../nnacl/infer/tensorlist_fromtensor_infer.h | 31 +
 .../nnacl/infer/tensorlist_getitem_infer.c | 80 +
 .../nnacl/infer/tensorlist_getitem_infer.h | 32 +
 .../nnacl/infer/tensorlist_reserve_infer.c | 79 +
 .../nnacl/infer/tensorlist_reserve_infer.h | 31 +
 .../nnacl/infer/tensorlist_setitem_infer.c | 128 ++
 .../nnacl/infer/tensorlist_setitem_infer.h | 31 +
 .../lite/nnacl/infer/tensorlist_stack_infer.c | 68 +
 .../lite/nnacl/infer/tensorlist_stack_infer.h | 31 +
 mindspore/lite/nnacl/infer/tile_infer.c | 109 ++
 mindspore/lite/nnacl/infer/tile_infer.h | 32 +
 mindspore/lite/nnacl/infer/topk_infer.c | 50 +
 mindspore/lite/nnacl/infer/topk_infer.h | 32 +
 mindspore/lite/nnacl/infer/transpose_infer.c | 83 +
 mindspore/lite/nnacl/infer/transpose_infer.h | 32 +
 .../lite/nnacl/infer/uniform_real_infer.c | 37 +
 .../lite/nnacl/infer/uniform_real_infer.h | 31 +
 mindspore/lite/nnacl/infer/unique_infer.c | 39 +
 mindspore/lite/nnacl/infer/unique_infer.h | 31 +
 .../nnacl/infer/unsorted_segment_sum_infer.c | 39 +
 .../nnacl/infer/unsorted_segment_sum_infer.h | 36 +
 mindspore/lite/nnacl/infer/unsqueeze_infer.c | 63 +
 mindspore/lite/nnacl/infer/unsqueeze_infer.h | 32 +
 mindspore/lite/nnacl/infer/unstack_infer.c | 52 +
 mindspore/lite/nnacl/infer/unstack_infer.h | 32 +
 mindspore/lite/nnacl/infer/where_infer.c | 75 +
 mindspore/lite/nnacl/infer/where_infer.h | 31 +
 mindspore/lite/nnacl/infer/while_infer.c | 30 +
 mindspore/lite/nnacl/infer/while_infer.h | 31 +
 mindspore/lite/nnacl/int8/layer_norm_int8.c | 10 +-
 mindspore/lite/nnacl/int8/reduce_int8.c | 13 +-
 mindspore/lite/nnacl/int8/splice_int8.c | 30 +
 mindspore/lite/nnacl/int8/splice_int8.h | 30 +
 mindspore/lite/nnacl/l2_norm_parameter.h | 2 +-
 mindspore/lite/nnacl/layer_norm_parameter.h | 4 +-
 mindspore/lite/nnacl/lstm_parameter.h | 6 +-
 mindspore/lite/nnacl/matmul_parameter.h | 2 +
 mindspore/lite/nnacl/nnacl_common.h | 2 +-
 mindspore/lite/nnacl/op_base.h | 4 +-
 mindspore/lite/nnacl/slice_parameter.h | 1 +
 mindspore/lite/nnacl/splice_parameter.h | 28 +
 mindspore/lite/nnacl/squeeze_parameter.h | 21 +-
 .../lite/nnacl/strided_slice_parameter.h | 5 +
 mindspore/lite/nnacl/tensor_c.h | 28 +
 mindspore/lite/nnacl/tensorlist_parameter.h | 5 +-
 mindspore/lite/nnacl/transpose.h | 1 +
 mindspore/lite/nnacl/unsqueeze_parameter.h | 1 +
 mindspore/lite/schema/model.fbs | 222 +--
 mindspore/lite/schema/model_v0.fbs | 6 +-
 mindspore/lite/schema/ops.fbs | 1557 +++++++----------
 mindspore/lite/schema/ops_types.fbs | 141 ++
 mindspore/lite/schema/ops_v0.fbs | 43 +-
 mindspore/lite/src/CMakeLists.txt | 9 +
 mindspore/lite/src/common/common.h | 2 +-
 mindspore/lite/src/common/graph_util.cc | 19 +-
 mindspore/lite/src/common/graph_util.h | 2 +-
 mindspore/lite/src/common/log_util.h | 31 +
 mindspore/lite/src/common/prim_inner.h | 36 +
 mindspore/lite/src/common/prim_util.cc | 122 ++
 mindspore/lite/src/common/prim_util.h | 34 +
 mindspore/lite/src/common/tensor_util.cc | 375 ++++
 mindspore/lite/src/common/tensor_util.h | 48 +
 mindspore/lite/src/common/version_manager.h | 4 +-
 mindspore/lite/src/dequant.cc | 13 +-
 mindspore/lite/src/dequant.h | 2 +-
 mindspore/lite/src/kernel_registry.cc | 58 +-
 mindspore/lite/src/kernel_registry.h | 9 +-
 mindspore/lite/src/lite_kernel.cc | 8 +-
 mindspore/lite/src/lite_kernel.h | 25 +-
 mindspore/lite/src/lite_model.cc | 39 +-
 mindspore/lite/src/lite_model.h | 36 +-
 mindspore/lite/src/lite_session.cc | 5 +-
 mindspore/lite/src/ops/CMakeLists.txt | 3 +-
 mindspore/lite/src/ops/abs.cc | 62 -
 mindspore/lite/src/ops/abs.h | 42 -
 mindspore/lite/src/ops/abs_grad.cc | 69 -
 mindspore/lite/src/ops/abs_grad.h | 42 -
 mindspore/lite/src/ops/activation.cc | 98 --
 mindspore/lite/src/ops/activation.h | 50 -
 mindspore/lite/src/ops/activation_grad.cc | 88 -
 mindspore/lite/src/ops/activation_grad.h | 47 -
 mindspore/lite/src/ops/adam.cc | 98 --
 mindspore/lite/src/ops/adam.h | 46 -
 mindspore/lite/src/ops/add.cc | 77 -
 mindspore/lite/src/ops/add.h | 44 -
 mindspore/lite/src/ops/adder.cc | 185 --
 mindspore/lite/src/ops/adder.h | 81 -
 mindspore/lite/src/ops/addn.cc | 123 --
 mindspore/lite/src/ops/addn.h | 43 -
 mindspore/lite/src/ops/apply_momentum.cc | 103 --
 mindspore/lite/src/ops/apply_momentum.h | 47 -
 mindspore/lite/src/ops/argmax.cc | 134 --
 mindspore/lite/src/ops/argmax.h | 54 -
 mindspore/lite/src/ops/argmin.cc | 134 --
 mindspore/lite/src/ops/argmin.h | 53 -
 mindspore/lite/src/ops/arithmetic.cc | 114 --
 mindspore/lite/src/ops/arithmetic.h | 58 -
 mindspore/lite/src/ops/arithmetic_compare.cc | 29 -
 mindspore/lite/src/ops/arithmetic_compare.h | 41 -
 mindspore/lite/src/ops/arithmetic_grad.cc | 121 --
 mindspore/lite/src/ops/arithmetic_grad.h | 58 -
 mindspore/lite/src/ops/arithmetic_self.cc | 42 -
 mindspore/lite/src/ops/arithmetic_self.h | 45 -
 mindspore/lite/src/ops/assert_op.cc | 70 -
 mindspore/lite/src/ops/assert_op.h | 43 -
 mindspore/lite/src/ops/assign.cc | 89 -
 mindspore/lite/src/ops/assign.h | 43 -
 mindspore/lite/src/ops/assign_add.cc | 94 -
 mindspore/lite/src/ops/assign_add.h | 40 -
 mindspore/lite/src/ops/audio_spectrogram.cc | 107 --
 mindspore/lite/src/ops/audio_spectrogram.h | 50 -
 mindspore/lite/src/ops/batch_norm.cc | 75 -
 mindspore/lite/src/ops/batch_norm.h | 44 -
 mindspore/lite/src/ops/batch_to_space.cc | 231 ---
 mindspore/lite/src/ops/batch_to_space.h | 52 -
 mindspore/lite/src/ops/bias_add.cc | 77 -
 mindspore/lite/src/ops/bias_add.h | 43 -
 mindspore/lite/src/ops/bias_grad.cc | 103 --
 mindspore/lite/src/ops/bias_grad.h | 46 -
 .../lite/src/ops/binary_cross_entropy.cc | 119 --
 mindspore/lite/src/ops/binary_cross_entropy.h | 47 -
 .../lite/src/ops/binary_cross_entropy_grad.cc | 121 --
 .../lite/src/ops/binary_cross_entropy_grad.h | 47 -
 mindspore/lite/src/ops/bn_grad.cc | 111 --
 mindspore/lite/src/ops/bn_grad.h | 47 -
 mindspore/lite/src/ops/broadcast_to.cc | 119 --
 mindspore/lite/src/ops/broadcast_to.h | 46 -
 mindspore/lite/src/ops/cast.cc | 112 --
 mindspore/lite/src/ops/cast.h | 47 -
 mindspore/lite/src/ops/ceil.cc | 31 -
 mindspore/lite/src/ops/ceil.h | 49 -
 mindspore/lite/src/ops/clip.cc | 54 -
 mindspore/lite/src/ops/clip.h | 45 -
 .../src/ops/compat/attr_transfer_common.cc | 2 +-
 .../src/ops/compat/attr_transfer_common.h | 3 +-
 .../lite/src/ops/compat/compat_register.h | 4 +-
 .../ops/compat/v0/broadcast_to_compat_v0.cc | 48 +
 .../ops/compat/v0/broadcat_to_compat_v0.cc | 47 -
 .../lite/src/ops/compat/v0/cast_compat_v0.cc | 41 +
 .../ops/compat/v0/expand_dims_compat_v0.cc | 45 +
 .../lite/src/ops/compat/v0/fill_compat_v0.cc | 47 +
 .../src/ops/compat/v0/gather_compat_v0.cc | 42 +
 .../src/ops/compat/v0/nchw2nhwc_compat_v0.cc | 46 +
 .../src/ops/compat/v0/nhwc2nchw_compat_v0.cc | 46 +
 .../lite/src/ops/compat/v0/pad_compat_v0.cc | 46 +
 .../src/ops/compat/v0/permute_compat_v0.cc | 51 +
 .../lite/src/ops/compat/v0/power_compat_v0.cc | 46 +
 .../src/ops/compat/v0/reduce_compat_v0.cc | 47 +
 .../src/ops/compat/v0/reshape_compat_v0.cc | 11 +-
 .../lite/src/ops/compat/v0/slice_compat_v0.cc | 59 +
 .../ops/compat/v0/strided_slice_compat_v0.cc | 9 +-
 .../lite/src/ops/compat/v0/tile_compat_v0.cc | 47 +
 .../lite/src/ops/compat/v0/topk_compat_v0.cc | 45 +
 .../src/ops/compat/v0/transpose_compat_v0.cc | 48 +
 mindspore/lite/src/ops/concat.cc | 139 --
 mindspore/lite/src/ops/concat.h | 45 -
 mindspore/lite/src/ops/constant.h | 36 -
 mindspore/lite/src/ops/constant_of_shape.cc | 119 --
 mindspore/lite/src/ops/constant_of_shape.h | 43 -
 mindspore/lite/src/ops/control_depend.cc | 61 -
 mindspore/lite/src/ops/control_depend.h | 40 -
 mindspore/lite/src/ops/conv2d.cc | 417 -----
 mindspore/lite/src/ops/conv2d.h | 98 --
 mindspore/lite/src/ops/conv2d_grad_filter.cc | 244 ---
 mindspore/lite/src/ops/conv2d_grad_filter.h | 78 -
 mindspore/lite/src/ops/conv2d_grad_input.cc | 244 ---
 mindspore/lite/src/ops/conv2d_grad_input.h | 78 -
 mindspore/lite/src/ops/cos.cc | 63 -
 mindspore/lite/src/ops/cos.h | 41 -
 mindspore/lite/src/ops/crop.cc | 80 -
 mindspore/lite/src/ops/crop.h | 47 -
 mindspore/lite/src/ops/crop_and_resize.cc | 116 --
 mindspore/lite/src/ops/crop_and_resize.h | 47 -
 .../lite/src/ops/custom_extract_features.cc | 70 -
 .../lite/src/ops/custom_extract_features.h | 40 -
 mindspore/lite/src/ops/custom_normalize.cc | 66 -
 mindspore/lite/src/ops/custom_normalize.h | 40 -
 mindspore/lite/src/ops/custom_predict.cc | 79 -
 mindspore/lite/src/ops/custom_predict.h | 46 -
 mindspore/lite/src/ops/deconv2d.cc | 375 ----
 mindspore/lite/src/ops/deconv2d.h | 90 -
 mindspore/lite/src/ops/dedepthwise_conv2d.cc | 171 --
 mindspore/lite/src/ops/dedepthwise_conv2d.h | 83 -
 mindspore/lite/src/ops/depend.cc | 64 -
 mindspore/lite/src/ops/depend.h | 40 -
 mindspore/lite/src/ops/depth_to_space.cc | 99 --
 mindspore/lite/src/ops/depth_to_space.h | 46 -
 mindspore/lite/src/ops/depthwise_conv2d.cc | 262 ---
 mindspore/lite/src/ops/depthwise_conv2d.h | 88 -
 mindspore/lite/src/ops/dequant.cc | 48 -
 mindspore/lite/src/ops/dequant.h | 38 -
 .../lite/src/ops/detection_post_process.cc | 208 ---
 .../lite/src/ops/detection_post_process.h | 70 -
 mindspore/lite/src/ops/div.cc | 82 -
 mindspore/lite/src/ops/div.h | 45 -
 mindspore/lite/src/ops/dropout.cc | 103 --
 mindspore/lite/src/ops/dropout.h | 47 -
 mindspore/lite/src/ops/dropout_grad.cc | 98 --
 mindspore/lite/src/ops/dropout_grad.h | 47 -
 mindspore/lite/src/ops/eltwise.cc | 51 -
 mindspore/lite/src/ops/eltwise.h | 45 -
 mindspore/lite/src/ops/elu.cc | 73 -
 mindspore/lite/src/ops/elu.h | 44 -
 mindspore/lite/src/ops/embedding_lookup.cc | 94 -
 mindspore/lite/src/ops/embedding_lookup.h | 45 -
 mindspore/lite/src/ops/equal.cc | 68 -
 mindspore/lite/src/ops/equal.h | 42 -
 mindspore/lite/src/ops/erf.h | 32 -
 mindspore/lite/src/ops/exp.cc | 84 -
 mindspore/lite/src/ops/exp.h | 48 -
 mindspore/lite/src/ops/expand_dims.cc | 122 --
 mindspore/lite/src/ops/expand_dims.h | 46 -
 .../src/ops/fake_quant_with_min_max_vars.cc | 67 -
 .../src/ops/fake_quant_with_min_max_vars.h | 45 -
 mindspore/lite/src/ops/fft_imag.cc | 54 -
 mindspore/lite/src/ops/fft_imag.h | 42 -
 mindspore/lite/src/ops/fft_real.cc | 54 -
 mindspore/lite/src/ops/fft_real.h | 42 -
 mindspore/lite/src/ops/fill.cc | 97 -
 mindspore/lite/src/ops/fill.h | 46 -
 mindspore/lite/src/ops/flatten.cc | 98 --
 mindspore/lite/src/ops/flatten.h | 43 -
 mindspore/lite/src/ops/flatten_grad.cc | 97 -
 mindspore/lite/src/ops/flatten_grad.h | 43 -
 mindspore/lite/src/ops/floor.cc | 67 -
 mindspore/lite/src/ops/floor.h | 42 -
 mindspore/lite/src/ops/floor_div.cc | 66 -
 mindspore/lite/src/ops/floor_div.h | 42 -
 mindspore/lite/src/ops/floor_mod.cc | 42 -
 mindspore/lite/src/ops/floor_mod.h | 41 -
 mindspore/lite/src/ops/full_connection.cc | 122 --
 mindspore/lite/src/ops/full_connection.h | 50 -
 mindspore/lite/src/ops/fused_batchnorm.cc | 103 --
 mindspore/lite/src/ops/fused_batchnorm.h | 49 -
 mindspore/lite/src/ops/gather.cc | 149 --
 mindspore/lite/src/ops/gather.h | 48 -
 mindspore/lite/src/ops/gather_nd.cc | 120 --
 mindspore/lite/src/ops/gather_nd.h | 44 -
 mindspore/lite/src/ops/gelu.cc | 53 -
 mindspore/lite/src/ops/gelu.h | 40 -
 mindspore/lite/src/ops/greater.cc | 68 -
 mindspore/lite/src/ops/greater.h | 42 -
 mindspore/lite/src/ops/greater_equal.cc | 43 -
 mindspore/lite/src/ops/greater_equal.h | 42 -
 .../lite/src/ops/group_conv2d_grad_input.cc | 172 --
 .../lite/src/ops/group_conv2d_grad_input.h | 77 -
 mindspore/lite/src/ops/gru.cc | 121 --
 mindspore/lite/src/ops/gru.h | 47 -
 mindspore/lite/src/ops/hashtable_lookup.cc | 69 -
 mindspore/lite/src/ops/hashtable_lookup.h | 40 -
 mindspore/lite/src/ops/identity.h | 33 -
 mindspore/lite/src/ops/if.h | 33 -
 mindspore/lite/src/ops/instance_norm.cc | 76 -
 mindspore/lite/src/ops/instance_norm.h | 44 -
 mindspore/lite/src/ops/invert_permutation.cc | 66 -
 mindspore/lite/src/ops/invert_permutation.h | 43 -
 mindspore/lite/src/ops/is_finite.h | 33 -
 mindspore/lite/src/ops/l2_norm.cc | 69 -
 mindspore/lite/src/ops/l2_norm.h | 49 -
 mindspore/lite/src/ops/layer_norm.cc | 122 --
 mindspore/lite/src/ops/layer_norm.h | 54 -
 mindspore/lite/src/ops/leaky_relu.cc | 55 -
 mindspore/lite/src/ops/leaky_relu.h | 45 -
 mindspore/lite/src/ops/less.cc | 43 -
 mindspore/lite/src/ops/less.h | 42 -
 mindspore/lite/src/ops/less_equal.cc | 42 -
 mindspore/lite/src/ops/less_equal.h | 42 -
 mindspore/lite/src/ops/lin_space.cc | 53 -
 mindspore/lite/src/ops/lin_space.h | 42 -
 .../src/ops/local_response_normalization.cc | 91 -
 .../src/ops/local_response_normalization.h | 50 -
 mindspore/lite/src/ops/log.cc | 63 -
 mindspore/lite/src/ops/log.h | 43 -
 mindspore/lite/src/ops/log_grad.cc | 46 -
 mindspore/lite/src/ops/log_grad.h | 41 -
 mindspore/lite/src/ops/logical_and.cc | 43 -
 mindspore/lite/src/ops/logical_and.h | 42 -
 mindspore/lite/src/ops/logical_not.cc | 42 -
 mindspore/lite/src/ops/logical_not.h | 42 -
 mindspore/lite/src/ops/logical_or.cc | 43 -
 mindspore/lite/src/ops/logical_or.h | 42 -
 mindspore/lite/src/ops/lrn.cc | 61 -
 mindspore/lite/src/ops/lrn.h | 50 -
 mindspore/lite/src/ops/lsh_projection.cc | 91 -
 mindspore/lite/src/ops/lsh_projection.h | 41 -
 mindspore/lite/src/ops/lstm.cc | 106 --
 mindspore/lite/src/ops/lstm.h | 48 -
 mindspore/lite/src/ops/make_tuple.cc | 71 -
 mindspore/lite/src/ops/make_tuple.h | 39 -
 mindspore/lite/src/ops/matmul.cc | 152 --
 mindspore/lite/src/ops/matmul.h | 47 -
 mindspore/lite/src/ops/maximum.cc | 74 -
 mindspore/lite/src/ops/maximum.h | 44 -
 mindspore/lite/src/ops/maximum_grad.cc | 126 --
 mindspore/lite/src/ops/maximum_grad.h | 46 -
 mindspore/lite/src/ops/merge.cc | 143 --
 mindspore/lite/src/ops/merge.h | 49 -
 mindspore/lite/src/ops/mfcc.cc | 83 -
 mindspore/lite/src/ops/mfcc.h | 56 -
 mindspore/lite/src/ops/minimum.cc | 67 -
 mindspore/lite/src/ops/minimum.h | 43 -
 mindspore/lite/src/ops/minimum_grad.cc | 128 --
 mindspore/lite/src/ops/minimum_grad.h | 45 -
 mindspore/lite/src/ops/mod.cc | 69 -
 mindspore/lite/src/ops/mod.h | 42 -
 mindspore/lite/src/ops/mul.cc | 84 -
 mindspore/lite/src/ops/mul.h | 45 -
 mindspore/lite/src/ops/nchw2nhwc.cc | 67 -
 mindspore/lite/src/ops/nchw2nhwc.h | 43 -
 mindspore/lite/src/ops/neg.cc | 64 -
 mindspore/lite/src/ops/neg.h | 43 -
 mindspore/lite/src/ops/neg_grad.cc | 41 -
 mindspore/lite/src/ops/neg_grad.h | 42 -
 mindspore/lite/src/ops/nhwc2nchw.cc | 68 -
 mindspore/lite/src/ops/nhwc2nchw.h | 43 -
 mindspore/lite/src/ops/non_max_suppression.cc | 70 -
 mindspore/lite/src/ops/non_max_suppression.h | 42 -
 mindspore/lite/src/ops/nonzero.cc | 123 --
 mindspore/lite/src/ops/nonzero.h | 45 -
 mindspore/lite/src/ops/not_equal.cc | 43 -
 mindspore/lite/src/ops/not_equal.h | 42 -
 mindspore/lite/src/ops/one_hot.cc | 132 --
 mindspore/lite/src/ops/one_hot.h | 46 -
 mindspore/lite/src/ops/oneslike.cc | 86 -
 mindspore/lite/src/ops/oneslike.h | 41 -
 mindspore/lite/src/ops/ops_def.cc | 1054 ++++++++++-
 mindspore/lite/src/ops/ops_def.h | 157 ++
 mindspore/lite/src/ops/ops_func_declare.h | 453 +++++
 mindspore/lite/src/ops/ops_register.h | 56 -
 mindspore/lite/src/ops/ops_utils.cc | 905 ++++++++++
 mindspore/lite/src/ops/ops_utils.h | 62 +
 mindspore/lite/src/ops/p_relu.cc | 59 -
 mindspore/lite/src/ops/p_relu.h | 45 -
 mindspore/lite/src/ops/pad.cc | 199 ---
 mindspore/lite/src/ops/pad.h | 51 -
 mindspore/lite/src/ops/partial.cc | 83 -
 mindspore/lite/src/ops/partial.h | 46 -
 mindspore/lite/src/ops/pooling.cc | 235 ---
 mindspore/lite/src/ops/pooling.h | 85 -
 mindspore/lite/src/ops/pooling_grad.cc | 215 ---
 mindspore/lite/src/ops/pooling_grad.h | 77 -
 .../ops/populate/activation_grad_populate.cc | 21 +-
 .../src/ops/populate/activation_populate.cc | 26 +-
 .../lite/src/ops/populate/adam_populate.cc | 12 +-
 .../lite/src/ops/populate/add_populate.cc | 19 +-
 .../lite/src/ops/populate/adder_populate.cc | 45 +-
 .../lite/src/ops/populate/addn_populate.cc | 13 +-
 .../lite/src/ops/populate/argmax_populate.cc | 25 +-
 .../lite/src/ops/populate/argmin_populate.cc | 25 +-
 .../src/ops/populate/arithmetic_populate.cc | 56 +-
 .../src/ops/populate/arithmetic_populate.h | 8 +-
 .../ops/populate/arithmetic_self_populate.cc | 43 +-
 .../lite/src/ops/populate/assert_populate.cc | 13 +-
 .../src/ops/populate/assign_add_populate.cc | 12 +-
 .../lite/src/ops/populate/assign_populate.cc | 13 +-
 .../populate/audio_spectrogram_populate.cc | 42 +
 .../src/ops/populate/batch_norm_populate.cc | 19 +-
 .../ops/populate/batch_to_space_populate.cc | 41 +-
 .../src/ops/populate/bias_add_populate.cc | 13 +-
 .../src/ops/populate/bias_grad_populate.cc | 14 +-
 .../binary_cross_entropy_grad_populate.cc | 21 +-
 .../populate/binary_cross_entropy_populate.cc | 17 +-
 .../src/ops/populate/broadcast_to_populate.cc | 20 +-
 .../lite/src/ops/populate/cast_populate.cc | 24 +-
 .../lite/src/ops/populate/clip_populate.cc | 37 +
 .../lite/src/ops/populate/common_populate.cc | 16 +-
 .../lite/src/ops/populate/concat_populate.cc | 19 +-
 .../populate/constant_of_shape_populate.cc | 28 +-
 .../lite/src/ops/populate/conv2d_populate.cc | 68 +-
 .../ops/populate/crop_and_resize_populate.cc | 19 +-
 .../lite/src/ops/populate/crop_populate.cc | 31 +-
 .../custom_extract_features_populate.cc | 18 +-
 .../ops/populate/custom_normalize_populate.cc | 13 +-
 .../ops/populate/custom_predict_populate.cc | 18 +-
 .../src/ops/populate/deconv2d_populate.cc | 62 +-
 .../populate/dedepthwise_conv2d_populate.cc | 10 +-
 .../lite/src/ops/populate/default_populate.cc | 36 +
 .../lite/src/ops/populate/default_populate.h | 26 +
 .../ops/populate/depth_to_space_populate.cc | 22 +-
 .../ops/populate/depthwise_conv2d_populate.cc | 88 +-
 .../detection_post_process_populate.cc | 42 +-
 .../lite/src/ops/populate/div_populate.cc | 12 +-
 .../lite/src/ops/populate/eltwise_populate.cc | 34 +-
 .../lite/src/ops/populate/elu_populate.cc | 19 +-
 .../ops/populate/embedding_lookup_populate.cc | 34 +-
 .../lite/src/ops/populate/erf_populate.cc | 23 +
 .../lite/src/ops/populate/exp_populate.cc | 21 +-
 .../src/ops/populate/expand_dims_populate.cc | 23 +-
 .../lite/src/ops/populate/fill_populate.cc | 27 +-
 .../lite/src/ops/populate/flatten_populate.cc | 12 +-
 .../ops/populate/full_connection_populate.cc | 28 +-
 .../ops/populate/fused_batchnorm_populate.cc | 19 +-
 .../src/ops/populate/gather_nd_populate.cc | 16 +-
 .../lite/src/ops/populate/gather_populate.cc | 26 +-
 .../lite/src/ops/populate/gru_populate.cc | 16 +-
 .../ops/populate/hashtable_lookup_populate.cc | 13 +-
 .../lite/src/ops/populate/if_populate.cc | 23 +
 .../ops/populate/instance_norm_populate.cc | 18 +-
 .../populate/invert_permutation_populate.cc | 24 +
 .../src/ops/populate/isfinite_populate.cc | 23 +
 .../lite/src/ops/populate/l2_norm_populate.cc | 41 +-
 .../src/ops/populate/layer_norm_populate.cc | 29 +-
 .../src/ops/populate/lin_space_populate.cc | 23 +
 .../local_response_normalization_populate.cc | 25 +-
 .../ops/populate/lsh_projection_populate.cc | 17 +-
 .../lite/src/ops/populate/lstm_populate.cc | 22 +-
 .../lite/src/ops/populate/matmul_populate.cc | 18 +-
 .../lite/src/ops/populate/merge_populate.cc | 12 +-
 .../lite/src/ops/populate/mfcc_populate.cc | 39 +
 .../lite/src/ops/populate/mul_populate.cc | 20 +-
 .../src/ops/populate/nchw2nhwc_populate.cc | 43 -
 .../src/ops/populate/nhwc2nchw_populate.cc | 44 -
 .../populate/non_max_suppression_populate.cc | 19 +-
 .../lite/src/ops/populate/nonzero_populate.cc | 15 +-
 .../lite/src/ops/populate/one_hot_populate.cc | 22 +-
 .../src/ops/populate/oneslike_populate.cc | 12 +-
 .../lite/src/ops/populate/p_relu_populate.cc | 25 +-
 .../lite/src/ops/populate/pad_populate.cc | 33 +-
 .../lite/src/ops/populate/partial_populate.cc | 17 +-
 .../lite/src/ops/populate/pooling_populate.cc | 128 +-
 .../lite/src/ops/populate/populate_register.h | 32 +-
 .../lite/src/ops/populate/power_populate.cc | 23 +-
 .../src/ops/populate/prior_box_populate.cc | 68 +-
 .../ops/populate/quant_dtype_cast_populate.cc | 20 +-
 .../random_standard_normal_populate.cc | 20 +-
 .../lite/src/ops/populate/range_populate.cc | 25 +-
 .../lite/src/ops/populate/rank_populate.cc | 36 +
 .../lite/src/ops/populate/reduce_populate.cc | 33 +-
 .../lite/src/ops/populate/reshape_populate.cc | 25 +-
 .../lite/src/ops/populate/resize_populate.cc | 26 +-
 .../lite/src/ops/populate/reverse_populate.cc | 22 +-
 .../ops/populate/reverse_sequence_populate.cc | 26 +-
 .../src/ops/populate/roi_pooling_populate.cc | 34 +-
 .../lite/src/ops/populate/scale_populate.cc | 25 +-
 .../src/ops/populate/scatter_nd_populate.cc | 14 +-
 .../lite/src/ops/populate/select_populate.cc | 17 +-
 .../lite/src/ops/populate/shape_populate.cc | 13 +-
 .../lite/src/ops/populate/size_populate.cc | 23 +
 .../src/ops/populate/skip_gram_populate.cc | 22 +-
 .../lite/src/ops/populate/slice_populate.cc | 28 +-
 .../lite/src/ops/populate/softmax_populate.cc | 25 +-
 .../populate/space_to_batch_nd_populate.cc | 59 +-
 .../ops/populate/space_to_batch_populate.cc | 47 +-
 .../ops/populate/space_to_depth_populate.cc | 20 +-
 .../ops/populate/sparse_to_dense_populate.cc | 16 +-
 .../lite/src/ops/populate/split_populate.cc | 36 +-
 .../lite/src/ops/populate/squeeze_populate.cc | 29 +-
 .../lite/src/ops/populate/stack_populate.cc | 19 +-
 .../ops/populate/strided_slice_populate.cc | 54 +-
 .../src/ops/populate/strided_slice_populate.h | 10 +-
 .../lite/src/ops/populate/sub_populate.cc | 14 +-
 .../lite/src/ops/populate/switch_populate.cc | 12 +-
 .../populate/tensorlistfromtensor_populate.cc | 18 +-
 .../populate/tensorlistgetitem_populate.cc | 19 +-
 .../populate/tensorlistreserve_populate.cc | 19 +-
 .../populate/tensorlistsetlitem_populate.cc | 20 +-
 .../ops/populate/tensorliststack_populate.cc | 20 +-
 .../lite/src/ops/populate/tile_populate.cc | 35 +-
 .../lite/src/ops/populate/topk_populate.cc | 20 +-
 .../src/ops/populate/transpose_populate.cc | 24 +-
 .../src/ops/populate/uniform_real_populate.cc | 23 +
 .../lite/src/ops/populate/unique_populate.cc | 15 +-
 .../populate/unsorted_segment_sum_populate.cc | 12 +-
 .../src/ops/populate/unsqueeze_populate.cc | 27 +-
 .../lite/src/ops/populate/unstack_populate.cc | 16 +-
 .../src/ops/populate/upsample_populate.cc | 44 -
 .../v0/activation_grad_populate_v0.cc | 45 +
 .../ops/populate/v0/activation_populate_v0.cc | 45 +
 .../src/ops/populate/v0/adam_populate_v0.cc | 37 +
 .../src/ops/populate/v0/add_populate_v0.cc | 42 +
 .../src/ops/populate/v0/addn_populate_v0.cc | 38 +
 .../src/ops/populate/v0/argmax_populate_v0.cc | 47 +
 .../src/ops/populate/v0/argmin_populate_v0.cc | 47 +
 .../ops/populate/v0/arithmetic_populate_v0.cc | 91 +
 .../ops/populate/v0/arithmetic_populate_v0.h | 28 +
 .../v0/arithmetic_self_populate_v0.cc | 83 +
 .../src/ops/populate/v0/assert_populate_v0.cc | 38 +
 .../ops/populate/v0/assign_add_populate_v0.cc | 37 +
 .../src/ops/populate/v0/assign_populate_v0.cc | 37 +
 .../ops/populate/v0/batch_norm_populate_v0.cc | 43 +
 .../populate/v0/batch_to_space_populate_v0.cc | 70 +
 .../ops/populate/v0/bias_add_populate_v0.cc | 39 +
 .../ops/populate/v0/bias_grad_populate_v0.cc | 40 +
 .../binary_cross_entropy_grad_populate_v0.cc | 44 +
 .../v0/binary_cross_entropy_populate_v0.cc | 44 +
 .../populate/v0/broadcast_to_populate_v0.cc | 48 +
 .../src/ops/populate/v0/cast_populate_v0.cc | 38 +
 .../src/ops/populate/v0/clip_populate_v0.cc | 37 +
 .../src/ops/populate/v0/common_populate_v0.cc | 43 +
 .../src/ops/populate/v0/concat_populate_v0.cc | 42 +
 .../v0/constant_of_shape_populate_v0.cc | 57 +
 .../src/ops/populate/v0/conv2d_populate_v0.cc | 82 +
 .../src/ops/populate/v0/crop_populate_v0.cc | 51 +
 .../v0/custom_extract_features_populate_v0.cc | 43 +
 .../v0/custom_normalize_populate_v0.cc | 43 +
 .../populate/v0/custom_predict_populate_v0.cc | 44 +
 .../ops/populate/v0/deconv2d_populate_v0.cc | 77 +
 .../v0/dedepthwise_conv2d_populate_v0.cc | 82 +
 .../populate/v0/depth_to_space_populate_v0.cc | 46 +
 .../v0/depthwise_conv2d_populate_v0.cc | 83 +
 .../v0/detection_post_process_populate_v0.cc | 55 +
 .../src/ops/populate/v0/div_populate_v0.cc | 40 +
 .../ops/populate/v0/eltwise_populate_v0.cc | 39 +
 .../src/ops/populate/v0/elu_populate_v0.cc | 42 +
 .../v0/embedding_lookup_populate_v0.cc | 50 +
 .../src/ops/populate/v0/exp_populate_v0.cc | 49 +
 .../populate/v0/expand_dims_populate_v0.cc | 37 +
 .../src/ops/populate/v0/fill_populate_v0.cc | 36 +
 .../ops/populate/v0/flatten_populate_v0.cc | 37 +
 .../v0/full_connection_populate_v0.cc | 56 +
 .../v0/fused_batchnorm_populate_v0.cc | 45 +
 .../ops/populate/v0/gather_nd_populate_v0.cc | 38 +
 .../src/ops/populate/v0/gather_populate_v0.cc | 47 +
 .../v0/hashtable_lookup_populate_v0.cc | 38 +
 .../populate/v0/instance_norm_populate_v0.cc | 43 +
 .../ops/populate/v0/l2_norm_populate_v0.cc | 65 +
 .../ops/populate/v0/layer_norm_populate_v0.cc | 54 +
 .../ops/populate/v0/layer_norm_populate_v0.h | 27 +
 ...ocal_response_normalization_populate_v0.cc | 48 +
 .../populate/v0/lsh_projection_populate_v0.cc | 44 +
 .../src/ops/populate/v0/lstm_populate_v0.cc | 47 +
 .../src/ops/populate/v0/matmul_populate_v0.cc | 46 +
 .../src/ops/populate/v0/mul_populate_v0.cc | 40 +
 .../ops/populate/v0/nchw2nhwc_populate_v0.cc | 44 +
 .../ops/populate/v0/nhwc2nchw_populate_v0.cc | 44 +
 .../v0/non_max_suppression_populate_v0.cc | 43 +
 .../ops/populate/v0/one_hot_populate_v0.cc | 47 +
 .../ops/populate/v0/oneslike_populate_v0.cc | 37 +
 .../src/ops/populate/v0/p_relu_populate_v0.cc | 42 +
 .../src/ops/populate/v0/pad_populate_v0.cc | 43 +
 .../ops/populate/v0/partial_populate_v0.cc | 47 +
 .../ops/populate/v0/pooling_populate_v0.cc | 100 ++
 .../src/ops/populate/v0/power_populate_v0.cc | 43 +
 .../ops/populate/v0/prior_box_populate_v0.cc | 83 +
 .../v0/quant_dtype_cast_populate_v0.cc | 45 +
 .../src/ops/populate/v0/range_populate_v0.cc | 45 +
 .../src/ops/populate/v0/rank_populate_v0.cc | 37 +
 .../src/ops/populate/v0/reduce_populate_v0.cc | 56 +
 .../ops/populate/v0/reshape_populate_v0.cc | 39 +
 .../src/ops/populate/v0/resize_populate_v0.cc | 50 +
 .../ops/populate/v0/reverse_populate_v0.cc | 47 +
 .../v0/reverse_sequence_populate_v0.cc | 45 +
 .../populate/v0/roi_pooling_populate_v0.cc | 44 +
 .../src/ops/populate/v0/scale_populate_v0.cc | 47 +
 .../ops/populate/v0/scatter_nd_populate_v0.cc | 37 +
 .../src/ops/populate/v0/shape_populate_v0.cc | 38 +
 .../ops/populate/v0/skip_gram_populate_v0.cc | 44 +
 .../src/ops/populate/v0/slice_populate_v0.cc | 56 +
 .../ops/populate/v0/softmax_populate_v0.cc | 42 +
 .../v0/space_to_batch_nd_populate_v0.cc | 56 +
 .../populate/v0/space_to_batch_populate_v0.cc | 63 +
 .../populate/v0/space_to_depth_populate_v0.cc | 48 +
 .../v0/sparse_to_dense_populate_v0.cc | 39 +
 .../src/ops/populate/v0/split_populate_v0.cc | 64 +
 .../v0/squared_difference_populate_v0.cc | 40 +
 .../ops/populate/v0/squeeze_populate_v0.cc | 49 +
 .../src/ops/populate/v0/stack_populate_v0.cc | 42 +
 .../populate/v0/strided_slice_populate_v0.cc | 75 +
 .../populate/v0/strided_slice_populate_v0.h | 28 +
 .../src/ops/populate/v0/sub_populate_v0.cc | 41 +
 .../src/ops/populate/v0/switch_populate_v0.cc | 38 +
 .../v0/tensorlistfromtensor_populate_v0.cc | 44 +
 .../v0/tensorlistgetitem_populate_v0.cc | 42 +
 .../v0/tensorlistreserve_populate_v0.cc | 43 +
 .../v0/tensorlistsetlitem_populate_v0.cc | 43 +
 .../v0/tensorliststack_populate_v0.cc | 43 +
 .../src/ops/populate/v0/tile_populate_v0.cc | 58 +
 .../src/ops/populate/v0/topk_populate_v0.cc | 43 +
 .../ops/populate/v0/transpose_populate_v0.cc | 49 +
 .../src/ops/populate/v0/unique_populate_v0.cc | 38 +
 .../v0/unsorted_segment_sum_populate_v0.cc | 38 +
 .../ops/populate/v0/unsqueeze_populate_v0.cc | 47 +
 .../ops/populate/v0/unstack_populate_v0.cc | 42 +
 .../src/ops/populate/v0/where_populate_v0.cc | 37 +
 .../src/ops/populate/v0/while_populate_v0.cc | 48 +
 .../lite/src/ops/populate/where_populate.cc | 16 +-
 .../lite/src/ops/populate/while_populate.cc | 18 +-
 mindspore/lite/src/ops/power.cc | 133 --
 mindspore/lite/src/ops/power.h | 50 -
 mindspore/lite/src/ops/power_grad.cc | 91 -
 mindspore/lite/src/ops/power_grad.h | 49 -
 mindspore/lite/src/ops/primitive_c.cc | 1178 -------------
 mindspore/lite/src/ops/primitive_c.h | 259 ---
 mindspore/lite/src/ops/prior_box.cc | 165 --
 mindspore/lite/src/ops/prior_box.h | 66 -
 mindspore/lite/src/ops/quant.cc | 57 -
 mindspore/lite/src/ops/quant.h | 37 -
 mindspore/lite/src/ops/quant_dtype_cast.cc | 72 -
 mindspore/lite/src/ops/quant_dtype_cast.h | 47 -
 .../lite/src/ops/random_standard_normal.cc | 101 --
 .../lite/src/ops/random_standard_normal.h | 46 -
 mindspore/lite/src/ops/range.cc | 149 --
 mindspore/lite/src/ops/range.h | 52 -
 mindspore/lite/src/ops/rank.cc | 55 -
 mindspore/lite/src/ops/rank.h | 43 -
 mindspore/lite/src/ops/real_div.cc | 62 -
 mindspore/lite/src/ops/real_div.h | 44 -
 mindspore/lite/src/ops/reciprocal.cc | 56 -
 mindspore/lite/src/ops/reciprocal.h | 49 -
 mindspore/lite/src/ops/reduce.cc | 221 ---
 mindspore/lite/src/ops/reduce.h | 56 -
 mindspore/lite/src/ops/reshape.cc | 242 ---
 mindspore/lite/src/ops/reshape.h | 54 -
 mindspore/lite/src/ops/resize.cc | 261 ---
 mindspore/lite/src/ops/resize.h | 64 -
 mindspore/lite/src/ops/return.cc | 86 -
 mindspore/lite/src/ops/return.h | 42 -
 mindspore/lite/src/ops/reverse.cc | 62 -
 mindspore/lite/src/ops/reverse.h | 46 -
 mindspore/lite/src/ops/reverse_sequence.cc | 75 -
 mindspore/lite/src/ops/reverse_sequence.h | 48 -
 mindspore/lite/src/ops/rfft.cc | 66 -
 mindspore/lite/src/ops/rfft.h | 44 -
 mindspore/lite/src/ops/roi_pooling.cc | 97 -
 mindspore/lite/src/ops/roi_pooling.h | 49 -
 mindspore/lite/src/ops/round.cc | 42 -
 mindspore/lite/src/ops/round.h | 42 -
 mindspore/lite/src/ops/rsqrt.cc | 63 -
 mindspore/lite/src/ops/rsqrt.h | 43 -
 mindspore/lite/src/ops/scale.cc | 56 -
 mindspore/lite/src/ops/scale.h | 46 -
 mindspore/lite/src/ops/scatter_nd.cc | 86 -
 mindspore/lite/src/ops/scatter_nd.h | 43 -
 mindspore/lite/src/ops/schema_def.h | 73 -
 mindspore/lite/src/ops/schema_register.h | 15 +-
 mindspore/lite/src/ops/select.cc | 104 --
 mindspore/lite/src/ops/select.h | 39 -
 mindspore/lite/src/ops/sgd.cc | 106 --
 mindspore/lite/src/ops/sgd.h | 48 -
 mindspore/lite/src/ops/shape.cc | 70 -
 mindspore/lite/src/ops/shape.h | 42 -
 .../ops/sigmoid_cross_entropy_with_logits.cc | 100 --
 .../ops/sigmoid_cross_entropy_with_logits.h | 45 -
 .../sigmoid_cross_entropy_with_logits_grad.cc | 102 --
 .../sigmoid_cross_entropy_with_logits_grad.h | 45 -
 mindspore/lite/src/ops/sin.cc | 66 -
 mindspore/lite/src/ops/sin.h | 43 -
 mindspore/lite/src/ops/size.cc | 64 -
 mindspore/lite/src/ops/size.h | 43 -
 mindspore/lite/src/ops/skip_gram.cc | 86 -
 mindspore/lite/src/ops/skip_gram.h | 49 -
 mindspore/lite/src/ops/slice.cc | 240 ---
 mindspore/lite/src/ops/slice.h | 59 -
 mindspore/lite/src/ops/smooth_l1_loss.cc | 101 --
 mindspore/lite/src/ops/smooth_l1_loss.h | 46 -
 mindspore/lite/src/ops/smooth_l1_loss_grad.cc | 101 --
 mindspore/lite/src/ops/smooth_l1_loss_grad.h | 46 -
 mindspore/lite/src/ops/softmax.cc | 99 --
 mindspore/lite/src/ops/softmax.h | 47 -
 .../lite/src/ops/softmax_cross_entropy.cc | 104 --
 .../lite/src/ops/softmax_cross_entropy.h | 48 -
 mindspore/lite/src/ops/space_to_batch.cc | 150 --
 mindspore/lite/src/ops/space_to_batch.h | 60 -
 mindspore/lite/src/ops/space_to_batch_nd.cc | 213 ---
 mindspore/lite/src/ops/space_to_batch_nd.h | 48 -
 mindspore/lite/src/ops/space_to_depth.cc | 109 --
 mindspore/lite/src/ops/space_to_depth.h | 47 -
 .../src/ops/sparse_softmax_cross_entropy.cc | 120 --
 .../src/ops/sparse_softmax_cross_entropy.h | 48 -
 mindspore/lite/src/ops/sparse_to_dense.cc | 74 -
 mindspore/lite/src/ops/sparse_to_dense.h | 50 -
 mindspore/lite/src/ops/split.cc | 165 --
 mindspore/lite/src/ops/split.h | 57 -
 mindspore/lite/src/ops/sqrt.cc | 69 -
 mindspore/lite/src/ops/sqrt.h | 43 -
 mindspore/lite/src/ops/square.cc | 69 -
 mindspore/lite/src/ops/square.h | 42 -
 mindspore/lite/src/ops/squared_difference.cc | 45 -
 mindspore/lite/src/ops/squared_difference.h | 42 -
 mindspore/lite/src/ops/squeeze.cc | 139 --
 mindspore/lite/src/ops/squeeze.h | 48 -
 mindspore/lite/src/ops/stack.cc | 120 --
 mindspore/lite/src/ops/stack.h | 50 -
 mindspore/lite/src/ops/strided_slice.cc | 454 -----
 mindspore/lite/src/ops/strided_slice.h | 89 -
 mindspore/lite/src/ops/strided_slice_grad.cc | 266 ---
 mindspore/lite/src/ops/strided_slice_grad.h | 64 -
 mindspore/lite/src/ops/sub.cc | 84 -
 mindspore/lite/src/ops/sub.h | 45 -
 mindspore/lite/src/ops/switch.cc | 142 --
 mindspore/lite/src/ops/switch.h | 45 -
 .../lite/src/ops/tensorlist_fromtensor.cc | 147 --
 .../lite/src/ops/tensorlist_fromtensor.h | 44 -
 mindspore/lite/src/ops/tensorlist_getitem.cc | 192 --
 mindspore/lite/src/ops/tensorlist_getitem.h | 49 -
 mindspore/lite/src/ops/tensorlist_reserve.cc | 138 --
 mindspore/lite/src/ops/tensorlist_reserve.h | 43 -
 mindspore/lite/src/ops/tensorlist_setitem.cc | 168 --
 mindspore/lite/src/ops/tensorlist_setitem.h | 43 -
 mindspore/lite/src/ops/tensorlist_stack.cc | 196 ---
 mindspore/lite/src/ops/tensorlist_stack.h | 53 -
 mindspore/lite/src/ops/tile.cc | 201 ---
 mindspore/lite/src/ops/tile.h | 50 -
 mindspore/lite/src/ops/topk.cc | 128 --
 mindspore/lite/src/ops/topk.h | 48 -
 mindspore/lite/src/ops/transpose.cc | 164 --
 mindspore/lite/src/ops/transpose.h | 47 -
 mindspore/lite/src/ops/tuple_get_item.cc | 70 -
 mindspore/lite/src/ops/tuple_get_item.h | 40 -
 mindspore/lite/src/ops/uniform_real.cc | 101 --
 mindspore/lite/src/ops/uniform_real.h | 46 -
 mindspore/lite/src/ops/unique.cc | 68 -
 mindspore/lite/src/ops/unique.h | 45 -
 .../lite/src/ops/unsorted_segment_sum.cc | 110 --
 mindspore/lite/src/ops/unsorted_segment_sum.h | 43 -
 mindspore/lite/src/ops/unsqueeze.cc | 116 --
 mindspore/lite/src/ops/unsqueeze.h | 46 -
 mindspore/lite/src/ops/unstack.cc | 81 -
 mindspore/lite/src/ops/unstack.h | 45 -
 mindspore/lite/src/ops/upsample.cc | 102 --
 mindspore/lite/src/ops/upsample.h | 50 -
 mindspore/lite/src/ops/where.cc | 123 --
 mindspore/lite/src/ops/where.h | 46 -
 mindspore/lite/src/ops/while.cc | 103 --
 mindspore/lite/src/ops/while.h | 50 -
 mindspore/lite/src/ops/zeros_like.cc | 66 -
 mindspore/lite/src/ops/zeros_like.h | 43 -
 .../runtime/agent/npu/npu_converter_utils.cc | 4 +-
 .../runtime/agent/npu/npu_converter_utils.h | 2 +-
 .../src/runtime/agent/npu/npu_executor.cc | 2 +-
 .../lite/src/runtime/agent/npu/npu_executor.h | 2 +-
 .../lite/src/runtime/agent/npu/npu_manager.cc | 2 +-
 .../lite/src/runtime/agent/npu/npu_manager.h | 7 +-
 .../agent/npu/optimizer/npu_base_pass.h | 2 +-
 .../agent/npu/optimizer/npu_fusion_pass.cc | 2 +-
 .../agent/npu/optimizer/npu_fusion_pass.h | 3 +-
 .../optimizer/npu_insert_transform_pass.cc | 32 +-
 .../npu/optimizer/npu_insert_transform_pass.h | 10 +-
 .../agent/npu/optimizer/npu_pass_manager.cc | 2 +-
 .../agent/npu/optimizer/npu_pass_manager.h | 2 +-
 .../agent/npu/optimizer/npu_pass_utils.cc | 38 +-
 .../agent/npu/optimizer/npu_pass_utils.h | 6 +-
 .../agent/npu/optimizer/npu_transform_pass.cc | 33 +-
 .../agent/npu/optimizer/npu_transform_pass.h | 11 +-
 .../runtime/agent/npu/subgraph_npu_kernel.cc | 2 +-
 .../runtime/agent/npu/subgraph_npu_kernel.h | 2 +-
 mindspore/lite/src/runtime/infer_manager.cc | 432 +++++
 mindspore/lite/src/runtime/infer_manager.h | 65 +
 .../runtime/kernel/arm/base/argminmax_base.cc | 12 +-
 .../runtime/kernel/arm/base/argminmax_base.h | 5 +-
 .../lite/src/runtime/kernel/arm/base/assert.h | 5 +-
 .../src/runtime/kernel/arm/base/carry_data.cc | 2 +-
 .../src/runtime/kernel/arm/base/carry_data.h | 5 +-
 .../kernel/arm/base/constant_of_shape.h | 5 +-
 .../kernel/arm/base/convolution_base.cc | 6 -
 .../kernel/arm/base/convolution_base.h | 5 +-
 .../src/runtime/kernel/arm/base/crop_base.h | 5 +-
 .../kernel/arm/base/depth_to_space_base.h | 5 +-
 .../arm/base/detection_post_process_base.h | 5 +-
 .../lite/src/runtime/kernel/arm/base/merge.h | 5 +-
 .../runtime/kernel/arm/base/pooling_base.cc | 11 +-
 .../runtime/kernel/arm/base/pooling_base.h | 5 +-
 .../src/runtime/kernel/arm/base/prior_box.h | 5 +-
 .../kernel/arm/base/quant_dtype_cast.h | 5 +-
 .../kernel/arm/base/random_standard_normal.h | 5 +-
 .../runtime/kernel/arm/base/reduce_base.cc | 34 +-
 .../src/runtime/kernel/arm/base/reduce_base.h | 5 +-
 .../runtime/kernel/arm/base/reshape_base.h | 5 +-
 .../runtime/kernel/arm/base/resize_base.cc | 8 +-
 .../src/runtime/kernel/arm/base/resize_base.h | 5 +-
 .../lite/src/runtime/kernel/arm/base/select.h | 5 +-
 .../src/runtime/kernel/arm/base/slice_base.cc | 30 +-
 .../src/runtime/kernel/arm/base/slice_base.h | 5 +-
 .../runtime/kernel/arm/base/softmax_base.cc | 1 -
 .../runtime/kernel/arm/base/softmax_base.h | 5 +-
 .../src/runtime/kernel/arm/base/split_base.h | 5 +-
 .../src/runtime/kernel/arm/base/stack_base.h | 5 +-
 .../runtime/kernel/arm/base/strided_slice.cc | 12 -
 .../runtime/kernel/arm/base/strided_slice.h | 5 +-
 .../src/runtime/kernel/arm/base/switch.cc | 2 -
 .../lite/src/runtime/kernel/arm/base/switch.h | 5 +-
 .../src/runtime/kernel/arm/base/tile_base.cc | 8 +-
 .../src/runtime/kernel/arm/base/tile_base.h | 5 +-
 .../runtime/kernel/arm/fp16/activation_fp16.h | 5 +-
 .../kernel/arm/fp16/arithmetic_compare_fp16.h | 5 +-
 .../kernel/arm/fp16/arithmetic_fp16.cc | 45 +-
 .../runtime/kernel/arm/fp16/arithmetic_fp16.h | 7 +-
 .../kernel/arm/fp16/arithmetic_self_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/batchnorm_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/bias_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/cast_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/concat_fp16.h | 5 +-
 .../kernel/arm/fp16/convolution_1x1_fp16.cc | 5 -
 .../kernel/arm/fp16/convolution_1x1_fp16.h | 7 +-
 .../kernel/arm/fp16/convolution_base_fp16.cc | 2 -
 .../kernel/arm/fp16/convolution_base_fp16.h | 5 +-
 .../arm/fp16/convolution_delegate_fp16.cc | 82 +-
 .../arm/fp16/convolution_delegate_fp16.h | 10 +-
 .../arm/fp16/convolution_depthwise_fp16.cc | 37 -
 .../arm/fp16/convolution_depthwise_fp16.h | 5 +-
 .../convolution_depthwise_slidewindow_fp16.cc | 5 -
 .../convolution_depthwise_slidewindow_fp16.h | 5 +-
 .../kernel/arm/fp16/convolution_fp16.cc | 6 -
 .../kernel/arm/fp16/convolution_fp16.h | 7 +-
 .../arm/fp16/convolution_winograd_fp16.h | 7 +-
 .../src/runtime/kernel/arm/fp16/crop_fp16.h | 5 +-
 .../arm/fp16/deconvolution_depthwise_fp16.cc | 30 -
 .../arm/fp16/deconvolution_depthwise_fp16.h | 5 +-
 .../kernel/arm/fp16/deconvolution_fp16.cc | 43 +-
 .../kernel/arm/fp16/deconvolution_fp16.h | 5 +-
 .../arm/fp16/deconvolution_winograd_fp16.cc | 3 -
 .../arm/fp16/deconvolution_winograd_fp16.h | 5 +-
 .../kernel/arm/fp16/fullconnection_fp16.h | 5 +-
 .../kernel/arm/fp16/fused_batchnorm_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/gather_fp16.cc | 7 +-
 .../src/runtime/kernel/arm/fp16/gather_fp16.h | 5 +-
 .../kernel/arm/fp16/group_convolution_fp16.cc | 13 +-
 .../kernel/arm/fp16/group_convolution_fp16.h | 3 +-
 .../src/runtime/kernel/arm/fp16/gru_fp16.cc | 4 +-
 .../src/runtime/kernel/arm/fp16/gru_fp16.h | 5 +-
 .../kernel/arm/fp16/instance_norm_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/lstm_fp16.cc | 28 +-
 .../src/runtime/kernel/arm/fp16/lstm_fp16.h | 7 +-
 .../kernel/arm/fp16/matmul_base_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/matmul_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/pad_fp16.cc | 4 +-
 .../src/runtime/kernel/arm/fp16/pad_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/pooling_fp16.cc | 6 +-
 .../runtime/kernel/arm/fp16/pooling_fp16.h | 5 +-
 .../kernel/arm/fp16/quant_dtype_cast_fp16.cc | 5 +-
 .../kernel/arm/fp16/quant_dtype_cast_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/reduce_fp16.cc | 4 +-
 .../src/runtime/kernel/arm/fp16/reduce_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/scale_fp16.cc | 4 +-
 .../src/runtime/kernel/arm/fp16/scale_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/softmax_fp16.cc | 4 +-
 .../runtime/kernel/arm/fp16/softmax_fp16.h | 5 +-
 .../src/runtime/kernel/arm/fp16/stack_fp16.h | 5 +-
 .../runtime/kernel/arm/fp16/transpose_fp16.h | 5 +-
 .../arm/fp16_grad/activation_fp16_grad.cc | 6 +-
 .../arm/fp16_grad/activation_fp16_grad.h | 5 +-
 .../fp16_grad/arithmetic_fp16_self_grad.cc | 85 +
 .../arm/fp16_grad/arithmetic_fp16_self_grad.h | 45 +
 .../runtime/kernel/arm/fp32/activation_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/adder_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/adder_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/addn_fp32.h | 5 +-
 .../kernel/arm/fp32/arithmetic_compare_fp32.h | 5 +-
 .../kernel/arm/fp32/arithmetic_fp32.cc | 85 +-
 .../runtime/kernel/arm/fp32/arithmetic_fp32.h | 23 +-
 .../kernel/arm/fp32/arithmetic_self_fp32.h | 5 +-
 .../kernel/arm/fp32/batch_to_space_fp32.cc | 1 -
 .../kernel/arm/fp32/batch_to_space_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/batchnorm_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/bias_fp32.h | 5 +-
 .../kernel/arm/fp32/broadcast_to_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/cast_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/concat_fp32.h | 5 +-
 .../kernel/arm/fp32/convolution_1x1_fp32.h | 4 +-
 .../arm/fp32/convolution_delegate_fp32.cc | 69 +-
 .../arm/fp32/convolution_delegate_fp32.h | 9 +-
 .../fp32/convolution_depthwise_3x3_fp32.cc | 6 -
 .../arm/fp32/convolution_depthwise_3x3_fp32.h | 5 +-
 .../arm/fp32/convolution_depthwise_fp32.cc | 54 -
 .../arm/fp32/convolution_depthwise_fp32.h | 6 +-
 .../convolution_depthwise_indirect_fp32.cc | 5 -
 .../convolution_depthwise_indirect_fp32.h | 5 +-
 .../convolution_depthwise_slidewindow_fp32.cc | 4 -
 .../convolution_depthwise_slidewindow_fp32.h | 5 +-
 .../kernel/arm/fp32/convolution_fp32.cc | 6 +-
 .../kernel/arm/fp32/convolution_fp32.h | 6 +-
 .../arm/fp32/convolution_winograd_fp32.cc | 5 -
 .../arm/fp32/convolution_winograd_fp32.h | 5 +-
 .../kernel/arm/fp32/crop_and_resize_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/crop_fp32.h | 5 +-
 .../arm/fp32/deconvolution_depthwise_fp32.cc | 30 -
 .../arm/fp32/deconvolution_depthwise_fp32.h | 5 +-
 .../kernel/arm/fp32/deconvolution_fp32.cc | 39 +-
 .../kernel/arm/fp32/deconvolution_fp32.h | 5 +-
 .../arm/fp32/deconvolution_winograd_fp32.h | 5 +-
 .../kernel/arm/fp32/depth_to_space_fp32.h | 5 +-
 .../arm/fp32/detection_post_process_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/elu_fp32.h | 5 +-
 .../kernel/arm/fp32/embedding_lookup_fp32.cc | 4 +-
 .../kernel/arm/fp32/embedding_lookup_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/exp_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/exp_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/fill_fp32.h | 5 +-
 .../kernel/arm/fp32/fullconnection_fp32.h | 5 +-
 .../kernel/arm/fp32/fused_batchnorm_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/gatherNd_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/gather_fp32.cc | 9 +-
 .../src/runtime/kernel/arm/fp32/gather_fp32.h | 6 +-
 .../kernel/arm/fp32/group_convolution_fp32.cc | 15 +-
 .../kernel/arm/fp32/group_convolution_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/gru_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/gru_fp32.h | 5 +-
 .../kernel/arm/fp32/instance_norm_fp32.h | 5 +-
 .../kernel/arm/fp32/invert_permutation_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/l2_norm_fp32.cc | 4 +-
 .../runtime/kernel/arm/fp32/l2_norm_fp32.h | 5 +-
 .../kernel/arm/fp32/layer_norm_fp32.cc | 4 +-
 .../runtime/kernel/arm/fp32/layer_norm_fp32.h | 5 +-
 .../arm/fp32/local_response_norm_fp32.cc | 5 +-
 .../arm/fp32/local_response_norm_fp32.h | 5 +-
 .../kernel/arm/fp32/lsh_projection_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/lstm_fp32.cc | 28 +-
 .../src/runtime/kernel/arm/fp32/lstm_fp32.h | 7 +-
 .../src/runtime/kernel/arm/fp32/matmul_fp32.h | 5 +-
 .../kernel/arm/fp32/matmul_fp32_base.h | 5 +-
 .../arm/fp32/non_max_suppression_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/nonzero_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/one_hot_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/pad_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/pad_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/pooling_fp32.cc | 6 +-
 .../runtime/kernel/arm/fp32/pooling_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/power_fp32.cc | 16 +-
 .../src/runtime/kernel/arm/fp32/power_fp32.h | 7 +-
 .../src/runtime/kernel/arm/fp32/prelu_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/prelu_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/range_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/rank_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/reduce_fp32.cc | 9 +-
 .../src/runtime/kernel/arm/fp32/reduce_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/resize_fp32.cc | 3 -
 .../src/runtime/kernel/arm/fp32/resize_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/reverse_fp32.cc | 6 +-
 .../runtime/kernel/arm/fp32/reverse_fp32.h | 5 +-
 .../kernel/arm/fp32/reverse_sequence_fp32.h | 5 +-
 .../kernel/arm/fp32/roi_pooling_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/scale_fp32.cc | 4 +-
 .../src/runtime/kernel/arm/fp32/scale_fp32.h | 5 +-
 .../kernel/arm/fp32/scatter_nd_fp32.cc | 4 +-
 .../runtime/kernel/arm/fp32/scatter_nd_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/shape_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/size_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/skip_gram_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/softmax_fp32.cc | 4 +-
 .../runtime/kernel/arm/fp32/softmax_fp32.h | 5 +-
 .../kernel/arm/fp32/space_to_batch_fp32.h | 5 +-
 .../kernel/arm/fp32/space_to_depth_fp32.h | 5 +-
 .../kernel/arm/fp32/sparse_to_dense_fp32.h | 5 +-
 .../arm/fp32/tensorlist_fromtensor_fp32.h | 7 +-
 .../kernel/arm/fp32/tensorlist_getitem_fp32.h | 7 +-
 .../kernel/arm/fp32/tensorlist_reserve_fp32.h | 7 +-
 .../arm/fp32/tensorlist_setitem_fp32.cc | 5 +-
 .../kernel/arm/fp32/tensorlist_setitem_fp32.h | 7 +-
 .../kernel/arm/fp32/tensorlist_stack_fp32.h | 7 +-
 .../src/runtime/kernel/arm/fp32/topk_fp32.cc | 31 +-
 .../src/runtime/kernel/arm/fp32/topk_fp32.h | 8 +-
 .../runtime/kernel/arm/fp32/transpose_fp32.cc | 30 +-
 .../runtime/kernel/arm/fp32/transpose_fp32.h | 5 +-
 .../src/runtime/kernel/arm/fp32/unique_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/unstack_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/upsample_fp32.cc | 135 --
 .../runtime/kernel/arm/fp32/upsample_fp32.h | 43 -
 .../src/runtime/kernel/arm/fp32/where_fp32.h | 5 +-
 .../runtime/kernel/arm/fp32/zeroslike_fp32.h | 5 +-
 .../kernel/arm/fp32_grad/activation_grad.cc | 5 +-
 .../kernel/arm/fp32_grad/activation_grad.h | 5 +-
 .../src/runtime/kernel/arm/fp32_grad/adam.cc | 5 +-
 .../src/runtime/kernel/arm/fp32_grad/adam.h | 5 +-
 .../kernel/arm/fp32_grad/apply_momentum.cc | 5 +-
 .../kernel/arm/fp32_grad/apply_momentum.h | 5 +-
 .../kernel/arm/fp32_grad/arithmetic_grad.cc | 5 +-
 .../kernel/arm/fp32_grad/arithmetic_grad.h | 5 +-
 .../arm/fp32_grad/arithmetic_self_grad.cc | 5 +-
 .../arm/fp32_grad/arithmetic_self_grad.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/assign.cc | 5 +-
 .../src/runtime/kernel/arm/fp32_grad/assign.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/bias_grad.cc | 11 +-
 .../runtime/kernel/arm/fp32_grad/bias_grad.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/bn_grad.cc | 11 +-
 .../runtime/kernel/arm/fp32_grad/bn_grad.h | 5 +-
 .../kernel/arm/fp32_grad/convolution.cc | 7 +-
 .../kernel/arm/fp32_grad/convolution.h | 8 +-
 .../arm/fp32_grad/convolution_grad_filter.cc | 11 +-
 .../arm/fp32_grad/convolution_grad_filter.h | 5 +-
 .../arm/fp32_grad/convolution_grad_input.cc | 33 +-
 .../arm/fp32_grad/convolution_grad_input.h | 5 +-
 .../fp32_grad/deconvolution_grad_filter.cc | 5 +-
 .../arm/fp32_grad/deconvolution_grad_filter.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/dropout.cc | 5 +-
 .../runtime/kernel/arm/fp32_grad/dropout.h | 5 +-
 .../kernel/arm/fp32_grad/dropout_grad.cc | 5 +-
 .../kernel/arm/fp32_grad/dropout_grad.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/make_tuple.h | 5 +-
 .../runtime/kernel/arm/fp32_grad/neg_grad.cc | 5 +-
 .../runtime/kernel/arm/fp32_grad/neg_grad.h | 5 +-
 .../kernel/arm/fp32_grad/pooling_grad.cc | 12 +-
 .../kernel/arm/fp32_grad/pooling_grad.h | 5 +-
 .../kernel/arm/fp32_grad/power_grad.cc | 5 +-
 .../runtime/kernel/arm/fp32_grad/power_grad.h | 5 +-
 .../src/runtime/kernel/arm/fp32_grad/sgd.cc | 11 +-
 .../src/runtime/kernel/arm/fp32_grad/sgd.h | 7 +-
 .../sigmoid_cross_entropy_with_logits.cc | 11 +-
 .../sigmoid_cross_entropy_with_logits.h | 5 +-
 .../sigmoid_cross_entropy_with_logits_grad.cc | 11 +-
 .../sigmoid_cross_entropy_with_logits_grad.h | 5 +-
 .../kernel/arm/fp32_grad/smooth_l1_loss.cc | 5 +-
 .../kernel/arm/fp32_grad/smooth_l1_loss.h | 7 +-
 .../arm/fp32_grad/smooth_l1_loss_grad.cc | 5 +-
 .../arm/fp32_grad/smooth_l1_loss_grad.h | 7 +-
 .../softmax_cross_entropy_with_logits.cc | 13 +-
 .../softmax_cross_entropy_with_logits.h | 5 +-
 .../kernel/arm/fp32_grad/softmax_grad.cc | 5 +-
 .../kernel/arm/fp32_grad/softmax_grad.h | 5 +-
 ...parse_softmax_cross_entropy_with_logits.cc | 11 +-
 ...sparse_softmax_cross_entropy_with_logits.h | 5 +-
 .../kernel/arm/fp32_grad/strided_slice_grad.h | 5 +-
 .../kernel/arm/fp32_grad/tuple_getitem.cc | 106 --
 .../kernel/arm/fp32_grad/tuple_getitem.h | 46 -
 .../kernel/arm/int8/activation_int8.cc | 15 +-
 .../src/runtime/kernel/arm/int8/add_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/add_int8.h | 5 +-
 .../runtime/kernel/arm/int8/argminmax_int8.cc | 8 +-
 .../runtime/kernel/arm/int8/argminmax_int8.h | 5 +-
 .../kernel/arm/int8/arithmetic_int8.cc | 19 +-
 .../runtime/kernel/arm/int8/arithmetic_int8.h | 5 +-
 .../kernel/arm/int8/arithmetic_self_int8.h | 5 +-
 .../kernel/arm/int8/batch_to_space_int8.h | 5 +-
 .../runtime/kernel/arm/int8/batchnorm_int8.h | 5 +-
 .../runtime/kernel/arm/int8/bias_add_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/concat_int8.h | 5 +-
 .../kernel/arm/int8/convolution_1x1_int8.h | 5 +-
 .../kernel/arm/int8/convolution_3x3_int8.cc | 5 -
 .../kernel/arm/int8/convolution_3x3_int8.h | 5 +-
 .../int8/convolution_depthwise_3x3_int8.cc | 7 +-
 .../arm/int8/convolution_depthwise_3x3_int8.h | 5 +-
 .../arm/int8/convolution_depthwise_int8.cc | 57 -
 .../arm/int8/convolution_depthwise_int8.h | 5 +-
 .../convolution_depthwise_slidewindow_int8.cc | 70 +-
 .../convolution_depthwise_slidewindow_int8.h | 6 +-
 .../kernel/arm/int8/convolution_int8.cc | 87 +-
 .../kernel/arm/int8/convolution_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/crop_int8.h | 5 +-
 .../arm/int8/deconvolution_depthwise_int8.cc | 30 -
 .../arm/int8/deconvolution_depthwise_int8.h | 5 +-
 .../kernel/arm/int8/deconvolution_int8.cc | 34 +-
 .../kernel/arm/int8/deconvolution_int8.h | 5 +-
 .../kernel/arm/int8/depth_to_space_int8.h | 5 +-
 .../arm/int8/detection_post_process_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/div_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/div_int8.h | 5 +-
 .../kernel/arm/int8/fullconnection_int8.h | 5 +-
 .../runtime/kernel/arm/int8/gatherNd_int8.h | 5 +-
 .../runtime/kernel/arm/int8/gather_int8.cc | 1 -
 .../src/runtime/kernel/arm/int8/gather_int8.h | 6 +-
 .../kernel/arm/int8/group_convolution_int8.cc | 1 -
 .../kernel/arm/int8/group_convolution_int8.h | 3 +-
 .../src/runtime/kernel/arm/int8/hswish_int8.h | 5 +-
 .../runtime/kernel/arm/int8/l2_norm_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/l2_norm_int8.h | 5 +-
 .../kernel/arm/int8/layer_norm_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/layer_norm_int8.h | 5 +-
 .../kernel/arm/int8/leaky_relu_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/leaky_relu_int8.h | 5 +-
 .../kernel/arm/int8/matmul_base_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/matmul_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/mul_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/mul_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/pad_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/pad_int8.h | 5 +-
 .../runtime/kernel/arm/int8/pooling_int8.cc | 6 +-
 .../runtime/kernel/arm/int8/pooling_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/power_int8.cc | 6 +-
 .../src/runtime/kernel/arm/int8/power_int8.h | 5 +-
 .../runtime/kernel/arm/int8/reduce_int8.cc | 37 +-
 .../src/runtime/kernel/arm/int8/reduce_int8.h | 6 +-
 .../src/runtime/kernel/arm/int8/relux_int8.h | 15 +-
 .../runtime/kernel/arm/int8/reshape_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/resize_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/scale_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/scale_int8.h | 5 +-
 .../runtime/kernel/arm/int8/sigmoid_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/slice_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/slice_int8.h | 5 +-
 .../runtime/kernel/arm/int8/softmax_int8.cc | 4 +-
 .../runtime/kernel/arm/int8/softmax_int8.h | 5 +-
 .../kernel/arm/int8/space_to_batch_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/split_int8.h | 5 +-
 .../runtime/kernel/arm/int8/squeeze_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/sub_int8.cc | 38 +-
 .../src/runtime/kernel/arm/int8/sub_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/tanh_int8.h | 5 +-
 .../src/runtime/kernel/arm/int8/topk_int8.cc | 4 +-
 .../src/runtime/kernel/arm/int8/topk_int8.h | 5 +-
 .../runtime/kernel/arm/int8/transpose_int8.cc | 9 +
 .../runtime/kernel/arm/int8/transpose_int8.h | 5 +-
 .../runtime/kernel/arm/int8/unsqueeze_int8.h | 5 +-
 .../kernel/arm/string/extract_feature.cc | 5 +-
 .../kernel/arm/string/extract_feature.h | 5 +-
 .../kernel/arm/string/hashtable_lookup.cc | 5 +-
 .../kernel/arm/string/hashtable_lookup.h | 5 +-
 .../runtime/kernel/arm/string/normalize.cc | 5 +-
 .../src/runtime/kernel/arm/string/normalize.h | 5 +-
 .../src/runtime/kernel/arm/string/predict.cc | 5 +-
 .../src/runtime/kernel/arm/string/predict.h | 5 +-
 .../src/runtime/kernel/npu/activation_npu.cc | 3 +-
 .../src/runtime/kernel/npu/activation_npu.h | 9 +-
 .../src/runtime/kernel/npu/arithmetic_npu.cc | 37 +-
 .../src/runtime/kernel/npu/arithmetic_npu.h | 9 +-
 .../runtime/kernel/npu/arithmetic_self_npu.cc | 7 +-
 .../runtime/kernel/npu/arithmetic_self_npu.h | 9 +-
 .../src/runtime/kernel/npu/batchnorm_npu.cc | 2 +-
 .../src/runtime/kernel/npu/batchnorm_npu.h | 8 +-
 .../lite/src/runtime/kernel/npu/cast_npu.cc | 10 +-
 .../lite/src/runtime/kernel/npu/cast_npu.h | 10 +-
 .../lite/src/runtime/kernel/npu/concat_npu.cc | 2 +-
 .../lite/src/runtime/kernel/npu/concat_npu.h | 10 +-
 .../kernel/npu/convolution_base_npu.cc | 2 +-
 .../runtime/kernel/npu/convolution_base_npu.h | 10 +-
 .../kernel/npu/convolution_depthwise_npu.cc | 11 +-
 .../kernel/npu/convolution_depthwise_npu.h | 9 +-
 .../src/runtime/kernel/npu/convolution_npu.cc | 44 +-
 .../src/runtime/kernel/npu/convolution_npu.h | 7 +-
 .../runtime/kernel/npu/deconvolution_npu.cc | 13 +-
 .../runtime/kernel/npu/deconvolution_npu.h | 7 +-
 .../src/runtime/kernel/npu/eltwise_npu.cc | 3 +-
 .../lite/src/runtime/kernel/npu/eltwise_npu.h | 14 +-
 .../runtime/kernel/npu/fullconnection_npu.h | 5 +-
 .../lite/src/runtime/kernel/npu/gather_npu.cc | 11 +-
 .../lite/src/runtime/kernel/npu/gather_npu.h | 10 +-
 .../runtime/kernel/npu/instance_norm_npu.h | 5 +-
 .../lite/src/runtime/kernel/npu/matmul_npu.cc | 5 +-
 .../lite/src/runtime/kernel/npu/matmul_npu.h | 9 +-
 .../lite/src/runtime/kernel/npu/npu_kernel.h | 28 +-
 .../lite/src/runtime/kernel/npu/pad_npu.cc | 22 +-
 .../lite/src/runtime/kernel/npu/pad_npu.h | 15 +-
 .../src/runtime/kernel/npu/pooling_npu.cc | 12 +-
 .../lite/src/runtime/kernel/npu/pooling_npu.h | 7 +-
 .../lite/src/runtime/kernel/npu/reduce_npu.cc | 20 +-
 .../lite/src/runtime/kernel/npu/reduce_npu.h | 7 +-
 .../src/runtime/kernel/npu/reshape_npu.cc | 16 +-
 .../lite/src/runtime/kernel/npu/reshape_npu.h | 9 +-
 .../lite/src/runtime/kernel/npu/resize_npu.cc | 1 +
 .../lite/src/runtime/kernel/npu/resize_npu.h | 11 +-
 .../lite/src/runtime/kernel/npu/scale_npu.cc | 4 +-
 .../lite/src/runtime/kernel/npu/scale_npu.h | 9 +-
 .../lite/src/runtime/kernel/npu/shape_npu.cc | 3 +-
 .../lite/src/runtime/kernel/npu/shape_npu.h | 9 +-
 .../lite/src/runtime/kernel/npu/slice_npu.cc | 7 +-
 .../lite/src/runtime/kernel/npu/slice_npu.h | 10 +-
 .../src/runtime/kernel/npu/softmax_npu.cc | 6 +-
 .../lite/src/runtime/kernel/npu/softmax_npu.h | 9 +-
 .../lite/src/runtime/kernel/npu/split_npu.cc | 12 +-
 .../lite/src/runtime/kernel/npu/split_npu.h | 15 +-
 .../src/runtime/kernel/npu/squeeze_npu.cc | 6 +-
 .../lite/src/runtime/kernel/npu/squeeze_npu.h | 17 +-
 .../runtime/kernel/npu/strided_slice_npu.cc | 13 +-
 .../runtime/kernel/npu/strided_slice_npu.h | 14 +-
 .../src/runtime/kernel/npu/transpose_npu.cc | 14 +-
 .../src/runtime/kernel/npu/transpose_npu.h | 18 +-
 .../src/runtime/kernel/npu/unsqueeze_npu.cc | 2 +-
 .../src/runtime/kernel/npu/unsqueeze_npu.h | 15 +-
 .../runtime/kernel/opencl/cl/activation.cl | 20 +-
 .../kernel/opencl/cl/arithmeticself.cl | 1 -
 .../lite/src/runtime/kernel/opencl/cl/cast.cl | 53 +-
 .../kernel/opencl/cl/conv2d_transpose.cl | 8 +-
 .../src/runtime/kernel/opencl/cl/reduce.cl | 20 +-
 .../src/runtime/kernel/opencl/cl/softmax.cl | 8 +-
 .../runtime/kernel/opencl/kernel/activation.h | 5 +-
 .../runtime/kernel/opencl/kernel/argminmax.cc | 14 +-
 .../runtime/kernel/opencl/kernel/argminmax.h | 4 +-
 .../kernel/opencl/kernel/arithmetic.cc | 55 +-
 .../kernel/opencl/kernel/arithmetic_self.cc | 11 +-
 .../kernel/opencl/kernel/arithmetic_self.h | 2 +-
 .../src/runtime/kernel/opencl/kernel/cast.cc | 82 +-
 .../src/runtime/kernel/opencl/kernel/cast.h | 2 +-
 .../runtime/kernel/opencl/kernel/conv2d.cc | 67 +-
 .../src/runtime/kernel/opencl/kernel/conv2d.h | 8 +-
 .../kernel/opencl/kernel/conv2d_transpose.cc | 60 +-
 .../kernel/opencl/kernel/depthwise_conv2d.cc | 19 +-
 .../kernel/opencl/kernel/depthwise_conv2d.h | 6 +-
 .../kernel/opencl/kernel/fusion_eltwise.cc | 9 +-
 .../kernel/opencl/kernel/fusion_eltwise.h | 14 +-
 .../runtime/kernel/opencl/kernel/gather.cc | 7 +-
 .../kernel/opencl/kernel/layer_norm.cc | 6 +-
 .../runtime/kernel/opencl/kernel/matmul.cc | 21 +-
 .../src/runtime/kernel/opencl/kernel/pad.cc | 8 +-
 .../src/runtime/kernel/opencl/kernel/pad.h | 6 +-
 .../runtime/kernel/opencl/kernel/pooling2d.cc | 9 +-
 .../runtime/kernel/opencl/kernel/pooling2d.h | 6 +-
 .../src/runtime/kernel/opencl/kernel/power.cc | 6 +-
 .../src/runtime/kernel/opencl/kernel/prelu.cc | 6 +-
 .../runtime/kernel/opencl/kernel/reduce.cc | 54 +-
 .../src/runtime/kernel/opencl/kernel/reduce.h | 1 +
 .../runtime/kernel/opencl/kernel/reshape.cc | 4 +-
 .../runtime/kernel/opencl/kernel/reshape.h | 2 -
 .../src/runtime/kernel/opencl/kernel/scale.cc | 6 +-
 .../runtime/kernel/opencl/kernel/softmax.cc | 41 +-
 .../runtime/kernel/opencl/kernel/softmax.h | 15 +-
 .../kernel/opencl/kernel/strided_slice.cc | 63 +-
 .../runtime/kernel/opencl/kernel/to_format.cc | 3 -
 .../runtime/kernel/opencl/kernel/transpose.cc | 45 +-
 .../runtime/kernel/opencl/kernel/transpose.h | 2 +-
 .../runtime/kernel/opencl/kernel/winograd.h | 6 +-
 .../runtime/kernel/opencl/opencl_fusion.cc | 68 +-
 .../runtime/kernel/opencl/opencl_kernel.cc | 17 +-
 .../src/runtime/kernel/opencl/opencl_kernel.h | 23 +-
 .../runtime/kernel/opencl/opencl_subgraph.cc | 27 +-
 .../lite/src/runtime/kernel/opencl/utils.cc | 240 ++-
 .../lite/src/runtime/kernel/opencl/utils.h | 3 +
 mindspore/lite/src/scheduler.cc | 150 +-
 mindspore/lite/src/scheduler.h | 5 +-
 mindspore/lite/src/sub_graph_kernel.cc | 19 +-
 mindspore/lite/src/sub_graph_kernel.h | 2 +-
 mindspore/lite/src/train/loss_kernel.h | 5 +-
 mindspore/lite/src/train/optimizer_kernel.h | 5 +-
 mindspore/lite/src/train/train_loop.h | 4 +-
 mindspore/lite/src/train/train_model.cc | 1 -
 .../src/train/train_populate_parameter.cc | 567 +++---
 .../lite/src/train/train_populate_parameter.h | 2 -
 .../src/train/train_populate_parameter_v0.cc | 661 +++++++
 .../src/train/train_populate_parameter_v0.h | 25 +
 mindspore/lite/src/train/train_session.cc | 16 +-
 mindspore/lite/src/train/train_session.h | 1 -
 mindspore/lite/src/train/transfer_session.h | 1 -
 mindspore/lite/test/CMakeLists.txt | 28 +-
 .../test/common/import_from_meta_graphT.cc | 175 ++
 .../test/common/import_from_meta_graphT.h | 64 +
 .../lite/test/models_for_process_only.cfg | 4 -
 mindspore/lite/test/models_gpu_fp32.cfg | 1 -
 mindspore/lite/test/models_mindspore.cfg | 30 +-
 .../lite/test/models_mindspore_mixbit.cfg | 2 +-
 .../lite/test/models_mindspore_train.cfg | 2 +-
 .../test/models_mindspore_weightquant.cfg | 4 +-
 mindspore/lite/test/models_ms_train.cfg | 21 +-
 mindspore/lite/test/models_onnx_fp16.cfg | 6 +-
 mindspore/lite/test/run_benchmark_nets.sh | 444 ++---
 mindspore/lite/test/st/control_flow_test.cc | 40 +-
 mindspore/lite/test/st/sub_graph_test.cc | 54 +-
 .../test/ut/nnacl/infer/adam_infer_test.cc | 52 +
 .../test/ut/nnacl/infer/addn_infer_test.cc | 93 +
 .../nnacl/infer/apply_momentum_infer_test.cc | 52 +
 .../test/ut/nnacl/infer/argmax_infer_test.cc | 140 ++
 .../test/ut/nnacl/infer/argmin_infer_test.cc | 140 ++
 .../infer/arithmetic_compare_infer_test.cc | 173 ++
 .../ut/nnacl/infer/arithmetic_infer_test.cc | 173 ++
 .../ut/nnacl/infer/assign_add_infer_test.cc | 55 +
 .../test/ut/nnacl/infer/assign_infer_test.cc | 57 +
 .../infer/audio_spectrogram_infer_test.cc | 60 +
 .../nnacl/infer/batch_to_space_infer_test.cc | 187 ++
 .../ut/nnacl/infer/bias_grad_infer_test.cc | 60 +
 .../infer/binary_cross_entropy_infer_test.cc | 87 +
 .../test/ut/nnacl/infer/bn_grad_infer_test.cc | 83 +
 .../ut/nnacl/infer/broadcast_to_infer_test.cc | 152 ++
 .../test/ut/nnacl/infer/cast_infer_test.cc | 57 +
 .../test/ut/nnacl/infer/concat_infer_test.cc | 245 +++
 .../infer/constant_of_shape_infer_test.cc | 63 +
 .../infer/conv2d_grad_filter_infer_test.cc | 54 +
 .../infer/conv2d_grad_input_infer_test.cc | 54 +
 .../test/ut/nnacl/infer/conv2d_infer_test.cc | 540 ++++++
 .../nnacl/infer/crop_and_resize_infer_test.cc | 126 ++
 .../test/ut/nnacl/infer/crop_infer_test.cc | 62 +
 .../custom_extract_features_infer_test.cc | 96 +
 .../infer/custom_normalize_infer_test.cc | 86 +
 .../nnacl/infer/custom_predict_infer_test.cc | 58 +
 .../ut/nnacl/infer/deconv2d_infer_test.cc | 172 ++
 .../infer/dedepthwise_conv2d_infer_test.cc | 175 ++
 .../nnacl/infer/depth_to_space_infer_test.cc | 181 ++
 .../infer/depthwise_conv2d_infer_test.cc | 551 ++++++
 .../detection_post_process_infer_test.cc | 83 +
 .../ut/nnacl/infer/dropout_grad_infer_test.cc | 57 +
 .../infer/embedding_lookup_infer_test.cc | 68 +
 .../ut/nnacl/infer/expand_dims_infer_test.cc | 126 ++
 .../ut/nnacl/infer/fft_imag_infer_test.cc | 59 +
 .../test/ut/nnacl/infer/fill_infer_test.cc | 140 ++
 .../ut/nnacl/infer/flatten_grad_infer_test.cc | 57 +
 .../test/ut/nnacl/infer/flatten_infer_test.cc | 128 ++
 .../nnacl/infer/full_connection_infer_test.cc | 125 ++
 .../nnacl/infer/fused_batchnorm_infer_test.cc | 64 +
 .../test/ut/nnacl/infer/gather_infer_test.cc | 194 ++
 .../ut/nnacl/infer/gather_nd_infer_test.cc | 187 ++
 .../group_conv2d_grad_input_infer_test.cc | 54 +
 .../test/ut/nnacl/infer/gru_infer_test.cc | 134 ++
 .../infer/hashtable_lookup_infer_test.cc | 62 +
 .../test/ut/nnacl/infer/infer_manager_test.cc | 204 +++
 .../infer/invert_permutation_infer_test.cc | 54 +
 .../ut/nnacl/infer/layer_norm_infer_test.cc | 107 ++
 .../nnacl/infer/lsh_projection_infer_test.cc | 126 ++
 .../test/ut/nnacl/infer/lstm_infer_test.cc | 79 +
 .../test/ut/nnacl/infer/matmul_infer_test.cc | 161 ++
 .../ut/nnacl/infer/maximum_grad_infer_test.cc | 84 +
 .../test/ut/nnacl/infer/mean_infer_test.cc | 182 ++
 .../test/ut/nnacl/infer/mfcc_infer_test.cc | 62 +
 .../test/ut/nnacl/infer/one_hot_infer_test.cc | 61 +
 .../test/ut/nnacl/infer/pad_infer_test.cc | 193 ++
 .../ut/nnacl/infer/pooling_grad_infer_test.cc | 69 +
 .../test/ut/nnacl/infer/pooling_infer_test.cc | 276 +++
 .../test/ut/nnacl/infer/power_infer_test.cc | 115 ++
 .../infer/quant_dtype_cast_infer_test.cc | 56 +
 .../random_standard_normal_infer_test.cc | 59 +
 .../test/ut/nnacl/infer/range_infer_test.cc | 135 ++
 .../test/ut/nnacl/infer/rank_infer_test.cc | 53 +
 .../test/ut/nnacl/infer/reduce_infer_test.cc | 185 ++
 .../test/ut/nnacl/infer/reshape_infer_test.cc | 361 ++++
 .../test/ut/nnacl/infer/resize_infer_test.cc | 179 ++
 .../test/ut/nnacl/infer/rfft_infer_test.cc | 55 +
 .../ut/nnacl/infer/roi_pooling_infer_test.cc | 62 +
 .../ut/nnacl/infer/scatter_nd_infer_test.cc | 61 +
 .../test/ut/nnacl/infer/select_infer_test.cc | 241 +++
 .../test/ut/nnacl/infer/sgd_infer_test.cc | 66 +
 .../test/ut/nnacl/infer/shape_infer_test.cc | 51 +
 .../test/ut/nnacl/infer/size_infer_test.cc | 55 +
 .../ut/nnacl/infer/skip_gram_infer_test.cc | 51 +
 .../test/ut/nnacl/infer/slice_infer_test.cc | 175 ++
 .../infer/softmax_cross_entropy_infer_test.cc | 62 +
 .../test/ut/nnacl/infer/softmax_infer_test.cc | 56 +
 .../nnacl/infer/space_to_batch_infer_test.cc | 178 ++
 .../infer/space_to_batch_nd_infer_test.cc | 179 ++
 .../nnacl/infer/space_to_depth_infer_test.cc | 90 +
 .../nnacl/infer/sparse_to_dense_infer_test.cc | 59 +
 .../test/ut/nnacl/infer/split_infer_test.cc | 231 +++
 .../test/ut/nnacl/infer/squeeze_infer_test.cc | 151 ++
 .../test/ut/nnacl/infer/stack_infer_test.cc | 94 +
 .../nnacl/infer/strided_slice_infer_test.cc | 318 ++++
 .../infer/tensorlist_fromtensor_infer_test.cc | 79 +
 .../infer/tensorlist_getitem_infer_test.cc | 97 +
 .../infer/tensorlist_reserve_infer_test.cc | 71 +
 .../infer/tensorlist_setitem_infer_test.cc | 108 ++
 .../infer/tensorlist_stack_infer_test.cc | 89 +
 .../test/ut/nnacl/infer/tile_infer_test.cc | 95 +
 .../test/ut/nnacl/infer/topk_infer_test.cc | 99 ++
 .../ut/nnacl/infer/transpose_infer_test.cc | 62 +
 .../test/ut/nnacl/infer/unique_infer_test.cc | 56 +
 .../infer/unsorted_segment_sum_infer_test.cc | 61 +
 .../ut/nnacl/infer/unsqueeze_infer_test.cc | 205 +++
 .../test/ut/nnacl/infer/unstack_infer_test.cc | 58 +
 .../test/ut/nnacl/infer/where_infer_test.cc | 89 +
 .../test/ut/nnacl/infer/while_infer_test.cc | 62 +
 mindspore/lite/test/ut/src/infer_test.cc | 27 +-
 .../kernel/arm/common/strided_slice_tests.cc | 4 +-
 .../kernel/arm/fp16/reduce_fp16_tests.cc | 6 +-
 .../fp16_grad/activation_grad_fp16_test.cc | 54 -
 .../arithmetic_fp16_self_grad_tests.cc | 87 +
 .../kernel/arm/fp32/activation_fp32_test.cc | 6 +-
 .../kernel/arm/fp32/batchnorm_fp32_tests.cc | 6 +-
 .../arm/fp32/constant_of_shape_fp32_test.cc | 2 +-
 .../fp32/convolution_depthwise_fp32_tests.cc | 10 +-
 .../runtime/kernel/arm/fp32/crop_fp32_test.cc | 2 +-
 .../arm/fp32/deconvolution_fp32_tests.cc | 8 +-
 .../arm/fp32/detection_post_process_test.cc | 2 +-
 .../runtime/kernel/arm/fp32/elu_fp32_test.cc | 2 +-
 .../arm/fp32/embedding_lookup_fp32_test.cc | 2 +-
 .../arm/fp32/fullconnection_fp32_tests.cc | 16 +-
 .../kernel/arm/fp32/l2norm_fp32_test.cc | 4 +-
 .../arm/fp32/lsh_projection_fp32_tests.cc | 6 +-
 .../kernel/arm/fp32/lstm_fp32_tests.cc | 10 +-
 .../kernel/arm/fp32/matmul_fp32_tests.cc | 10 +-
 .../fp32/non_max_suppression_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/one_hot_fp32_test.cc | 2 +-
 .../runtime/kernel/arm/fp32/pad_fp32_test.cc | 6 +-
 .../kernel/arm/fp32/power_fp32_tests.cc | 4 +-
 .../kernel/arm/fp32/reduce_fp32_tests.cc | 4 +-
 .../arm/fp32/resize_bilinear_fp32_tests.cc | 2 +-
 .../resize_nearest_neighbor_fp32_tests.cc | 2 +-
 .../arm/fp32/reverse_sequence_fp32_tests.cc | 6 +-
 .../kernel/arm/fp32/roi_pooling_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/scale_fp32_tests.cc | 4 +-
 .../runtime/kernel/arm/fp32/skip_gram_fp32.cc | 2 +-
 .../runtime/kernel/arm/fp32/softmax_tests.cc | 4 +-
 .../arm/fp32/space_to_depth_fp32_tests.cc | 2 +-
 .../arm/fp32/sparse_to_dense_fp32_tests.cc | 10 +-
 .../arm/fp32/strided_slice_fp32_tests.cc | 14 +-
 .../kernel/arm/fp32/tile_fp32_tests.cc | 12 +-
 .../kernel/arm/fp32/topk_fp32_tests.cc | 4 +-
 .../kernel/arm/fp32/transpose_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/unique_fp32_tests.cc | 2 +-
 .../kernel/arm/fp32/unstack_fp32_tests.cc | 4 +-
 .../kernel/arm/fp32/upsample_fp32_tests.cc | 247 ---
 .../fp32_grad/arithmetic_grad_fp32_tests.cc | 38 +-
 .../arm/fp32_grad/bias_grad_fp32_tests.cc | 8 +-
 .../kernel/arm/fp32_grad/bn_grad_fp32_test.cc | 7 +-
 .../fp32_grad/convolution_grad_fp32_tests.cc | 34 +-
 .../deconvolution_grad_fp32_tests.cc | 12 +-
 .../kernel/arm/fp32_grad/network_test.cc | 23 +-
 .../arm/fp32_grad/pooling_grad_fp32_tests.cc | 35 +-
 .../softmax_crossentropy_fp32_tests.cc | 5 +-
 .../arm/fp32_grad/softmax_grad_fp32_tests.cc | 1 -
 .../runtime/kernel/arm/int8/add_int8_tests.cc | 4 +-
 .../arm/int8/arithmetic_self_int8_tests.cc | 32 +-
 .../kernel/arm/int8/batchnorm_int8_test.cc | 4 +-
 .../kernel/arm/int8/concat_int8_tests.cc | 6 +-
 .../kernel/arm/int8/conv_1x1_int8_tests.cc | 12 +-
 .../kernel/arm/int8/crop_int8_tests.cc | 20 +-
 .../kernel/arm/int8/deconv_int8_tests.cc | 4 +-
 .../arm/int8/fullconnection_int8_tests.cc | 2 +-
 .../kernel/arm/int8/gatherNd_int8_test.cc | 2 +-
 .../kernel/arm/int8/gather_int8_test.cc | 3 +-
 .../kernel/arm/int8/hswish_int8_tests.cc | 2 +-
 .../kernel/arm/int8/l2_norm_int8_tests.cc | 8 +-
 .../kernel/arm/int8/matmul_int8_tests.cc | 4 +-
 .../runtime/kernel/arm/int8/mul_int8_tests.cc | 30 +-
 .../runtime/kernel/arm/int8/pad_int8_tests.cc | 6 +-
 .../kernel/arm/int8/power_int8_tests.cc | 12 +-
 .../kernel/arm/int8/prelu_int8_tests.cc | 6 +-
 .../kernel/arm/int8/quant_dtype_cast_tests.cc | 4 +-
 .../kernel/arm/int8/reduce_int8_tests.cc | 4 +-
 .../kernel/arm/int8/relux_int8_tests.cc | 4 +-
 .../kernel/arm/int8/reshape_int8_tests.cc | 4 +-
 .../arm/int8/resize_bilinear_int8_tests.cc | 2 +-
 .../resize_nearest_neighbor_int8_tests.cc | 2 +-
 .../src/runtime/kernel/arm/int8/scale_int8.cc | 4 +-
 .../kernel/arm/int8/sigmoid_int8_tests.cc | 2 +-
 .../kernel/arm/int8/slice_int8_tests.cc | 4 +-
 .../kernel/arm/int8/softmax_int8_tests.cc | 6 +-
 .../arm/int8/space_to_batch_int8_tests.cc | 2 +-
 .../kernel/arm/int8/split_int8_tests.cc | 6 +-
 .../kernel/arm/int8/squeeze_int8_tests.cc | 2 +-
 .../runtime/kernel/arm/int8/sub_int_tests.cc | 8 +-
 .../kernel/arm/int8/topk_int8_tests.cc | 4 +-
 .../kernel/arm/int8/unsqueeze_int8_tests.cc | 2 +-
 .../runtime/kernel/arm/string/normalize.cc | 2 +-
 .../runtime/kernel/opencl/argminmax_tests.cc | 30 +-
 .../runtime/kernel/opencl/arithmetic_tests.cc | 13 +-
 .../src/runtime/kernel/opencl/cast_tests.cc | 8 +-
 .../ut/src/runtime/kernel/opencl/common.cc | 20 +-
 .../src/runtime/kernel/opencl/conv2d_tests.cc | 2 +-
 .../kernel/opencl/conv2d_transpose_tests.cc | 2 +-
 .../kernel/opencl/depthwise_conv2d_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/fill_tests.cc | 8 +-
 .../runtime/kernel/opencl/layer_norm_tests.cc | 2 +-
 .../ut/src/runtime/kernel/opencl/pad_tests.cc | 2 +-
 .../runtime/kernel/opencl/pooling_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/power_tests.cc | 4 +-
 .../src/runtime/kernel/opencl/prelu_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/reduce_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/scale_tests.cc | 2 +-
 .../src/runtime/kernel/opencl/slice_tests.cc | 2 +-
 .../runtime/kernel/opencl/softmax_tests.cc | 2 +-
 .../runtime/kernel/opencl/to_format_tests.cc | 2 +-
 mindspore/lite/test/ut/src/scheduler_test.cc | 17 +-
 .../tflite/tflite_activation_parser_test.cc | 20 +-
 .../tflite/tflite_argmax_parser_test.cc | 15 +-
 .../tflite/tflite_argmin_parser_test.cc | 15 +-
 .../tflite/tflite_arithmetic_parser_test.cc | 21 +-
 .../tflite_batch_to_space_nd_parser_test.cc | 8 +-
 .../parser/tflite/tflite_cast_parser_test.cc | 9 +-
 .../parser/tflite/tflite_conv_parser_test.cc | 30 +-
 .../tflite/tflite_deconv_parser_test.cc | 31 +-
 .../tflite_depth_to_space_parser_test.cc | 4 +-
 .../tflite_depthwise_conv_parser_test.cc | 57 +-
 .../parser/tflite/tflite_fill_parser_test.cc | 9 +-
 .../tflite/tflite_gather_parser_test.cc | 10 +-
 .../tflite/tflite_l2norm_parser_test.cc | 11 +-
 .../parser/tflite/tflite_lrn_parser_test.cc | 9 +-
 .../parser/tflite/tflite_pad_parser_test.cc | 9 +-
 .../tflite/tflite_parsers_test_utils.cc | 13 +-
 .../tflite/tflite_pooling_parser_test.cc | 46 +-
 .../tflite/tflite_reduce_parser_test.cc | 52 +-
 .../tflite/tflite_reshape_parser_test.cc | 9 +-
 .../tflite/tflite_resize_parser_test.cc | 16 +-
 .../tflite/tflite_reverse_parser_test.cc | 10 +-
 .../tflite_reverse_sequence_parser_test.cc | 5 +-
 .../parser/tflite/tflite_slice_parser_test.cc | 12 +-
 .../tflite/tflite_softmax_parser_test.cc | 10 +-
 .../tflite_space_to_batch_nd_parser_test.cc | 8 +-
 .../tflite_space_to_depth_parser_test.cc | 4 +-
 .../parser/tflite/tflite_split_parser_test.cc | 10 +-
 .../tflite/tflite_split_v_parser_test.cc | 10 +-
 .../parser/tflite/tflite_stack_parser_test.cc | 5 +-
 .../tflite_strided_slice_parser_test.cc | 16 +-
 .../parser/tflite/tflite_tile_parser_test.cc | 12 +-
 .../tflite/tflite_topk_v2_parser_test.cc | 9 +-
 .../tflite/tflite_transpose_parser_test.cc | 9 +-
 .../tflite/tflite_unique_parser_test.cc | 7 +-
 .../tflite/tflite_unstack_parser_test.cc | 3 +-
 .../fusion/constant_folding_fusion_test.cc | 101 +-
 .../fusion/conv_activation_fusion_test.cc | 62 +-
 .../fusion/conv_biasadd_fusion_test.cc | 52 +-
 .../optimizer/fusion/conv_bn_fusion_test.cc | 50 +-
 .../fusion/conv_scale_fusion_test.cc | 53 +-
 mindspore/lite/test/win_models.cfg | 12 +-
 .../lite/tools/anf_exporter/anf_exporter.cc | 239 +--
 .../lite/tools/anf_exporter/anf_exporter.h | 19 +-
 .../lite/tools/anf_importer/CMakeLists.txt | 11 -
 .../lite/tools/anf_importer/anf_importer.cc | 54 -
 .../lite/tools/anf_importer/anf_importer.h | 55 -
 .../anf_importer/import_from_meta_graphT.cc | 280 ---
 .../anf_importer/import_from_meta_graphT.h | 56 -
 .../tools/anf_importer/import_from_mindir.cc | 919 ----------
 .../tools/anf_importer/import_from_mindir.h | 83 -
 mindspore/lite/tools/common/flag_parser.cc | 4 +-
 mindspore/lite/tools/common/flag_parser.h | 2 +-
 mindspore/lite/tools/common/graph_util.cc | 360 +---
 mindspore/lite/tools/common/graph_util.h | 20 +-
 mindspore/lite/tools/common/node_util.cc | 102 +-
 mindspore/lite/tools/common/node_util.h | 4 +-
 mindspore/lite/tools/common/option.h | 2 +-
 mindspore/lite/tools/common/protobuf_utils.cc | 2 +-
 mindspore/lite/tools/common/protobuf_utils.h | 2 +-
 mindspore/lite/tools/common/storage.cc | 2 +-
 mindspore/lite/tools/common/storage.h | 2 +-
 mindspore/lite/tools/common/tensor_util.cc | 2 +-
 mindspore/lite/tools/common/tensor_util.h | 3 +-
 mindspore/lite/tools/converter/CMakeLists.txt | 16 +-
 .../lite/tools/converter/anf_transform.cc | 50 +-
 .../lite/tools/converter/anf_transform.h | 20 +-
 mindspore/lite/tools/converter/converter.cc | 166 +-
 mindspore/lite/tools/converter/converter.h | 40 +-
 .../tools/converter/graphdef_transform.cc | 3 -
 .../fusion/format_trans_fusion_pass.cc | 15 +-
 .../fusion/format_trans_fusion_pass.h | 2 +-
 .../legacy_optimizer/fusion/fusion_pass.cc | 73 +-
 .../legacy_optimizer/fusion/fusion_pass.h | 4 +-
 .../legacy_optimizer/fusion/fusion_pattern.cc | 2 +-
 .../legacy_optimizer/fusion/fusion_pattern.h | 5 +-
 .../fusion/matmul_biasadd_fusion_pass.cc | 37 +-
 .../fusion/matmul_biasadd_fusion_pass.h | 13 +-
 .../fusion/mul_add_fusion_pass.cc | 22 +-
 .../fusion/mul_add_fusion_pass.h | 2 +-
 .../fusion/quant_cast_fusion_pass.cc | 8 +-
 .../fusion/quant_cast_fusion_pass.h | 2 +-
 .../legacy_optimizer/graph/CMakeLists.txt | 1 -
 .../graph/batchnorm_convert_scale_pass.cc | 7 +-
 .../graph/batchnorm_convert_scale_pass.h | 2 +-
 .../graph/dropout_node_remove_pass.cc | 4 +-
 .../graph/dropout_node_remove_pass.h | 2 +-
 .../graph/dtype_trans_pass.cc | 11 +-
 .../legacy_optimizer/graph/dtype_trans_pass.h | 6 +-
 .../graph/format_trans_pass.cc | 264 ++-
 .../graph/format_trans_pass.h | 26 +-
 .../graph/global_format_transform_pass.cc | 11 +-
 .../graph/global_format_transform_pass.h | 7 +-
 .../legacy_optimizer/graph/infershape_pass.cc | 60 +-
 .../legacy_optimizer/graph/infershape_pass.h | 2 +-
 .../graph/isolated_node_remove_pass.cc | 2 +-
 .../graph/isolated_node_remove_pass.h | 2 +-
 .../graph/nested_loop_expand_pass.cc | 12 +-
 .../legacy_optimizer/graph/select_pass.cc | 6 +-
 .../legacy_optimizer/graph/select_pass.h | 2 +-
 .../graph/subgraph_node_pass.cc | 2 +-
 .../graph/subgraph_node_pass.h | 2 +-
 .../graph/subgraph_tensor_pass.cc | 2 +-
 .../graph/subgraph_tensor_pass.h | 2 +-
 .../legacy_optimizer/graph/switch_pass.cc | 38 +-
 .../legacy_optimizer/graph/switch_pass.h | 2 +-
 .../graph/tensor_quant_pass.cc | 178 +-
 .../graph/topological_sort_pass.cc | 4 +-
 .../graph/topological_sort_pass.h | 2 +-
 .../graph/trans_format_insert_pass.cc | 19 +-
 .../graph/trans_format_insert_pass.h | 6 +-
 .../graph/trans_format_remove_pass.cc | 9 +-
 .../graph/trans_format_remove_pass.h | 2 +-
 .../graph/unused_node_remove_pass.cc | 44 -
 .../graph/unused_node_remove_pass.h | 36 -
 mindspore/lite/tools/converter/model_parser.h | 37 +-
 mindspore/lite/tools/converter/ops/enter.h | 15 +-
 mindspore/lite/tools/converter/ops/exit.h | 15 +-
 .../lite/tools/converter/ops/loop_cond.h | 15 +-
 .../lite/tools/converter/ops/next_iteration.h | 15 +-
 .../parser/caffe/caffe_activation_parser.cc | 68 +
 .../parser/caffe/caffe_activation_parser.h | 60 +
 .../parser/caffe/caffe_argmax_parser.cc | 38 +-
 .../parser/caffe/caffe_argmax_parser.h | 4 +-
 .../parser/caffe/caffe_batchnorm_parser.cc | 30 +-
 .../parser/caffe/caffe_batchnorm_parser.h | 3 +-
 .../parser/caffe/caffe_concat_parser.cc | 31 +-
 .../parser/caffe/caffe_concat_parser.h | 9 +-
 .../converter/parser/caffe/caffe_converter.cc | 22 -
 .../converter/parser/caffe/caffe_converter.h | 9 +-
 .../parser/caffe/caffe_convolution_parser.cc | 119 +-
 .../parser/caffe/caffe_convolution_parser.h | 5 +-
 .../parser/caffe/caffe_crop_parser.cc | 26 +-
 .../parser/caffe/caffe_crop_parser.h | 2 +-
 .../caffe/caffe_deconvolution_parser.cc | 108 +-
 .../parser/caffe/caffe_deconvolution_parser.h | 5 +-
 .../parser/caffe/caffe_eltwise_parser.cc | 26 +-
 .../parser/caffe/caffe_eltwise_parser.h | 2 +-
 .../parser/caffe/caffe_elu_parser.cc | 18 +-
 .../converter/parser/caffe/caffe_elu_parser.h | 2 +-
 .../parser/caffe/caffe_exp_parser.cc | 28 +-
 .../converter/parser/caffe/caffe_exp_parser.h | 2 +-
 .../parser/caffe/caffe_flatten_parser.cc | 16 +-
 .../parser/caffe/caffe_flatten_parser.h | 8 +-
 .../parser/caffe/caffe_innerproduct_parser.cc | 27 +-
 .../parser/caffe/caffe_innerproduct_parser.h | 2 +-
 .../parser/caffe/caffe_interp_parser.cc | 30 +-
 .../parser/caffe/caffe_interp_parser.h | 2 +-
 .../parser/caffe/caffe_model_parser.cc | 43 +-
 .../parser/caffe/caffe_model_parser.h | 8 +-
 .../parser/caffe/caffe_node_parser.h | 7 +-
 .../parser/caffe/caffe_permute_parser.cc | 22 +-
 .../parser/caffe/caffe_permute_parser.h | 2 +-
 .../parser/caffe/caffe_pooling_parser.cc | 135 +-
 .../parser/caffe/caffe_pooling_parser.h | 10 +-
 .../parser/caffe/caffe_power_parser.cc | 39 +-
 .../parser/caffe/caffe_power_parser.h | 2 +-
 .../parser/caffe/caffe_prelu_parser.cc | 24 +-
 .../parser/caffe/caffe_prelu_parser.h | 2 +-
 .../parser/caffe/caffe_reduce_parser.cc | 31 +-
 .../parser/caffe/caffe_reduce_parser.h | 2 +-
 .../parser/caffe/caffe_relu6_parser.cc | 46 -
 .../parser/caffe/caffe_relu6_parser.h | 35 -
 .../parser/caffe/caffe_relu_parser.cc | 46 -
 .../parser/caffe/caffe_relu_parser.h | 36 -
 .../parser/caffe/caffe_reshape_parser.cc | 23 +-
 .../parser/caffe/caffe_reshape_parser.h | 2 +-
 .../parser/caffe/caffe_scale_parser.cc | 47 +-
 .../parser/caffe/caffe_scale_parser.h | 5 +-
 .../parser/caffe/caffe_sigmoid_parser.cc | 39 -
 .../parser/caffe/caffe_sigmoid_parser.h | 36 -
 .../parser/caffe/caffe_slice_parser.cc | 29 +-
 .../parser/caffe/caffe_slice_parser.h | 2 +-
 .../parser/caffe/caffe_softmax_parser.cc | 20 +-
 .../parser/caffe/caffe_softmax_parser.h | 2 +-
 .../parser/caffe/caffe_tanh_parser.cc | 39 -
 .../parser/caffe/caffe_tanh_parser.h | 36 -
 .../parser/caffe/caffe_tile_parser.cc | 26 +-
 .../parser/caffe/caffe_tile_parser.h | 2 +-
 .../parser/onnx/onnx_activation_parser.cc | 139 ++
 .../parser/onnx/onnx_activation_parser.h | 75 +
 .../parser/onnx/onnx_adder_parser.cc | 20 +-
 .../converter/parser/onnx/onnx_adder_parser.h | 3 +-
 .../parser/onnx/onnx_argmax_parser.cc | 26 +-
 .../parser/onnx/onnx_argmax_parser.h | 2 +-
 .../onnx/onnx_arithmetic_operation_parser.cc | 601 ++-----
 .../onnx/onnx_arithmetic_operation_parser.h | 101 +-
 .../parser/onnx/onnx_batchnorm_parser.cc | 27 +-
 .../parser/onnx/onnx_batchnorm_parser.h | 2 +-
 .../parser/onnx/onnx_biasadd_parser.cc | 23 +-
 .../parser/onnx/onnx_biasadd_parser.h | 2 +-
 .../converter/parser/onnx/onnx_cast_parser.cc | 24 +-
 .../converter/parser/onnx/onnx_cast_parser.h | 2 +-
 .../converter/parser/onnx/onnx_clip_parser.cc | 30 +-
 .../converter/parser/onnx/onnx_clip_parser.h | 2 +-
 .../parser/onnx/onnx_concat_parser.cc | 23 +-
 .../parser/onnx/onnx_concat_parser.h | 2 +-
 .../onnx/onnx_constant_of_shape_parser.cc | 40 +-
 .../onnx/onnx_constant_of_shape_parser.h | 2 +-
 .../parser/onnx/onnx_constant_parser.cc | 31 +-
 .../parser/onnx/onnx_constant_parser.h | 5 +-
 .../converter/parser/onnx/onnx_conv_parser.cc | 197 +--
 .../converter/parser/onnx/onnx_conv_parser.h | 10 +-
 .../parser/onnx/onnx_conv_transpose_parser.cc | 99 ++
 .../parser/onnx/onnx_conv_transpose_parser.h | 35 +
 .../converter/parser/onnx/onnx_converter.cc | 25 -
 .../converter/parser/onnx/onnx_converter.h | 9 +-
 .../parser/onnx/onnx_deconv_parser.cc | 180 --
 .../parser/onnx/onnx_deconv_parser.h | 40 -
 .../parser/onnx/onnx_depth_to_space_parser.cc | 23 +-
 .../parser/onnx/onnx_depth_to_space_parser.h | 2 +-
 .../parser/onnx/onnx_dropout_parser.cc | 23 +-
 .../parser/onnx/onnx_dropout_parser.h | 2 +-
 .../converter/parser/onnx/onnx_elu_parser.cc | 49 -
 .../converter/parser/onnx/onnx_elu_parser.h | 34 -
 .../converter/parser/onnx/onnx_erf_parser.cc | 21 +-
 .../converter/parser/onnx/onnx_erf_parser.h | 2 +-
 .../parser/onnx/onnx_expand_parser.cc | 25 +-
 .../parser/onnx/onnx_expand_parser.h | 2 +-
 .../parser/onnx/onnx_flatten_parser.cc | 32 +-
 .../parser/onnx/onnx_flatten_parser.h | 2 +-
 .../parser/onnx/onnx_gather_parser.cc | 24 +-
 .../parser/onnx/onnx_gather_parser.h | 2 +-
 .../converter/parser/onnx/onnx_gemm_parser.cc | 25 +-
 .../converter/parser/onnx/onnx_gemm_parser.h | 2 +-
 .../onnx/onnx_given_tensor_fill_parser.cc | 41 +-
 .../onnx/onnx_given_tensor_fill_parser.h | 7 +-
 .../parser/onnx/onnx_identity_parser.cc | 23 +-
 .../parser/onnx/onnx_identity_parser.h | 2 +-
 .../converter/parser/onnx/onnx_if_parser.cc | 21 +-
 .../converter/parser/onnx/onnx_if_parser.h | 2 +-
 .../parser/onnx/onnx_instance_norm_parser.cc | 23 +-
 .../parser/onnx/onnx_instance_norm_parser.h | 2 +-
 .../converter/parser/onnx/onnx_loop_parser.cc | 21 +-
 .../converter/parser/onnx/onnx_loop_parser.h | 2 +-
 .../parser/onnx/onnx_lp_norm_parser.cc | 31 +-
 .../parser/onnx/onnx_lp_norm_parser.h | 2 +-
 .../converter/parser/onnx/onnx_lrn_parser.cc | 42 +-
 .../converter/parser/onnx/onnx_lrn_parser.h | 2 +-
 .../converter/parser/onnx/onnx_lstm_parser.cc | 34 +-
 .../converter/parser/onnx/onnx_lstm_parser.h | 2 +-
 .../parser/onnx/onnx_matmul_parser.cc | 24 +-
 .../parser/onnx/onnx_matmul_parser.h | 2 +-
 .../parser/onnx/onnx_model_parser.cc | 115 +-
 .../converter/parser/onnx/onnx_model_parser.h | 52 +-
 .../converter/parser/onnx/onnx_node_parser.cc | 30 +-
 .../converter/parser/onnx/onnx_node_parser.h | 17 +-
 .../onnx/onnx_non_max_suppression_parser.cc | 23 +-
 .../onnx/onnx_non_max_suppression_parser.h | 2 +-
 .../parser/onnx/onnx_nonzero_parser.cc | 21 +-
 .../parser/onnx/onnx_nonzero_parser.h | 2 +-
 .../parser/onnx/onnx_onehot_parser.cc | 22 +-
 .../parser/onnx/onnx_onehot_parser.h | 2 +-
 .../converter/parser/onnx/onnx_pad_parser.cc | 40 +-
 .../converter/parser/onnx/onnx_pad_parser.h | 2 +-
 .../converter/parser/onnx/onnx_pool_parser.cc | 167 +-
 .../converter/parser/onnx/onnx_pool_parser.h | 16 +-
 .../parser/onnx/onnx_quantize_parser.cc | 30 +-
 .../parser/onnx/onnx_quantize_parser.h | 2 +-
 .../parser/onnx/onnx_range_parser.cc | 24 +-
 .../converter/parser/onnx/onnx_range_parser.h | 2 +-
 .../parser/onnx/onnx_reduce_parser.cc | 45 +-
 .../parser/onnx/onnx_reduce_parser.h | 2 +-
 .../converter/parser/onnx/onnx_relu_parser.cc | 115 --
 .../converter/parser/onnx/onnx_relu_parser.h | 42 -
 .../parser/onnx/onnx_reshape_parser.cc | 27 +-
 .../parser/onnx/onnx_reshape_parser.h | 2 +-
 .../parser/onnx/onnx_resize_parser.cc | 81 +-
 .../parser/onnx/onnx_resize_parser.h | 2 +-
 .../parser/onnx/onnx_shape_parser.cc | 21 +-
 .../converter/parser/onnx/onnx_shape_parser.h | 2 +-
 .../parser/onnx/onnx_sigmoid_parser.cc | 45 -
 .../parser/onnx/onnx_sigmoid_parser.h | 34 -
 .../parser/onnx/onnx_slice_parser.cc | 53 +-
 .../converter/parser/onnx/onnx_slice_parser.h | 2 +-
 .../parser/onnx/onnx_softmax_parser.cc | 31 +-
 .../parser/onnx/onnx_softmax_parser.h | 2 +-
 .../parser/onnx/onnx_space_to_depth_parser.cc | 22 +-
 .../parser/onnx/onnx_space_to_depth_parser.h | 2 +-
 .../parser/onnx/onnx_split_parser.cc | 40 +-
 .../converter/parser/onnx/onnx_split_parser.h | 2 +-
 .../parser/onnx/onnx_squeeze_parser.cc | 25 +-
 .../parser/onnx/onnx_squeeze_parser.h | 2 +-
 .../converter/parser/onnx/onnx_tile_parser.cc | 22 +-
 .../converter/parser/onnx/onnx_tile_parser.h | 2 +-
 .../converter/parser/onnx/onnx_topk_parser.cc | 22 +-
 .../converter/parser/onnx/onnx_topk_parser.h | 2 +-
 .../parser/onnx/onnx_transpose_parser.cc | 28 +-
 .../parser/onnx/onnx_transpose_parser.h | 2 +-
 .../parser/onnx/onnx_unsqueeze_parser.cc | 25 +-
 .../parser/onnx/onnx_unsqueeze_parser.h | 2 +-
 .../parser/onnx/onnx_upsample_parser.cc | 33 +-
 .../parser/onnx/onnx_upsample_parser.h | 2 +-
 .../parser/tf/tf_activation_parser.cc | 84 +-
 .../parser/tf/tf_activation_parser.h | 15 +-
 .../converter/parser/tf/tf_argmax_parser.cc | 47 +-
 .../converter/parser/tf/tf_argmax_parser.h | 5 +-
 .../converter/parser/tf/tf_argmin_parser.cc | 47 +-
 .../converter/parser/tf/tf_argmin_parser.h | 5 +-
 .../parser/tf/tf_arithmetic_parser.cc | 578 ++++--
 .../parser/tf/tf_arithmetic_parser.h | 262 ++-
 .../parser/tf/tf_arithmetic_self_parser.cc | 95 -
 .../parser/tf/tf_arithmetic_self_parser.h | 36 -
 .../converter/parser/tf/tf_assert_parser.cc | 46 +-
 .../converter/parser/tf/tf_assert_parser.h | 5 +-
 .../parser/tf/tf_batch_matmul_parser.cc | 49 +-
 .../parser/tf/tf_batch_matmul_parser.h | 5 +-
 .../parser/tf/tf_batch_to_space_nd_parser.cc | 41 +-
 .../parser/tf/tf_batch_to_space_nd_parser.h | 5 +-
 .../parser/tf/tf_batchnorm_parser.cc | 36 +-
 .../converter/parser/tf/tf_batchnorm_parser.h | 5 +-
 .../converter/parser/tf/tf_biasadd_parser.cc | 45 +-
 .../converter/parser/tf/tf_biasadd_parser.h | 5 +-
 .../converter/parser/tf/tf_cast_parser.cc | 42 +-
 .../converter/parser/tf/tf_cast_parser.h | 5 +-
 .../converter/parser/tf/tf_concat_parser.cc | 54 +-
 .../converter/parser/tf/tf_concat_parser.h | 5 +-
 .../parser/tf/tf_conv_base_parser.cc | 72 +-
 .../converter/parser/tf/tf_conv_base_parser.h | 13 +-
 .../parser/tf/tf_conv_depthwise_parser.cc | 110 --
 .../parser/tf/tf_conv_depthwise_parser.h | 36 -
 .../converter/parser/tf/tf_conv_parser.cc | 98 +-
 .../converter/parser/tf/tf_conv_parser.h | 5 +-
 .../tools/converter/parser/tf/tf_converter.cc | 22 -
 .../tools/converter/parser/tf/tf_converter.h | 12 +-
 .../parser/tf/tf_crop_and_resize_parser.cc | 72 +-
 .../parser/tf/tf_crop_and_resize_parser.h | 5 +-
 .../converter/parser/tf/tf_deconv_parser.cc | 91 +-
 .../converter/parser/tf/tf_deconv_parser.h | 5 +-
 .../converter/parser/tf/tf_dropout_parser.cc | 42 +-
 .../converter/parser/tf/tf_dropout_parser.h | 5 +-
 .../converter/parser/tf/tf_enter_parser.cc | 20 +-
 .../converter/parser/tf/tf_enter_parser.h | 5 +-
 .../converter/parser/tf/tf_exit_parser.cc | 20 +-
 .../converter/parser/tf/tf_exit_parser.h | 5 +-
 .../parser/tf/tf_expand_dims_parser.cc | 54 +-
 .../parser/tf/tf_expand_dims_parser.h | 5 +-
 .../converter/parser/tf/tf_fill_parser.cc | 85 +-
 .../converter/parser/tf/tf_fill_parser.h | 5 +-
 .../parser/tf/tf_gather_nd_parser.cc | 45 +-
 .../converter/parser/tf/tf_gather_nd_parser.h | 5 +-
 .../converter/parser/tf/tf_gather_parser.cc | 65 +-
 .../converter/parser/tf/tf_gather_parser.h | 5 +-
 .../tools/converter/parser/tf/tf_if_parser.cc | 34 +-
 .../tools/converter/parser/tf/tf_if_parser.h | 5 +-
 .../parser/tf/tf_invert_permutation_parser.cc | 39 +-
 .../parser/tf/tf_invert_permutation_parser.h | 5 +-
 .../parser/tf/tf_is_finite_parser.cc | 31 +-
 .../converter/parser/tf/tf_is_finite_parser.h | 5 +-
 .../converter/parser/tf/tf_linspace_parser.cc | 33 +-
 .../converter/parser/tf/tf_linspace_parser.h | 5 +-
 .../converter/parser/tf/tf_logical_parser.cc | 66 +-
 .../converter/parser/tf/tf_logical_parser.h | 31 +-
 .../parser/tf/tf_loop_cond_parser.cc | 20 +-
 .../converter/parser/tf/tf_loop_cond_parser.h | 5 +-
 .../converter/parser/tf/tf_matmul_parser.cc | 46 +-
 .../converter/parser/tf/tf_matmul_parser.h | 5 +-
 .../converter/parser/tf/tf_merge_parser.cc | 34 +-
 .../converter/parser/tf/tf_merge_parser.h | 5 +-
 .../converter/parser/tf/tf_model_parser.cc | 557 +++---
 .../converter/parser/tf/tf_model_parser.h | 36 +-
 .../parser/tf/tf_next_iteration_parser.cc | 20 +-
 .../parser/tf/tf_next_iteration_parser.h | 5 +-
 .../converter/parser/tf/tf_node_parser.h | 11 +-
 .../tf/tf_non_max_suppression_parser.cc | 64 +-
 .../parser/tf/tf_non_max_suppression_parser.h | 5 +-
 .../converter/parser/tf/tf_one_hot_parser.cc | 45 +-
 .../converter/parser/tf/tf_one_hot_parser.h | 5 +-
 .../converter/parser/tf/tf_pack_parser.cc | 52 +-
 .../converter/parser/tf/tf_pack_parser.h | 5 +-
 .../converter/parser/tf/tf_pad_parser.cc | 56 +-
 .../tools/converter/parser/tf/tf_pad_parser.h | 5 +-
 .../converter/parser/tf/tf_pool_parser.cc | 101 +-
 .../converter/parser/tf/tf_pool_parser.h | 21 +-
 .../parser/tf/tf_ragged_range_parser.cc | 63 +-
 .../parser/tf/tf_ragged_range_parser.h | 5 +-
 .../tf/tf_random_standard_normal_parser.cc | 48 +-
 .../tf/tf_random_standard_normal_parser.h | 5 +-
 .../converter/parser/tf/tf_range_parser.cc | 60 +-
 .../converter/parser/tf/tf_range_parser.h | 5 +-
 .../converter/parser/tf/tf_rank_parser.cc | 37 +-
 .../converter/parser/tf/tf_rank_parser.h | 5 +-
 .../converter/parser/tf/tf_reduce_parser.cc | 83 +-
 .../converter/parser/tf/tf_reduce_parser.h | 5 +-
 .../converter/parser/tf/tf_reshape_parser.cc | 45 +-
 .../converter/parser/tf/tf_reshape_parser.h | 5 +-
 .../converter/parser/tf/tf_resize_parser.cc | 63 +-
 .../converter/parser/tf/tf_resize_parser.h | 5 +-
 .../converter/parser/tf/tf_reverse_parser.cc | 57 +-
 .../converter/parser/tf/tf_reverse_parser.h | 5 +-
 .../parser/tf/tf_reverse_sequence_parser.cc | 50 +-
 .../parser/tf/tf_reverse_sequence_parser.h | 5 +-
 .../converter/parser/tf/tf_round_parser.cc | 59 -
 .../converter/parser/tf/tf_round_parser.h | 36 -
 .../converter/parser/tf/tf_rsqrt_parser.cc | 61 -
 .../converter/parser/tf/tf_rsqrt_parser.h | 38 -
 .../converter/parser/tf/tf_select_parser.cc | 35 +-
 .../converter/parser/tf/tf_select_parser.h | 5 +-
 .../converter/parser/tf/tf_shape_parser.cc | 39 +-
 .../converter/parser/tf/tf_shape_parser.h | 5 +-
 .../converter/parser/tf/tf_size_parser.cc | 40 +-
 .../converter/parser/tf/tf_size_parser.h | 5 +-
 .../converter/parser/tf/tf_slice_parser.cc | 70 +-
 .../converter/parser/tf/tf_slice_parser.h | 5 +-
 .../converter/parser/tf/tf_softmax_parser.cc | 40 +-
 .../converter/parser/tf/tf_softmax_parser.h | 5 +-
 .../parser/tf/tf_space_to_batch_nd_parser.cc | 40 +-
 .../parser/tf/tf_space_to_batch_nd_parser.h | 5 +-
 .../converter/parser/tf/tf_split_parser.cc | 69 +-
 .../converter/parser/tf/tf_split_parser.h | 5 +-
 .../parser/tf/tf_squared_difference_parser.cc | 61 -
 .../parser/tf/tf_squared_difference_parser.h | 37 -
 .../converter/parser/tf/tf_squeeze_parser.cc | 44 +-
 .../converter/parser/tf/tf_squeeze_parser.h | 5 +-
 .../parser/tf/tf_stride_slice_parser.cc | 62 +-
 .../parser/tf/tf_stride_slice_parser.h | 5 +-
 .../converter/parser/tf/tf_switch_parser.cc | 34 +-
 .../converter/parser/tf/tf_switch_parser.h | 5 +-
 .../tf/tf_tensor_list_from_tensor_parser.cc | 58 +-
 .../tf/tf_tensor_list_from_tensor_parser.h | 5 +-
 .../tf/tf_tensor_list_get_item_parser.cc | 48 +-
 .../tf/tf_tensor_list_get_item_parser.h | 5 +-
 .../tf/tf_tensor_list_reserve_parser.cc | 57 +-
 .../parser/tf/tf_tensor_list_reserve_parser.h | 5 +-
 .../tf/tf_tensor_list_set_item_parser.cc | 47 +-
 .../tf/tf_tensor_list_set_item_parser.h | 5 +-
 .../parser/tf/tf_tensor_list_stack_parser.cc | 55 +-
 .../parser/tf/tf_tensor_list_stack_parser.h | 5 +-
 .../converter/parser/tf/tf_tile_parser.cc | 67 +-
 .../converter/parser/tf/tf_tile_parser.h | 5 +-
 .../converter/parser/tf/tf_topk_parser.cc | 50 +-
 .../converter/parser/tf/tf_topk_parser.h | 5 +-
 .../parser/tf/tf_transpose_parser.cc | 70 +-
 .../converter/parser/tf/tf_transpose_parser.h | 5 +-
 .../parser/tf/tf_uniform_real_parser.cc | 44 +-
 .../parser/tf/tf_uniform_real_parser.h | 5 +-
 .../lite/tools/converter/parser/tf/tf_util.cc | 29 +-
 .../lite/tools/converter/parser/tf/tf_util.h | 3 +-
 .../converter/parser/tf/tf_where_parser.cc | 35 +-
 .../converter/parser/tf/tf_where_parser.h | 5 +-
 .../converter/parser/tf/tf_while_parser.cc | 36 +-
 .../converter/parser/tf/tf_while_parser.h | 5 +-
 .../parser/tf/tf_zeros_like_parser.cc | 34 +-
 .../parser/tf/tf_zeros_like_parser.h | 5 +-
 .../parser/tflite/tflite_activation_parser.cc | 114 +-
 .../parser/tflite/tflite_activation_parser.h | 62 +-
 .../parser/tflite/tflite_addn_parser.cc | 24 +-
 .../parser/tflite/tflite_addn_parser.h | 4 +-
 .../parser/tflite/tflite_argmax_parser.cc | 48 +-
 .../parser/tflite/tflite_argmax_parser.h | 4 +-
 .../parser/tflite/tflite_argmin_parser.cc | 48 +-
 .../parser/tflite/tflite_argmin_parser.h | 10 +-
 .../parser/tflite/tflite_arithmetic_parser.cc | 598 +++----
 .../parser/tflite/tflite_arithmetic_parser.h | 224 ++-
 .../tflite/tflite_batch_to_space_parser.cc | 36 +-
 .../tflite/tflite_batch_to_space_parser.h | 11 +-
 .../tflite/tflite_broadcast_to_parser.cc | 37 +-
 .../tflite/tflite_broadcast_to_parser.h | 11 +-
 .../parser/tflite/tflite_cast_parser.cc | 36 +-
 .../parser/tflite/tflite_cast_parser.h | 4 +-
 .../parser/tflite/tflite_concat_parser.cc | 30 +-
 .../parser/tflite/tflite_concat_parser.h | 4 +-
 .../parser/tflite/tflite_conv_parser.cc | 139 +-
 .../parser/tflite/tflite_conv_parser.h | 18 +-
 .../tflite/tflite_conv_transpose_parser.cc | 78 +
 .../tflite/tflite_conv_transpose_parser.h | 38 +
 .../parser/tflite/tflite_converter.cc | 22 -
 .../parser/tflite/tflite_converter.h | 9 +-
 .../parser/tflite/tflite_custom_parser.cc | 304 ++--
 .../parser/tflite/tflite_custom_parser.h | 46 +-
 .../parser/tflite/tflite_deconv_parser.cc | 81 -
 .../parser/tflite/tflite_deconv_parser.h | 36 -
 .../tflite/tflite_depth_to_space_parser.cc | 23 +-
 .../tflite/tflite_depth_to_space_parser.h | 4 +-
 .../tflite/tflite_depthwise_conv_parser.cc | 89 -
 .../tflite/tflite_depthwise_conv_parser.h | 36 -
 .../parser/tflite/tflite_dequantize_parser.cc | 42 +-
 .../parser/tflite/tflite_dequantize_parser.h | 4 +-
 .../tflite/tflite_expand_dims_parser.cc | 29 +-
 .../parser/tflite/tflite_expand_dims_parser.h | 4 +-
 .../parser/tflite/tflite_fill_parser.cc | 33 +-
 .../parser/tflite/tflite_fill_parser.h | 4 +-
 .../tflite/tflite_fullyconnected_parser.cc | 34 +-
 .../tflite/tflite_fullyconnected_parser.h | 4 +-
 .../parser/tflite/tflite_gather_nd_parser.cc | 21 +-
 .../parser/tflite/tflite_gather_nd_parser.h | 4 +-
 .../parser/tflite/tflite_gather_parser.cc | 25 +-
 .../parser/tflite/tflite_gather_parser.h | 4 +-
 .../tflite/tflite_hashtable_lookup_parser.cc | 22 +-
 .../tflite/tflite_hashtable_lookup_parser.h | 4 +-
 .../parser/tflite/tflite_l2norm_parser.cc | 31 +-
 .../parser/tflite/tflite_l2norm_parser.h | 4 +-
 .../parser/tflite/tflite_logical_parser.cc | 64 +-
 .../parser/tflite/tflite_logical_parser.h | 30 +-
 .../parser/tflite/tflite_lrn_parser.cc | 30 +-
 .../parser/tflite/tflite_lrn_parser.h | 4 +-
 .../tflite/tflite_lsh_projection_parser.cc | 31 +-
 .../tflite/tflite_lsh_projection_parser.h | 4 +-
 .../parser/tflite/tflite_matmul_parser.cc | 33 +-
 .../parser/tflite/tflite_matmul_parser.h | 15 +-
 .../parser/tflite/tflite_model_parser.cc | 54 +-
 .../parser/tflite/tflite_model_parser.h | 16 +-
 .../parser/tflite/tflite_node_parser.h | 101 +-
 .../parser/tflite/tflite_one_hot_parser.cc | 31 +-
 .../parser/tflite/tflite_one_hot_parser.h | 4 +-
 .../parser/tflite/tflite_pad_parser.cc | 53 +-
 .../parser/tflite/tflite_pad_parser.h | 4 +-
 .../parser/tflite/tflite_pooling_parser.cc | 108 +-
 .../parser/tflite/tflite_pooling_parser.h | 22 +-
 .../parser/tflite/tflite_prelu_parser.cc | 45 -
 .../parser/tflite/tflite_prelu_parser.h | 38 -
 .../parser/tflite/tflite_quantize_parser.cc | 47 +-
 .../parser/tflite/tflite_quantize_parser.h | 4 +-
 .../parser/tflite/tflite_range_parser.cc | 48 +-
 .../parser/tflite/tflite_range_parser.h | 4 +-
 .../parser/tflite/tflite_rank_parser.cc | 22 +-
 .../parser/tflite/tflite_rank_parser.h | 4 +-
 .../parser/tflite/tflite_reduce_parser.cc | 54 +-
 .../parser/tflite/tflite_reduce_parser.h | 4 +-
 .../parser/tflite/tflite_reshape_parser.cc | 62 +-
 .../parser/tflite/tflite_reshape_parser.h | 10 +-
 .../parser/tflite/tflite_resize_parser.cc | 46 +-
 .../parser/tflite/tflite_resize_parser.h | 6 +-
 .../parser/tflite/tflite_reverse_parser.cc | 29 +-
 .../parser/tflite/tflite_reverse_parser.h | 4 +-
 .../tflite/tflite_reverse_sequence_parser.cc | 26 +-
 .../tflite/tflite_reverse_sequence_parser.h | 4 +-
 .../parser/tflite/tflite_scatter_nd_parser.cc | 27 +-
 .../parser/tflite/tflite_scatter_nd_parser.h | 4 +-
 .../parser/tflite/tflite_shape_parser.cc | 22 +-
 .../parser/tflite/tflite_shape_parser.h | 4 +-
 .../parser/tflite/tflite_skip_gram_parser.cc | 30 +-
 .../parser/tflite/tflite_skip_gram_parser.h | 4 +-
 .../parser/tflite/tflite_slice_parser.cc | 42 +-
 .../parser/tflite/tflite_slice_parser.h | 4 +-
 .../parser/tflite/tflite_softmax_parser.cc | 24 +-
 .../parser/tflite/tflite_softmax_parser.h | 10 +-
 .../tflite/tflite_space_to_batch_nd_parser.cc | 33 +-
 .../tflite/tflite_space_to_batch_nd_parser.h | 4 +-
 .../tflite/tflite_space_to_depth_parser.cc | 27 +-
 .../tflite/tflite_space_to_depth_parser.h | 4 +-
 .../tflite/tflite_sparse_to_dense_parser.cc | 22 +-
 .../tflite/tflite_sparse_to_dense_parser.h | 4 +-
 .../parser/tflite/tflite_split_parser.cc | 53 +-
 .../parser/tflite/tflite_split_parser.h | 4 +-
 .../parser/tflite/tflite_split_v_parser.cc | 46 +-
 .../parser/tflite/tflite_split_v_parser.h | 4 +-
 .../parser/tflite/tflite_squeeze_parser.cc | 29 +-
 .../parser/tflite/tflite_squeeze_parser.h | 4 +-
 .../parser/tflite/tflite_stack_parser.cc | 29 +-
 .../parser/tflite/tflite_stack_parser.h | 4 +-
 .../tflite/tflite_strided_slice_parser.cc | 55 +-
 .../tflite/tflite_strided_slice_parser.h | 4 +-
 .../parser/tflite/tflite_tile_parser.cc | 21 +-
 .../parser/tflite/tflite_tile_parser.h | 4 +-
 .../parser/tflite/tflite_topk_v2_parser.cc | 31 +-
 .../parser/tflite/tflite_topk_v2_parser.h | 4 +-
 .../parser/tflite/tflite_transpose_parser.cc | 28 +-
 .../parser/tflite/tflite_transpose_parser.h | 4 +-
 .../parser/tflite/tflite_unique_parser.cc | 28 +-
 .../parser/tflite/tflite_unique_parser.h | 4 +-
 .../parser/tflite/tflite_unstack_parser.cc | 26 +-
 .../parser/tflite/tflite_unstack_parser.h | 4 +-
 .../converter/parser/tflite/tflite_util.cc | 32 +-
 .../converter/parser/tflite/tflite_util.h | 9 +-
 .../parser/tflite/tflite_where_parser.cc | 28 +-
 .../parser/tflite/tflite_where_parser.h | 4 +-
 .../parser/tflite/tflite_while_parser.cc | 29 +-
 .../parser/tflite/tflite_while_parser.h | 4 +-
 .../parser/tflite/tflite_zeros_like_parser.cc | 22 +-
 .../parser/tflite/tflite_zeros_like_parser.h | 4 +-
 .../lite/tools/converter/quant_param_holder.h | 160 ++
 .../tools/converter/quantizer/bitpacking.h | 2 +-
 .../converter/quantizer/calc_quant_param.cc | 26 +-
 .../converter/quantizer/calc_quant_param.h | 2 +-
 .../converter/quantizer/huffman_encode.cc | 13 +-
 .../converter/quantizer/huffman_encode.h | 14 +-
 .../quantizer/post_training_quantizer.cc | 1165 ++++++------
 .../quantizer/post_training_quantizer.h | 40 +-
 .../tools/converter/quantizer/quant_cast.cc | 56 +-
 .../tools/converter/quantizer/quant_cast.h | 8 +-
 .../converter/quantizer/quantize_util.cc | 111 +-
 .../tools/converter/quantizer/quantize_util.h | 39 +-
 .../tools/converter/quantizer/quantizer.cc | 3 +-
 .../tools/converter/quantizer/quantizer.h | 3 +-
 .../converter/quantizer/weight_quantizer.cc | 116 +-
 .../converter/quantizer/weight_quantizer.h | 7 +-
 .../tools/cropper/build_cropper_config.sh | 2 -
 .../lite/tools/optimizer/common/gllo_utils.cc | 271 ++-
 .../lite/tools/optimizer/common/gllo_utils.h | 19 +-
 .../optimizer/common/node_pass_extends.cc | 4 +-
 .../optimizer/common/pass_manager_extends.cc | 4 +-
 .../optimizer/fusion/batchmatmul_fusion.cc | 80 +-
 .../optimizer/fusion/batchmatmul_fusion.h | 2 +-
 .../fusion/bidirection_tf_gru_cell_fusion.cc | 185 +-
 .../fusion/bidirection_tf_gru_cell_fusion.h | 2 +-
 .../fusion/constant_folding_fusion.cc | 152 +-
 .../fusion/constant_folding_fusion.h | 2 +-
 .../fusion/conv_activation_fusion.cc | 60 +-
 .../optimizer/fusion/conv_activation_fusion.h | 3 +-
 .../optimizer/fusion/conv_biasadd_fusion.cc | 59 +-
 .../optimizer/fusion/conv_biasadd_fusion.h | 2 +-
 .../tools/optimizer/fusion/conv_bn_fusion.cc | 42 +-
 .../tools/optimizer/fusion/conv_bn_fusion.h | 2 +-
 .../optimizer/fusion/conv_conv_fusion.cc | 66 +-
 .../tools/optimizer/fusion/conv_conv_fusion.h | 3 +-
 .../optimizer/fusion/conv_scale_fusion.cc | 14 +-
 .../optimizer/fusion/conv_scale_fusion.h | 2 +-
 .../optimizer/fusion/conv_transform_fusion.cc | 169 +-
 .../optimizer/fusion/conv_transform_fusion.h | 2 +-
 .../fusion/conv_tuple_activation_fusion.cc | 49 +-
 .../fusion/conv_tuple_activation_fusion.h | 3 +-
 .../fusion/conv_tuplegetitem_fusion.cc | 10 +-
.../fusion/conv_tuplegetitem_fusion.h | 2 +-
.../optimizer/fusion/layer_norm_fusion.cc | 122 +-
.../optimizer/fusion/layer_norm_fusion.h | 6 +-
.../fusion/pooling_activation_fusion.cc | 5 +-
.../fusion/pooling_activation_fusion.h | 2 +-
.../fusion/quant_dtype_cast_fusion.cc | 7 +-
.../fusion/quant_dtype_cast_fusion.h | 3 +-
.../optimizer/fusion/sigmoid_mul_fusion.cc | 20 +-
.../optimizer/fusion/sigmoid_mul_fusion.h | 2 +-
.../optimizer/fusion/tf_lstm_cell_fusion.cc | 40 +-
.../optimizer/fusion/tf_lstm_cell_fusion.h | 5 +-
.../fusion/tflite_lstm_cell_fusion.cc | 157 +-
.../fusion/tflite_lstm_cell_fusion.h | 15 +-
.../graph/clip_convert_activation_pass.cc | 52 +-
.../graph/clip_convert_activation_pass.h | 3 +-
.../graph/functionalize_control_op_pass.cc | 5 +-
.../graph/functionalize_control_op_pass.h | 16 +-
.../optimizer/graph/functionalize_while.cc | 38 +-
.../graph/group_depthwise_op_convert_pass.cc | 65 +-
.../graph/group_depthwise_op_convert_pass.h | 3 +-
.../lite/tools/optimizer/graph/if_pass.cc | 29 +-
.../tools/optimizer/graph/infershape_pass.cc | 77 +-
.../tools/optimizer/graph/infershape_pass.h | 2 +-
.../optimizer/graph/inputs_adjust_pass.cc | 43 +-
.../optimizer/graph/inputs_adjust_pass.h | 4 +-
.../optimizer/graph/mindir_adjust_pass.cc | 231 ++-
.../optimizer/graph/mindir_adjust_pass.h | 4 +-
.../graph/mindir_inputs_adjust_pass.cc | 236 ---
.../graph/mindir_inputs_adjust_pass.h | 41 -
.../graph/onnx_inputs_adjust_pass.cc | 546 ++----
.../optimizer/graph/onnx_inputs_adjust_pass.h | 22 +-
.../optimizer/graph/primitive_adjust_pass.cc | 182 +-
.../optimizer/graph/primitive_adjust_pass.h | 76 +
.../graph/redundant_op_remove_pass.cc | 33 +-
.../graph/redundant_op_remove_pass.h | 2 +-
.../optimizer/graph/slice_prepose_pass.cc | 779 +++++----
.../optimizer/graph/slice_prepose_pass.h | 44 +-
.../graph/tflite_inputs_adjust_pass.cc | 190 ++
.../graph/tflite_inputs_adjust_pass.h | 37 +
.../tflite_inputs_order_exchange_pass.cc | 107 --
.../graph/tflite_inputs_order_exchange_pass.h | 33 -
.../graph/unused_cast_node_remove_pass.cc | 9 +-
.../graph/unused_cast_node_remove_pass.h | 2 +-
.../unused_transpose_node_remove_pass.cc | 76 +-
.../graph/unused_transpose_node_remove_pass.h | 2 +-
.../graph/update_conv2d_param_pass.cc | 145 +-
.../graph/update_conv2d_param_pass.h | 8 +-
.../graph/weight_format_hardcode_pass.cc | 157 +-
.../graph/weight_format_hardcode_pass.h | 12 +-
.../graph/weight_format_transform_pass.cc | 102 +-
.../graph/weight_format_transform_pass.h | 7 +-
.../lite/tools/optimizer/graph/while_pass.cc | 36 +-
.../lite/tools/optimizer/graph/while_pass.h | 2 +-
.../lite/tools/schema_gen/CMakeLists.txt | 3 +-
mindspore/lite/tools/schema_gen/schema_gen.cc | 23 +-
.../lite/tools/schema_gen/schema_type_def.cc | 62 -
.../lite/tools/schema_gen/schema_type_def.h | 42 -
.../tools/schema_gen/schema_type_register.h | 30 -
2507 files changed, 56236 insertions(+), 55490 deletions(-)
create mode 100644 mindspore/lite/micro/cmake/package_cmsis.cmake
delete mode 100644 mindspore/lite/micro/cmake/package_micro_ops.cmake
create mode 100644 mindspore/lite/micro/cmake/package_nnacl.cmake
create mode 100644 mindspore/lite/micro/cmake/package_wrapper.cmake
delete mode 100644 mindspore/lite/micro/cmake/wrapper.cmake
create mode 100644 mindspore/lite/micro/coder/generator/component/const_blocks/thread_pool.h
create mode 100644 mindspore/lite/micro/coder/generator/component/parallel_component.cc
create mode 100644 mindspore/lite/micro/coder/generator/component/parallel_component.h
create mode 100644 mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/base/resize_base_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h
delete mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc
delete mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h
create mode 100644 mindspore/lite/micro/coder/opcoders/parallel.cc
create mode 100644 mindspore/lite/micro/coder/opcoders/parallel.h
create mode 100644 mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.cc
create mode 100644 mindspore/lite/micro/coder/operator_library/CMakeLists.txt
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.h
rename mindspore/lite/micro/{ => coder/operator_library}/wrapper/fp32/matmul_fp32_wrapper.c (100%)
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.h
rename mindspore/lite/micro/{ => coder/operator_library}/wrapper/int8/matmul_int8_wrapper.c (100%)
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.h
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.c
create mode 100644 mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.h
delete mode 100644 mindspore/lite/micro/example/mobilenetv2_quant/1_224_224_3.bin
create mode 100644 mindspore/lite/micro/example/mobilenetv2_quant/input_1_224_224_3_uint8.bin
delete mode 100644 mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.h
delete mode 100644 mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c
delete mode 100644 mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c
delete mode 100644 mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h
delete mode 100644 mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.h
create mode 100644 mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c
create mode 100644 mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h
create mode 100644 mindspore/lite/nnacl/fp32/splice_fp32.c
create mode 100644 mindspore/lite/nnacl/fp32/splice_fp32.h
create mode 100644 mindspore/lite/nnacl/infer/adam_infer.c
create mode 100644 mindspore/lite/nnacl/infer/adam_infer.h
create mode 100644 mindspore/lite/nnacl/infer/add_sub_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/add_sub_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/addn_infer.c
create mode 100644 mindspore/lite/nnacl/infer/addn_infer.h
create mode 100644 mindspore/lite/nnacl/infer/apply_momentum_infer.c
create mode 100644 mindspore/lite/nnacl/infer/apply_momentum_infer.h
create mode 100644 mindspore/lite/nnacl/infer/argmin_max_infer.c
create mode 100644 mindspore/lite/nnacl/infer/argmin_max_infer.h
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_compare_infer.c
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_compare_infer.h
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_infer.c
create mode 100644 mindspore/lite/nnacl/infer/arithmetic_infer.h
create mode 100644 mindspore/lite/nnacl/infer/assert_op_infer.c
create mode 100644 mindspore/lite/nnacl/infer/assert_op_infer.h
create mode 100644 mindspore/lite/nnacl/infer/assign_add_infer.c
create mode 100644 mindspore/lite/nnacl/infer/assign_add_infer.h
create mode 100644 mindspore/lite/nnacl/infer/assign_infer.c
create mode 100644 mindspore/lite/nnacl/infer/assign_infer.h
create mode 100644 mindspore/lite/nnacl/infer/audio_spectrogram_infer.c
create mode 100644 mindspore/lite/nnacl/infer/audio_spectrogram_infer.h
create mode 100644 mindspore/lite/nnacl/infer/batch_to_space_infer.c
create mode 100644 mindspore/lite/nnacl/infer/batch_to_space_infer.h
create mode 100644 mindspore/lite/nnacl/infer/bias_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/bias_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c
create mode 100644 mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h
create mode 100644 mindspore/lite/nnacl/infer/bn_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/bn_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/broadcast_to_infer.c
create mode 100644 mindspore/lite/nnacl/infer/broadcast_to_infer.h
create mode 100644 mindspore/lite/nnacl/infer/cast_infer.c
create mode 100644 mindspore/lite/nnacl/infer/cast_infer.h
create mode 100644 mindspore/lite/nnacl/infer/common_infer.c
create mode 100644 mindspore/lite/nnacl/infer/common_infer.h
create mode 100644 mindspore/lite/nnacl/infer/concat_infer.c
create mode 100644 mindspore/lite/nnacl/infer/concat_infer.h
create mode 100644 mindspore/lite/nnacl/infer/constant_of_shape_infer.c
create mode 100644 mindspore/lite/nnacl/infer/constant_of_shape_infer.h
create mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c
create mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h
create mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c
create mode 100644 mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h
create mode 100644 mindspore/lite/nnacl/infer/conv2d_infer.c
create mode 100644 mindspore/lite/nnacl/infer/conv2d_infer.h
create mode 100644 mindspore/lite/nnacl/infer/crop_and_resize_infer.c
create mode 100644 mindspore/lite/nnacl/infer/crop_and_resize_infer.h
create mode 100644 mindspore/lite/nnacl/infer/crop_infer.c
create mode 100644 mindspore/lite/nnacl/infer/crop_infer.h
create mode 100644 mindspore/lite/nnacl/infer/custom_extract_features_infer.c
create mode 100644 mindspore/lite/nnacl/infer/custom_extract_features_infer.h
create mode 100644 mindspore/lite/nnacl/infer/custom_normalize_infer.c
create mode 100644 mindspore/lite/nnacl/infer/custom_normalize_infer.h
create mode 100644 mindspore/lite/nnacl/infer/custom_predict_infer.c
create mode 100644 mindspore/lite/nnacl/infer/custom_predict_infer.h
create mode 100644 mindspore/lite/nnacl/infer/deconv2d_infer.c
create mode 100644 mindspore/lite/nnacl/infer/deconv2d_infer.h
create mode 100644 mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c
create mode 100644 mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h
create mode 100644 mindspore/lite/nnacl/infer/depth_to_space_infer.c
create mode 100644 mindspore/lite/nnacl/infer/depth_to_space_infer.h
create mode 100644 mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c
create mode 100644 mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h
create mode 100644 mindspore/lite/nnacl/infer/detection_post_process_infer.c
create mode 100644 mindspore/lite/nnacl/infer/detection_post_process_infer.h
create mode 100644 mindspore/lite/nnacl/infer/dropout_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/dropout_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/dropout_infer.c
create mode 100644 mindspore/lite/nnacl/infer/dropout_infer.h
create mode 100644 mindspore/lite/nnacl/infer/embedding_lookup_infer.c
create mode 100644 mindspore/lite/nnacl/infer/embedding_lookup_infer.h
create mode 100644 mindspore/lite/nnacl/infer/expand_dims_infer.c
create mode 100644 mindspore/lite/nnacl/infer/expand_dims_infer.h
create mode 100644 mindspore/lite/nnacl/infer/fft_imag_infer.c
create mode 100644 mindspore/lite/nnacl/infer/fft_imag_infer.h
create mode 100644 mindspore/lite/nnacl/infer/fft_real_infer.c
create mode 100644 mindspore/lite/nnacl/infer/fft_real_infer.h
create mode 100644 mindspore/lite/nnacl/infer/fill_infer.c
create mode 100644 mindspore/lite/nnacl/infer/fill_infer.h
create mode 100644 mindspore/lite/nnacl/infer/flatten_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/flatten_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/flatten_infer.c
create mode 100644 mindspore/lite/nnacl/infer/flatten_infer.h
create mode 100644 mindspore/lite/nnacl/infer/full_connection_infer.c
create mode 100644 mindspore/lite/nnacl/infer/full_connection_infer.h
create mode 100644 mindspore/lite/nnacl/infer/fused_batchnorm_infer.c
create mode 100644 mindspore/lite/nnacl/infer/fused_batchnorm_infer.h
create mode 100644 mindspore/lite/nnacl/infer/gather_infer.c
create mode 100644 mindspore/lite/nnacl/infer/gather_infer.h
create mode 100644 mindspore/lite/nnacl/infer/gather_nd_infer.c
create mode 100644 mindspore/lite/nnacl/infer/gather_nd_infer.h
create mode 100644 mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c
create mode 100644 mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h
create mode 100644 mindspore/lite/nnacl/infer/gru_infer.c
create mode 100644 mindspore/lite/nnacl/infer/gru_infer.h
create mode 100644 mindspore/lite/nnacl/infer/hashtable_lookup_infer.c
create mode 100644 mindspore/lite/nnacl/infer/hashtable_lookup_infer.h
create mode 100644 mindspore/lite/nnacl/infer/invert_permutation_infer.c
create mode 100644 mindspore/lite/nnacl/infer/invert_permutation_infer.h
create mode 100644 mindspore/lite/nnacl/infer/layer_norm_infer.c
create mode 100644 mindspore/lite/nnacl/infer/layer_norm_infer.h
create mode 100644 mindspore/lite/nnacl/infer/lin_space_infer.c
create mode 100644 mindspore/lite/nnacl/infer/lin_space_infer.h
create mode 100644 mindspore/lite/nnacl/infer/lsh_projection_infer.c
create mode 100644 mindspore/lite/nnacl/infer/lsh_projection_infer.h
create mode 100644 mindspore/lite/nnacl/infer/lstm_infer.c
create mode 100644 mindspore/lite/nnacl/infer/lstm_infer.h
create mode 100644 mindspore/lite/nnacl/infer/matmul_infer.c
create mode 100644 mindspore/lite/nnacl/infer/matmul_infer.h
create mode 100644 mindspore/lite/nnacl/infer/maximum_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/maximum_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/mean_infer.c
create mode 100644 mindspore/lite/nnacl/infer/mean_infer.h
create mode 100644 mindspore/lite/nnacl/infer/merge_infer.c
create mode 100644 mindspore/lite/nnacl/infer/merge_infer.h
create mode 100644 mindspore/lite/nnacl/infer/mfcc_infer.c
create mode 100644 mindspore/lite/nnacl/infer/mfcc_infer.h
create mode 100644 mindspore/lite/nnacl/infer/non_max_suppression_infer.c
create mode 100644 mindspore/lite/nnacl/infer/non_max_suppression_infer.h
create mode 100644 mindspore/lite/nnacl/infer/one_hot_infer.c
create mode 100644 mindspore/lite/nnacl/infer/one_hot_infer.h
create mode 100644 mindspore/lite/nnacl/infer/pad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/pad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/partial_infer.c
create mode 100644 mindspore/lite/nnacl/infer/partial_infer.h
create mode 100644 mindspore/lite/nnacl/infer/pooling_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/pooling_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/pooling_infer.c
create mode 100644 mindspore/lite/nnacl/infer/pooling_infer.h
create mode 100644 mindspore/lite/nnacl/infer/power_infer.c
create mode 100644 mindspore/lite/nnacl/infer/power_infer.h
create mode 100644 mindspore/lite/nnacl/infer/prior_box_infer.c
create mode 100644 mindspore/lite/nnacl/infer/prior_box_infer.h
create mode 100644 mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c
create mode 100644 mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h
create mode 100644 mindspore/lite/nnacl/infer/random_standard_normal_infer.c
create mode 100644 mindspore/lite/nnacl/infer/random_standard_normal_infer.h
create mode 100644 mindspore/lite/nnacl/infer/range_infer.c
create mode 100644 mindspore/lite/nnacl/infer/range_infer.h
create mode 100644 mindspore/lite/nnacl/infer/rank_infer.c
create mode 100644 mindspore/lite/nnacl/infer/rank_infer.h
create mode 100644 mindspore/lite/nnacl/infer/reduce_infer.c
create mode 100644 mindspore/lite/nnacl/infer/reduce_infer.h
create mode 100644 mindspore/lite/nnacl/infer/reshape_infer.c
create mode 100644 mindspore/lite/nnacl/infer/reshape_infer.h
create mode 100644 mindspore/lite/nnacl/infer/resize_infer.c
create mode 100644 mindspore/lite/nnacl/infer/resize_infer.h
create mode 100644 mindspore/lite/nnacl/infer/rfft_infer.c
create mode 100644 mindspore/lite/nnacl/infer/rfft_infer.h
create mode 100644 mindspore/lite/nnacl/infer/roi_pooling_infer.c
create mode 100644 mindspore/lite/nnacl/infer/roi_pooling_infer.h
create mode 100644 mindspore/lite/nnacl/infer/scatter_nd_infer.c
create mode 100644 mindspore/lite/nnacl/infer/scatter_nd_infer.h
create mode 100644 mindspore/lite/nnacl/infer/select_infer.c
create mode 100644 mindspore/lite/nnacl/infer/select_infer.h
create mode 100644 mindspore/lite/nnacl/infer/sgd_infer.c
create mode 100644 mindspore/lite/nnacl/infer/sgd_infer.h
create mode 100644 mindspore/lite/nnacl/infer/shape_infer.c
create mode 100644 mindspore/lite/nnacl/infer/shape_infer.h
create mode 100644 mindspore/lite/nnacl/infer/size_infer.c
create mode 100644 mindspore/lite/nnacl/infer/size_infer.h
create mode 100644 mindspore/lite/nnacl/infer/skip_gram_infer.c
create mode 100644 mindspore/lite/nnacl/infer/skip_gram_infer.h
create mode 100644 mindspore/lite/nnacl/infer/slice_infer.c
create mode 100644 mindspore/lite/nnacl/infer/slice_infer.h
create mode 100644 mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c
create mode 100644 mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h
create mode 100644 mindspore/lite/nnacl/infer/softmax_infer.c
create mode 100644 mindspore/lite/nnacl/infer/softmax_infer.h
create mode 100644 mindspore/lite/nnacl/infer/space_to_batch_infer.c
create mode 100644 mindspore/lite/nnacl/infer/space_to_batch_infer.h
create mode 100644 mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c
create mode 100644 mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h
create mode 100644 mindspore/lite/nnacl/infer/space_to_depth_infer.c
create mode 100644 mindspore/lite/nnacl/infer/space_to_depth_infer.h
create mode 100644 mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.c
create mode 100644 mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.h
create mode 100644 mindspore/lite/nnacl/infer/sparse_to_dense_infer.c
create mode 100644 mindspore/lite/nnacl/infer/sparse_to_dense_infer.h
create mode 100644 mindspore/lite/nnacl/infer/splice_infer.c
create mode 100644 mindspore/lite/nnacl/infer/splice_infer.h
create mode 100644 mindspore/lite/nnacl/infer/split_infer.c
create mode 100644 mindspore/lite/nnacl/infer/split_infer.h
create mode 100644 mindspore/lite/nnacl/infer/squeeze_infer.c
create mode 100644 mindspore/lite/nnacl/infer/squeeze_infer.h
create mode 100644 mindspore/lite/nnacl/infer/stack_infer.c
create mode 100644 mindspore/lite/nnacl/infer/stack_infer.h
create mode 100644 mindspore/lite/nnacl/infer/strided_slice_grad_infer.c
create mode 100644 mindspore/lite/nnacl/infer/strided_slice_grad_infer.h
create mode 100644 mindspore/lite/nnacl/infer/strided_slice_infer.c
create mode 100644 mindspore/lite/nnacl/infer/strided_slice_infer.h
create mode 100644 mindspore/lite/nnacl/infer/switch_infer.c
create mode 100644 mindspore/lite/nnacl/infer/switch_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_stack_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tensorlist_stack_infer.h
create mode 100644 mindspore/lite/nnacl/infer/tile_infer.c
create mode 100644 mindspore/lite/nnacl/infer/tile_infer.h
create mode 100644 mindspore/lite/nnacl/infer/topk_infer.c
create mode 100644 mindspore/lite/nnacl/infer/topk_infer.h
create mode 100644 mindspore/lite/nnacl/infer/transpose_infer.c
create mode 100644 mindspore/lite/nnacl/infer/transpose_infer.h
create mode 100644 mindspore/lite/nnacl/infer/uniform_real_infer.c
create mode 100644 mindspore/lite/nnacl/infer/uniform_real_infer.h
create mode 100644 mindspore/lite/nnacl/infer/unique_infer.c
create mode 100644 mindspore/lite/nnacl/infer/unique_infer.h
create mode 100644 mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c
create mode 100644 mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h
create mode 100644 mindspore/lite/nnacl/infer/unsqueeze_infer.c
create mode 100644 mindspore/lite/nnacl/infer/unsqueeze_infer.h
create mode 100644 mindspore/lite/nnacl/infer/unstack_infer.c
create mode 100644 mindspore/lite/nnacl/infer/unstack_infer.h
create mode 100644 mindspore/lite/nnacl/infer/where_infer.c
create mode 100644 mindspore/lite/nnacl/infer/where_infer.h
create mode 100644 mindspore/lite/nnacl/infer/while_infer.c
create mode 100644 mindspore/lite/nnacl/infer/while_infer.h
create mode 100644 mindspore/lite/nnacl/int8/splice_int8.c
create mode 100644 mindspore/lite/nnacl/int8/splice_int8.h
create mode 100644 mindspore/lite/nnacl/splice_parameter.h
create mode 100644 mindspore/lite/nnacl/tensor_c.h
create mode 100644 mindspore/lite/schema/ops_types.fbs
create mode 100644 mindspore/lite/src/common/log_util.h
create mode 100644 mindspore/lite/src/common/prim_inner.h
create mode 100644 mindspore/lite/src/common/prim_util.cc
create mode 100644 mindspore/lite/src/common/prim_util.h
create mode 100644 mindspore/lite/src/common/tensor_util.cc
create mode 100644 mindspore/lite/src/common/tensor_util.h
delete mode 100644 mindspore/lite/src/ops/abs.cc
delete mode 100644 mindspore/lite/src/ops/abs.h
delete mode 100644 mindspore/lite/src/ops/abs_grad.cc
delete mode 100644 mindspore/lite/src/ops/abs_grad.h
delete mode 100644 mindspore/lite/src/ops/activation.cc
delete mode 100644 mindspore/lite/src/ops/activation.h
delete mode 100644 mindspore/lite/src/ops/activation_grad.cc
delete mode 100644 mindspore/lite/src/ops/activation_grad.h
delete mode 100644 mindspore/lite/src/ops/adam.cc
delete mode 100644 mindspore/lite/src/ops/adam.h
delete mode 100644 mindspore/lite/src/ops/add.cc
delete mode 100644 mindspore/lite/src/ops/add.h
delete mode 100644 mindspore/lite/src/ops/adder.cc
delete mode 100644 mindspore/lite/src/ops/adder.h
delete mode 100644 mindspore/lite/src/ops/addn.cc
delete mode 100644 mindspore/lite/src/ops/addn.h
delete mode 100644 mindspore/lite/src/ops/apply_momentum.cc
delete mode 100644 mindspore/lite/src/ops/apply_momentum.h
delete mode 100644 mindspore/lite/src/ops/argmax.cc
delete mode 100644 mindspore/lite/src/ops/argmax.h
delete mode 100644 mindspore/lite/src/ops/argmin.cc
delete mode 100644 mindspore/lite/src/ops/argmin.h
delete mode 100644 mindspore/lite/src/ops/arithmetic.cc
delete mode 100644 mindspore/lite/src/ops/arithmetic.h
delete mode 100644 mindspore/lite/src/ops/arithmetic_compare.cc
delete mode 100644 mindspore/lite/src/ops/arithmetic_compare.h
delete mode 100644 mindspore/lite/src/ops/arithmetic_grad.cc
delete mode 100644 mindspore/lite/src/ops/arithmetic_grad.h
delete mode 100644 mindspore/lite/src/ops/arithmetic_self.cc
delete mode 100644 mindspore/lite/src/ops/arithmetic_self.h
delete mode 100644 mindspore/lite/src/ops/assert_op.cc
delete mode 100644 mindspore/lite/src/ops/assert_op.h
delete mode 100644 mindspore/lite/src/ops/assign.cc
delete mode 100644 mindspore/lite/src/ops/assign.h
delete mode 100644 mindspore/lite/src/ops/assign_add.cc
delete mode 100644 mindspore/lite/src/ops/assign_add.h
delete mode 100644 mindspore/lite/src/ops/audio_spectrogram.cc
delete mode 100644 mindspore/lite/src/ops/audio_spectrogram.h
delete mode 100644 mindspore/lite/src/ops/batch_norm.cc
delete mode 100644 mindspore/lite/src/ops/batch_norm.h
delete mode 100644 mindspore/lite/src/ops/batch_to_space.cc
delete mode 100644 mindspore/lite/src/ops/batch_to_space.h
delete mode 100644 mindspore/lite/src/ops/bias_add.cc
delete mode 100644 mindspore/lite/src/ops/bias_add.h
delete mode 100644 mindspore/lite/src/ops/bias_grad.cc
delete mode 100644 mindspore/lite/src/ops/bias_grad.h
delete mode 100644 mindspore/lite/src/ops/binary_cross_entropy.cc
delete mode 100644 mindspore/lite/src/ops/binary_cross_entropy.h
delete mode 100644 mindspore/lite/src/ops/binary_cross_entropy_grad.cc
delete mode 100644 mindspore/lite/src/ops/binary_cross_entropy_grad.h
delete mode 100644 mindspore/lite/src/ops/bn_grad.cc
delete mode 100644 mindspore/lite/src/ops/bn_grad.h
delete mode 100644 mindspore/lite/src/ops/broadcast_to.cc
delete mode 100644 mindspore/lite/src/ops/broadcast_to.h
delete mode 100644 mindspore/lite/src/ops/cast.cc
delete mode 100644 mindspore/lite/src/ops/cast.h
delete mode 100644 mindspore/lite/src/ops/ceil.cc
delete mode 100644 mindspore/lite/src/ops/ceil.h
delete mode 100644 mindspore/lite/src/ops/clip.cc
delete mode 100644 mindspore/lite/src/ops/clip.h
create mode 100644 mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc
delete mode 100644 mindspore/lite/src/ops/compat/v0/broadcat_to_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/power_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc
create mode 100644 mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc
delete mode 100644 mindspore/lite/src/ops/concat.cc
delete mode 100644 mindspore/lite/src/ops/concat.h
delete mode 100644 mindspore/lite/src/ops/constant.h
delete mode 100644 mindspore/lite/src/ops/constant_of_shape.cc
delete mode 100644 mindspore/lite/src/ops/constant_of_shape.h
delete mode 100644 mindspore/lite/src/ops/control_depend.cc
delete mode 100644 mindspore/lite/src/ops/control_depend.h
delete mode 100644 mindspore/lite/src/ops/conv2d.cc
delete mode 100644 mindspore/lite/src/ops/conv2d.h
delete mode 100644 mindspore/lite/src/ops/conv2d_grad_filter.cc
delete mode 100644 mindspore/lite/src/ops/conv2d_grad_filter.h
delete mode 100644 mindspore/lite/src/ops/conv2d_grad_input.cc
delete mode 100644 mindspore/lite/src/ops/conv2d_grad_input.h
delete mode 100644 mindspore/lite/src/ops/cos.cc
delete mode 100644 mindspore/lite/src/ops/cos.h
delete mode 100644 mindspore/lite/src/ops/crop.cc
delete mode 100644 mindspore/lite/src/ops/crop.h
delete mode 100644 mindspore/lite/src/ops/crop_and_resize.cc
delete mode 100644 mindspore/lite/src/ops/crop_and_resize.h
delete mode 100644 mindspore/lite/src/ops/custom_extract_features.cc
delete mode 100644 mindspore/lite/src/ops/custom_extract_features.h
delete mode 100644 mindspore/lite/src/ops/custom_normalize.cc
delete mode 100644 mindspore/lite/src/ops/custom_normalize.h
delete mode 100644 mindspore/lite/src/ops/custom_predict.cc
delete mode 100644 mindspore/lite/src/ops/custom_predict.h
delete mode 100644 mindspore/lite/src/ops/deconv2d.cc
delete mode 100644 mindspore/lite/src/ops/deconv2d.h
delete mode 100644 mindspore/lite/src/ops/dedepthwise_conv2d.cc
delete mode 100644 mindspore/lite/src/ops/dedepthwise_conv2d.h
delete mode 100644 mindspore/lite/src/ops/depend.cc
delete mode 100644 mindspore/lite/src/ops/depend.h
delete mode 100644 mindspore/lite/src/ops/depth_to_space.cc
delete mode 100644 mindspore/lite/src/ops/depth_to_space.h
delete mode 100644 mindspore/lite/src/ops/depthwise_conv2d.cc
delete mode 100644 mindspore/lite/src/ops/depthwise_conv2d.h
delete mode 100644 mindspore/lite/src/ops/dequant.cc
delete mode 100644 mindspore/lite/src/ops/dequant.h
delete mode 100644 mindspore/lite/src/ops/detection_post_process.cc
delete mode 100644 mindspore/lite/src/ops/detection_post_process.h
delete mode 100644 mindspore/lite/src/ops/div.cc
delete mode 100644 mindspore/lite/src/ops/div.h
delete mode 100644 mindspore/lite/src/ops/dropout.cc
delete mode 100644 mindspore/lite/src/ops/dropout.h
delete mode 100644 mindspore/lite/src/ops/dropout_grad.cc
delete mode 100644 mindspore/lite/src/ops/dropout_grad.h
delete mode 100644 mindspore/lite/src/ops/eltwise.cc
delete mode 100644 mindspore/lite/src/ops/eltwise.h
delete mode 100644 mindspore/lite/src/ops/elu.cc
delete mode 100644 mindspore/lite/src/ops/elu.h
delete mode 100644 mindspore/lite/src/ops/embedding_lookup.cc
delete mode 100644 mindspore/lite/src/ops/embedding_lookup.h
delete mode 100644 mindspore/lite/src/ops/equal.cc
delete mode 100644 mindspore/lite/src/ops/equal.h
delete mode 100644 mindspore/lite/src/ops/erf.h
delete mode 100644 mindspore/lite/src/ops/exp.cc
delete mode 100644 mindspore/lite/src/ops/exp.h
delete mode 100644 mindspore/lite/src/ops/expand_dims.cc
delete mode 100644 mindspore/lite/src/ops/expand_dims.h
delete mode 100644 mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc
delete mode 100644 mindspore/lite/src/ops/fake_quant_with_min_max_vars.h
delete mode 100644 mindspore/lite/src/ops/fft_imag.cc
delete mode 100644 mindspore/lite/src/ops/fft_imag.h
delete mode 100644 mindspore/lite/src/ops/fft_real.cc
delete mode 100644 mindspore/lite/src/ops/fft_real.h
delete mode 100644 mindspore/lite/src/ops/fill.cc
delete mode 100644 mindspore/lite/src/ops/fill.h
delete mode 100644 mindspore/lite/src/ops/flatten.cc
delete mode 100644 mindspore/lite/src/ops/flatten.h
delete mode 100644 mindspore/lite/src/ops/flatten_grad.cc
delete mode 100644 mindspore/lite/src/ops/flatten_grad.h
delete mode 100644 mindspore/lite/src/ops/floor.cc
delete mode 100644 mindspore/lite/src/ops/floor.h
delete mode 100644 mindspore/lite/src/ops/floor_div.cc
delete mode 100644 mindspore/lite/src/ops/floor_div.h
delete mode 100644 mindspore/lite/src/ops/floor_mod.cc
delete mode 100644 mindspore/lite/src/ops/floor_mod.h
delete mode 100644 mindspore/lite/src/ops/full_connection.cc
delete mode 100644 mindspore/lite/src/ops/full_connection.h
delete mode 100644 mindspore/lite/src/ops/fused_batchnorm.cc
delete mode 100644 mindspore/lite/src/ops/fused_batchnorm.h
delete mode 100644 mindspore/lite/src/ops/gather.cc
delete mode 100644 mindspore/lite/src/ops/gather.h
delete mode 100644 mindspore/lite/src/ops/gather_nd.cc
delete mode 100644 mindspore/lite/src/ops/gather_nd.h
delete mode 100644 mindspore/lite/src/ops/gelu.cc
delete mode 100644 mindspore/lite/src/ops/gelu.h
delete mode 100644 mindspore/lite/src/ops/greater.cc
delete mode 100644 mindspore/lite/src/ops/greater.h
delete mode 100644 mindspore/lite/src/ops/greater_equal.cc
delete mode 100644 mindspore/lite/src/ops/greater_equal.h
delete mode 100644 mindspore/lite/src/ops/group_conv2d_grad_input.cc
delete mode 100644 mindspore/lite/src/ops/group_conv2d_grad_input.h
delete mode 100644 mindspore/lite/src/ops/gru.cc
delete mode 100644 mindspore/lite/src/ops/gru.h
delete mode 100644 mindspore/lite/src/ops/hashtable_lookup.cc
delete mode 100644 mindspore/lite/src/ops/hashtable_lookup.h
delete mode 100644 mindspore/lite/src/ops/identity.h
delete mode 100644 mindspore/lite/src/ops/if.h
delete mode 100644 mindspore/lite/src/ops/instance_norm.cc
delete mode 100644 mindspore/lite/src/ops/instance_norm.h
delete mode 100644 mindspore/lite/src/ops/invert_permutation.cc
delete mode 100644 mindspore/lite/src/ops/invert_permutation.h
delete mode 100644 mindspore/lite/src/ops/is_finite.h
delete mode 100644 mindspore/lite/src/ops/l2_norm.cc
delete mode 100644 mindspore/lite/src/ops/l2_norm.h
delete mode 100644 mindspore/lite/src/ops/layer_norm.cc
delete mode 100644 mindspore/lite/src/ops/layer_norm.h
delete mode 100644 mindspore/lite/src/ops/leaky_relu.cc
delete mode 100644 mindspore/lite/src/ops/leaky_relu.h
delete mode 100644 mindspore/lite/src/ops/less.cc
delete mode 100644 mindspore/lite/src/ops/less.h
delete mode 100644 mindspore/lite/src/ops/less_equal.cc
delete mode 100644 mindspore/lite/src/ops/less_equal.h
delete mode 100644 mindspore/lite/src/ops/lin_space.cc
delete mode 100644 mindspore/lite/src/ops/lin_space.h
delete mode 100644 mindspore/lite/src/ops/local_response_normalization.cc
delete mode 100644 mindspore/lite/src/ops/local_response_normalization.h
delete mode 100644 mindspore/lite/src/ops/log.cc
delete mode 100644 mindspore/lite/src/ops/log.h
delete mode 100644 mindspore/lite/src/ops/log_grad.cc
delete mode 100644 mindspore/lite/src/ops/log_grad.h
delete mode 100644 mindspore/lite/src/ops/logical_and.cc
delete mode 100644 mindspore/lite/src/ops/logical_and.h
delete mode 100644 mindspore/lite/src/ops/logical_not.cc
delete mode 100644 mindspore/lite/src/ops/logical_not.h
delete mode 100644 mindspore/lite/src/ops/logical_or.cc
delete mode 100644 mindspore/lite/src/ops/logical_or.h
delete mode 100644 mindspore/lite/src/ops/lrn.cc
delete mode 100644 mindspore/lite/src/ops/lrn.h
delete mode 100644 mindspore/lite/src/ops/lsh_projection.cc
delete mode 100644 mindspore/lite/src/ops/lsh_projection.h
delete mode 100644 mindspore/lite/src/ops/lstm.cc
delete mode 100644 mindspore/lite/src/ops/lstm.h
delete mode 100644 mindspore/lite/src/ops/make_tuple.cc
delete mode 100644 mindspore/lite/src/ops/make_tuple.h
delete mode 100644 mindspore/lite/src/ops/matmul.cc
delete mode 100644 mindspore/lite/src/ops/matmul.h
delete mode 100644 mindspore/lite/src/ops/maximum.cc
delete mode 100644 mindspore/lite/src/ops/maximum.h
delete mode 100644 mindspore/lite/src/ops/maximum_grad.cc
delete mode 100644 mindspore/lite/src/ops/maximum_grad.h
delete mode 100644 mindspore/lite/src/ops/merge.cc
delete mode 100644 mindspore/lite/src/ops/merge.h
delete mode 100644 mindspore/lite/src/ops/mfcc.cc
delete mode 100644 mindspore/lite/src/ops/mfcc.h
delete mode 100644 mindspore/lite/src/ops/minimum.cc
delete mode 100644 mindspore/lite/src/ops/minimum.h
delete mode 100644 mindspore/lite/src/ops/minimum_grad.cc
delete mode 100644 mindspore/lite/src/ops/minimum_grad.h
delete mode 100644 mindspore/lite/src/ops/mod.cc
delete mode 100644 mindspore/lite/src/ops/mod.h
delete mode 100644 mindspore/lite/src/ops/mul.cc
delete mode 100644 mindspore/lite/src/ops/mul.h
delete mode 100644 mindspore/lite/src/ops/nchw2nhwc.cc
delete mode 100644 mindspore/lite/src/ops/nchw2nhwc.h
delete mode 100644 mindspore/lite/src/ops/neg.cc
delete mode 100644 mindspore/lite/src/ops/neg.h
delete mode 100644 mindspore/lite/src/ops/neg_grad.cc
delete mode 100644 mindspore/lite/src/ops/neg_grad.h
delete mode 100644 mindspore/lite/src/ops/nhwc2nchw.cc
delete mode 100644 mindspore/lite/src/ops/nhwc2nchw.h
delete mode 100644 mindspore/lite/src/ops/non_max_suppression.cc
delete mode 100644 mindspore/lite/src/ops/non_max_suppression.h
delete mode 100644 mindspore/lite/src/ops/nonzero.cc
delete mode 100644 mindspore/lite/src/ops/nonzero.h
delete mode 100644 mindspore/lite/src/ops/not_equal.cc
delete mode 100644 mindspore/lite/src/ops/not_equal.h
delete mode 100644 mindspore/lite/src/ops/one_hot.cc
delete mode 100644 mindspore/lite/src/ops/one_hot.h
delete mode 100644 mindspore/lite/src/ops/oneslike.cc
delete mode 100644 mindspore/lite/src/ops/oneslike.h
create mode 100644 mindspore/lite/src/ops/ops_def.h
create mode 100644 mindspore/lite/src/ops/ops_func_declare.h
delete mode 100644 mindspore/lite/src/ops/ops_register.h
create mode 100644 mindspore/lite/src/ops/ops_utils.cc
create mode 100644 mindspore/lite/src/ops/ops_utils.h
delete mode 100644 mindspore/lite/src/ops/p_relu.cc
delete mode 100644 mindspore/lite/src/ops/p_relu.h
delete mode 100644 mindspore/lite/src/ops/pad.cc
delete mode 100644 mindspore/lite/src/ops/pad.h
delete mode 100644 mindspore/lite/src/ops/partial.cc
delete mode 100644 mindspore/lite/src/ops/partial.h
delete mode 100644 mindspore/lite/src/ops/pooling.cc
delete mode 100644 mindspore/lite/src/ops/pooling.h
delete mode 100644 mindspore/lite/src/ops/pooling_grad.cc
delete mode 100644 mindspore/lite/src/ops/pooling_grad.h
create mode 100644 mindspore/lite/src/ops/populate/audio_spectrogram_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/clip_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/default_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/default_populate.h
create mode 100644 mindspore/lite/src/ops/populate/erf_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/if_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/invert_permutation_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/isfinite_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/lin_space_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/mfcc_populate.cc
delete mode 100644 mindspore/lite/src/ops/populate/nchw2nhwc_populate.cc
delete mode 100644 mindspore/lite/src/ops/populate/nhwc2nchw_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/rank_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/size_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/uniform_real_populate.cc
delete mode 100644 mindspore/lite/src/ops/populate/upsample_populate.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/activation_grad_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/activation_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/adam_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/add_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/addn_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/argmax_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/argmin_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.h
create mode 100644 mindspore/lite/src/ops/populate/v0/arithmetic_self_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/assert_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/assign_add_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/assign_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/batch_norm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/batch_to_space_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/bias_add_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/bias_grad_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/binary_cross_entropy_grad_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/binary_cross_entropy_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/broadcast_to_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/cast_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/clip_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/common_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/concat_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/conv2d_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/custom_extract_features_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/custom_normalize_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/custom_predict_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/deconv2d_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/dedepthwise_conv2d_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/depth_to_space_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/depthwise_conv2d_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/detection_post_process_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/div_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/eltwise_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/elu_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/embedding_lookup_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/exp_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/expand_dims_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/fill_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/flatten_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/full_connection_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/fused_batchnorm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/gather_nd_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/gather_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/hashtable_lookup_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/instance_norm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/l2_norm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.h
create mode 100644 mindspore/lite/src/ops/populate/v0/local_response_normalization_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/lsh_projection_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/lstm_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/matmul_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/mul_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/nchw2nhwc_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/nhwc2nchw_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/non_max_suppression_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/one_hot_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/oneslike_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/p_relu_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/pad_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/pooling_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/power_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/quant_dtype_cast_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/range_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/rank_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/reshape_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/resize_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/reverse_sequence_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/roi_pooling_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/scale_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/scatter_nd_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/shape_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/skip_gram_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/softmax_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/space_to_batch_nd_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/space_to_depth_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/sparse_to_dense_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/split_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/squared_difference_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/squeeze_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/stack_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/strided_slice_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/strided_slice_populate_v0.h
create mode 100644 mindspore/lite/src/ops/populate/v0/sub_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/switch_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tensorlistfromtensor_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tensorlistgetitem_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tensorlistreserve_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tensorlistsetlitem_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tensorliststack_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/tile_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/topk_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/transpose_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/unique_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/unsorted_segment_sum_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/unstack_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/where_populate_v0.cc
create mode 100644 mindspore/lite/src/ops/populate/v0/while_populate_v0.cc
delete mode 100644 mindspore/lite/src/ops/power.cc
delete mode 100644 mindspore/lite/src/ops/power.h
delete mode 100644 mindspore/lite/src/ops/power_grad.cc
delete mode 100644 mindspore/lite/src/ops/power_grad.h
delete mode 100644 mindspore/lite/src/ops/primitive_c.cc
delete mode 100644 mindspore/lite/src/ops/primitive_c.h
delete mode 100644 mindspore/lite/src/ops/prior_box.cc
delete mode 100644 mindspore/lite/src/ops/prior_box.h
delete mode 100644 mindspore/lite/src/ops/quant.cc
delete mode 100644 mindspore/lite/src/ops/quant.h
delete mode 100644 mindspore/lite/src/ops/quant_dtype_cast.cc
delete mode 100644 mindspore/lite/src/ops/quant_dtype_cast.h
delete mode 100644 mindspore/lite/src/ops/random_standard_normal.cc
delete mode 100644 mindspore/lite/src/ops/random_standard_normal.h
delete mode 100644 mindspore/lite/src/ops/range.cc
delete mode 100644 mindspore/lite/src/ops/range.h
delete mode 100644 mindspore/lite/src/ops/rank.cc
delete mode 100644 mindspore/lite/src/ops/rank.h
delete mode 100644 mindspore/lite/src/ops/real_div.cc
delete mode 100644 mindspore/lite/src/ops/real_div.h
delete mode 100644 mindspore/lite/src/ops/reciprocal.cc
delete mode 100644 mindspore/lite/src/ops/reciprocal.h
delete mode 100644 mindspore/lite/src/ops/reduce.cc
delete mode 100644 mindspore/lite/src/ops/reduce.h
delete mode 100644 mindspore/lite/src/ops/reshape.cc
delete mode 100644 mindspore/lite/src/ops/reshape.h
delete mode 100644 mindspore/lite/src/ops/resize.cc
delete mode 100644 mindspore/lite/src/ops/resize.h
delete mode 100644 mindspore/lite/src/ops/return.cc
delete mode 100644 mindspore/lite/src/ops/return.h
delete mode 100644 mindspore/lite/src/ops/reverse.cc
delete mode 100644 mindspore/lite/src/ops/reverse.h
delete mode 100644 mindspore/lite/src/ops/reverse_sequence.cc
delete mode 100644 mindspore/lite/src/ops/reverse_sequence.h
delete mode 100644 mindspore/lite/src/ops/rfft.cc
delete mode 100644 mindspore/lite/src/ops/rfft.h
delete mode 100644 mindspore/lite/src/ops/roi_pooling.cc
delete mode 100644 mindspore/lite/src/ops/roi_pooling.h
delete mode 100644 mindspore/lite/src/ops/round.cc
delete mode 100644 mindspore/lite/src/ops/round.h
delete mode 100644 mindspore/lite/src/ops/rsqrt.cc
delete mode 100644 mindspore/lite/src/ops/rsqrt.h
delete mode 100644 mindspore/lite/src/ops/scale.cc
delete mode 100644 mindspore/lite/src/ops/scale.h
delete mode 100644 mindspore/lite/src/ops/scatter_nd.cc
delete mode 100644 mindspore/lite/src/ops/scatter_nd.h
delete mode 100644 mindspore/lite/src/ops/schema_def.h
delete mode 100644 mindspore/lite/src/ops/select.cc
delete mode 100644 mindspore/lite/src/ops/select.h
delete mode 100644 mindspore/lite/src/ops/sgd.cc
delete mode 100644 mindspore/lite/src/ops/sgd.h
delete mode 100644 mindspore/lite/src/ops/shape.cc
delete mode 100644 mindspore/lite/src/ops/shape.h
delete mode 100644 mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.cc
delete mode 100644 mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.h
delete mode 100644 mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.cc
delete mode 100644 mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.h
delete mode 100644 mindspore/lite/src/ops/sin.cc
delete mode 100644 mindspore/lite/src/ops/sin.h
delete mode 100644 mindspore/lite/src/ops/size.cc
delete mode 100644 mindspore/lite/src/ops/size.h
delete mode 100644 mindspore/lite/src/ops/skip_gram.cc
delete mode 100644 mindspore/lite/src/ops/skip_gram.h
delete mode 100644 mindspore/lite/src/ops/slice.cc
delete mode 100644 mindspore/lite/src/ops/slice.h
delete mode 100644 mindspore/lite/src/ops/smooth_l1_loss.cc
delete mode 100644 mindspore/lite/src/ops/smooth_l1_loss.h
delete mode 100644 mindspore/lite/src/ops/smooth_l1_loss_grad.cc
delete mode 100644 mindspore/lite/src/ops/smooth_l1_loss_grad.h
delete mode 100644 mindspore/lite/src/ops/softmax.cc
delete mode 100644 mindspore/lite/src/ops/softmax.h
delete mode 100644 mindspore/lite/src/ops/softmax_cross_entropy.cc
delete mode 100644 mindspore/lite/src/ops/softmax_cross_entropy.h
delete mode 100644 mindspore/lite/src/ops/space_to_batch.cc
delete mode 100644 mindspore/lite/src/ops/space_to_batch.h
delete mode 100644 mindspore/lite/src/ops/space_to_batch_nd.cc
delete mode 100644 mindspore/lite/src/ops/space_to_batch_nd.h
delete mode 100644 mindspore/lite/src/ops/space_to_depth.cc
delete mode 100644 mindspore/lite/src/ops/space_to_depth.h
delete mode 100644 mindspore/lite/src/ops/sparse_softmax_cross_entropy.cc
delete mode 100644 mindspore/lite/src/ops/sparse_softmax_cross_entropy.h
delete mode 100644 mindspore/lite/src/ops/sparse_to_dense.cc
delete mode 100644 mindspore/lite/src/ops/sparse_to_dense.h
delete mode 100644 mindspore/lite/src/ops/split.cc
delete mode 100644 mindspore/lite/src/ops/split.h
delete mode 100644 mindspore/lite/src/ops/sqrt.cc
delete mode 100644 mindspore/lite/src/ops/sqrt.h
delete mode 100644 mindspore/lite/src/ops/square.cc
delete mode 100644 mindspore/lite/src/ops/square.h
delete mode 100644 mindspore/lite/src/ops/squared_difference.cc
delete mode 100644 mindspore/lite/src/ops/squared_difference.h
delete mode 100644 mindspore/lite/src/ops/squeeze.cc
delete mode 100644 mindspore/lite/src/ops/squeeze.h
delete mode 100644 mindspore/lite/src/ops/stack.cc
delete mode 100644 mindspore/lite/src/ops/stack.h
delete mode 100644 mindspore/lite/src/ops/strided_slice.cc
delete mode 100644 mindspore/lite/src/ops/strided_slice.h
delete mode 100644 mindspore/lite/src/ops/strided_slice_grad.cc
delete mode 100644 mindspore/lite/src/ops/strided_slice_grad.h
delete mode 100644 mindspore/lite/src/ops/sub.cc
delete mode 100644 mindspore/lite/src/ops/sub.h
delete mode 100644 mindspore/lite/src/ops/switch.cc
delete mode 100644 mindspore/lite/src/ops/switch.h
delete mode 100644 mindspore/lite/src/ops/tensorlist_fromtensor.cc
delete mode 100644 mindspore/lite/src/ops/tensorlist_fromtensor.h
delete mode 100644 mindspore/lite/src/ops/tensorlist_getitem.cc
delete mode 100644 mindspore/lite/src/ops/tensorlist_getitem.h
delete mode 100644 mindspore/lite/src/ops/tensorlist_reserve.cc
delete mode 100644 mindspore/lite/src/ops/tensorlist_reserve.h
delete mode 100644 mindspore/lite/src/ops/tensorlist_setitem.cc
delete mode 100644 mindspore/lite/src/ops/tensorlist_setitem.h
delete mode 100644 mindspore/lite/src/ops/tensorlist_stack.cc
delete mode 100644 mindspore/lite/src/ops/tensorlist_stack.h
delete mode 100644 mindspore/lite/src/ops/tile.cc
delete mode 100644 mindspore/lite/src/ops/tile.h
delete mode 100644 mindspore/lite/src/ops/topk.cc
delete mode 100644 mindspore/lite/src/ops/topk.h
delete mode 100644 mindspore/lite/src/ops/transpose.cc
delete mode 100644 mindspore/lite/src/ops/transpose.h
delete mode 100644 mindspore/lite/src/ops/tuple_get_item.cc
delete mode 100644 mindspore/lite/src/ops/tuple_get_item.h
delete mode 100644 mindspore/lite/src/ops/uniform_real.cc
delete mode 100644 mindspore/lite/src/ops/uniform_real.h
delete mode 100644 mindspore/lite/src/ops/unique.cc
delete mode 100644 mindspore/lite/src/ops/unique.h
delete mode 100644 mindspore/lite/src/ops/unsorted_segment_sum.cc
delete mode 100644 mindspore/lite/src/ops/unsorted_segment_sum.h
delete mode 100644 mindspore/lite/src/ops/unsqueeze.cc
delete mode 100644 mindspore/lite/src/ops/unsqueeze.h
delete mode 100644 mindspore/lite/src/ops/unstack.cc
delete mode 100644 mindspore/lite/src/ops/unstack.h
delete mode 100644 mindspore/lite/src/ops/upsample.cc
delete mode 100644 mindspore/lite/src/ops/upsample.h
delete mode 100644 mindspore/lite/src/ops/where.cc
delete mode 100644 mindspore/lite/src/ops/where.h
delete mode 100644 mindspore/lite/src/ops/while.cc
delete mode 100644 mindspore/lite/src/ops/while.h
delete mode 100644 mindspore/lite/src/ops/zeros_like.cc
delete mode 100644 mindspore/lite/src/ops/zeros_like.h
create mode 100644 mindspore/lite/src/runtime/infer_manager.cc
create mode 100644 mindspore/lite/src/runtime/infer_manager.h
create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc
create mode 100644 mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.h
delete mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.cc
delete mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.h
delete mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc
delete mode 100644 mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h
create mode 100644 mindspore/lite/src/train/train_populate_parameter_v0.cc
create mode 100644 mindspore/lite/src/train/train_populate_parameter_v0.h
create mode 100644 mindspore/lite/test/common/import_from_meta_graphT.cc
create mode 100644 mindspore/lite/test/common/import_from_meta_graphT.h
create mode 100644 mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/infer_manager_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/maximum_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc
create mode 100644 mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc
create mode 100644
mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc delete mode 100644 mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/upsample_fp32_tests.cc delete mode 100644 mindspore/lite/tools/anf_importer/CMakeLists.txt delete mode 100644 mindspore/lite/tools/anf_importer/anf_importer.cc delete mode 100644 mindspore/lite/tools/anf_importer/anf_importer.h delete mode 100644 mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc delete mode 100644 mindspore/lite/tools/anf_importer/import_from_meta_graphT.h delete mode 100644 mindspore/lite/tools/anf_importer/import_from_mindir.cc delete mode 100644 mindspore/lite/tools/anf_importer/import_from_mindir.h delete mode 100644 mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc delete mode 100644 mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h create mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.cc create mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h create mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.cc create mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.h create mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc create mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_converter.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_round_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_round_parser.h delete mode 100644 
mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.h create mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc create mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc delete mode 100644 mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.h create mode 100644 mindspore/lite/tools/converter/quant_param_holder.h delete mode 100644 mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.cc delete mode 100644 mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.h create mode 100644 mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.h create mode 100644 mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.cc create mode 100644 mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.h delete mode 100644 mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.cc delete mode 100644 mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.h delete mode 100644 mindspore/lite/tools/schema_gen/schema_type_def.cc delete mode 100644 mindspore/lite/tools/schema_gen/schema_type_def.h delete mode 100644 mindspore/lite/tools/schema_gen/schema_type_register.h diff --git a/mindspore/lite/CMakeLists.txt b/mindspore/lite/CMakeLists.txt index 522883d328..65128f7fd3 100644 --- a/mindspore/lite/CMakeLists.txt +++ b/mindspore/lite/CMakeLists.txt @@ -72,6 +72,7 @@ add_compile_definitions(NO_DLIB) add_compile_options(-fPIC) if(SUPPORT_TRAIN) + set(BUILD_MINDDATA "full") if(PLATFORM_ARM64) set(RUNTIME_COMPONENT_NAME train-android-aarch64) elseif(PLATFORM_ARM32) diff --git a/mindspore/lite/include/model.h b/mindspore/lite/include/model.h index 91cea9c941..1f73e9e963 100644 --- a/mindspore/lite/include/model.h +++ b/mindspore/lite/include/model.h @@ -19,14 +19,14 @@ #include "include/lite_utils.h" namespace mindspore::lite { -class PrimitiveC; struct MS_API Model { struct Node { String name_; NodeType node_type_; - PrimitiveC *primitive_; + const void *primitive_; Uint32Vector input_indices_; Uint32Vector output_indices_; + int quant_type_; }; using NodePtrVector = std::vector; struct SubGraph { @@ -55,7 +55,7 @@ struct MS_API Model { /// \brief Free meta graph temporary buffer virtual void Free() = 0; - /// \brief Free all temporay buffer.EG: nodes in the model. + /// \brief Free all temporary buffer.EG: nodes in the model. 
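The Model::Node change above is the heart of the IR unification: the IR-specific PrimitiveC * becomes an opaque const void * (the schema primitive) and each node carries an explicit quant_type_. A minimal C++ sketch of walking nodes under the new layout, assuming the usual all_nodes_ vector and that String aliases std::string:

    #include <cstdio>
    #include "include/model.h"

    // Sketch only: primitive_ is opaque here; real callers cast it to the
    // flatbuffer primitive type before reading attributes.
    void DumpNodes(const mindspore::lite::Model &model) {
      for (const auto *node : model.all_nodes_) {
        if (node == nullptr || node->primitive_ == nullptr) {
          continue;
        }
        std::printf("node: %s, quant_type: %d, inputs: %zu\n", node->name_.c_str(),
                    node->quant_type_, node->input_indices_.size());
      }
    }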
virtual void Destroy() = 0; /// \brief Model destruct, free all memory diff --git a/mindspore/lite/include/version.h b/mindspore/lite/include/version.h index 857452bb6a..476fc20625 100644 --- a/mindspore/lite/include/version.h +++ b/mindspore/lite/include/version.h @@ -22,7 +22,7 @@ namespace mindspore { namespace lite { const int ms_version_major = 1; -const int ms_version_minor = 1; +const int ms_version_minor = 2; const int ms_version_revision = 0; /// \brief Global method to get a version string. diff --git a/mindspore/lite/micro/CMakeLists.txt b/mindspore/lite/micro/CMakeLists.txt index d49c00d88f..912acff2ce 100644 --- a/mindspore/lite/micro/CMakeLists.txt +++ b/mindspore/lite/micro/CMakeLists.txt @@ -9,16 +9,10 @@ include_directories(${CMAKE_BINARY_DIR}) include(${TOP_DIR}/cmake/utils.cmake) include(${TOP_DIR}/cmake/dependency_utils.cmake) include(${TOP_DIR}/cmake/dependency_securec.cmake) +include(${TOP_DIR}/cmake/external_libs/glog.cmake) include(${TOP_DIR}/cmake/external_libs/flatbuffers.cmake) -include(${TOP_DIR}/cmake/external_libs/cmsis.cmake) - -set(FBS_FILES - ${CMAKE_CURRENT_SOURCE_DIR}/../schema/model.fbs - ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ops.fbs - ${CMAKE_CURRENT_SOURCE_DIR}/../schema/model_v0.fbs - ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ops_v0.fbs - ) +file(GLOB FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/../schema/*.fbs) ms_build_flatbuffers_lite(FBS_FILES ${CMAKE_CURRENT_SOURCE_DIR}/../schema/ fbs_src @@ -50,6 +44,6 @@ if(ENABLE_ASAN) endif() add_subdirectory(coder) -if(${BUILD_TESTCASES}) +if(BUILD_TESTCASES) add_subdirectory(test) endif() diff --git a/mindspore/lite/micro/cmake/file_list.cmake b/mindspore/lite/micro/cmake/file_list.cmake index 7417c87d5a..e72274f2a5 100644 --- a/mindspore/lite/micro/cmake/file_list.cmake +++ b/mindspore/lite/micro/cmake/file_list.cmake @@ -5,6 +5,9 @@ set(CODER_SRC ${MICRO_DIR}/coder/graph.cc ${MICRO_DIR}/coder/session.cc ${MICRO_DIR}/coder/train.cc + ${MICRO_DIR}/coder/utils/coder_utils.cc + ${MICRO_DIR}/coder/utils/dir_utils.cc + ${MICRO_DIR}/coder/utils/type_cast.cc ) set(CODER_ALLOCATOR_SRC @@ -21,6 +24,11 @@ set(CODER_GENERATOR_SRC ${MICRO_DIR}/coder/generator/component/weight_component.cc ${MICRO_DIR}/coder/generator/component/cmake_component.cc ${MICRO_DIR}/coder/generator/component/train_component.cc + ${MICRO_DIR}/coder/generator/component/parallel_component.cc + ) + +set(MINDSPORE_CORE + ${TOP_DIR}/mindspore/core/gvar/logging_level.cc ) set(CODER_OPCODERS_SRC @@ -28,16 +36,20 @@ set(CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/op_coder.cc ${MICRO_DIR}/coder/opcoders/op_coder_builder.cc ${MICRO_DIR}/coder/opcoders/op_coder_register.cc + ${MICRO_DIR}/coder/opcoders/parallel.cc #### serializer ${MICRO_DIR}/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.cc ${MICRO_DIR}/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc + ${MICRO_DIR}/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.cc #### base coder ${MICRO_DIR}/coder/opcoders/base/conv2d_base_coder.cc ${MICRO_DIR}/coder/opcoders/base/dtype_cast_coder.cc ${MICRO_DIR}/coder/opcoders/base/full_connection_base_coder.cc ${MICRO_DIR}/coder/opcoders/base/quant_dtype_cast_coder.cc ${MICRO_DIR}/coder/opcoders/base/reduce_base_coder.cc + ${MICRO_DIR}/coder/opcoders/base/resize_base_coder.cc ${MICRO_DIR}/coder/opcoders/base/softmax_base_coder.cc + ${MICRO_DIR}/coder/opcoders/base/detection_post_process_base_coder.cc #### cmsis int8 coder ${MICRO_DIR}/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc 
${MICRO_DIR}/coder/opcoders/cmsis-nn/int8/conv2d_base_coder.cc @@ -55,6 +67,7 @@ set(CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/concat_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/convolution_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.cc @@ -64,21 +77,20 @@ set(CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/nnacl/fp32/gather_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/matmul_fp32_coder.cc - ${MICRO_DIR}/coder/opcoders/nnacl/fp32/nchw2nhwc_fp32_coder.cc - ${MICRO_DIR}/coder/opcoders/nnacl/fp32/nhwc2nchw_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/power_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/reduce_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/reshape_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc - ${MICRO_DIR}/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/softmax_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/squeeze_dims_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc #### nnacl int8 coder + ${MICRO_DIR}/coder/opcoders/nnacl/int8/activation_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/add_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/batchnorm_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/concat_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/matmul_int8_coder.cc @@ -87,40 +99,69 @@ set(CODER_OPCODERS_SRC ${MICRO_DIR}/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/pooling_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/resize_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/reduce_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/reshape_int8_coder.cc ${MICRO_DIR}/coder/opcoders/nnacl/int8/softmax_int8_coder.cc - ) - -set(CODER_UTILS_SRC - ${MICRO_DIR}/coder/utils/coder_utils.cc - ${MICRO_DIR}/coder/utils/dir_utils.cc - ${MICRO_DIR}/coder/utils/type_cast.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/sub_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/sigmoid_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/relux_int8_coder.cc + ${MICRO_DIR}/coder/opcoders/nnacl/int8/div_int8_coder.cc + #### nnacl dequant coder + ${MICRO_DIR}/coder/opcoders/nnacl/dequant/de_quant.cc ) set(LITE_SRC ${LITE_DIR}/src/common/file_utils.cc ${LITE_DIR}/src/common/graph_util.cc ${LITE_DIR}/src/common/string_util.cc + ${LITE_DIR}/src/common/prim_util.cc + ${LITE_DIR}/src/common/tensor_util.cc ${LITE_DIR}/src/runtime/allocator.cc + ${LITE_DIR}/src/runtime/infer_manager.cc + ${LITE_DIR}/src/runtime/runtime_api.cc ${LITE_DIR}/src/lite_model.cc ${LITE_DIR}/src/tensorlist.cc ${LITE_DIR}/src/tensor.cc + ${LITE_DIR}/src/scheduler.cc + ${LITE_DIR}/src/inner_context.cc + ${LITE_DIR}/src/dequant.cc + ${LITE_DIR}/src/kernel_registry.cc 
+ ${LITE_DIR}/src/lite_kernel.cc + ${LITE_DIR}/src/sub_graph_kernel.cc + ${LITE_DIR}/src/huffman_decode.cc + ${LITE_DIR}/src/executor.cc ${LITE_DIR}/src/common/log_adapter.cc - ### src/ops for parameter and infer shape - ${LITE_DIR}/src/ops/batch_norm.cc - ${LITE_DIR}/src/ops/conv2d.cc - ${LITE_DIR}/src/ops/primitive_c.cc - ${LITE_DIR}/src/ops/slice.cc - ${LITE_DIR}/src/ops/while.cc + ${LITE_DIR}/src/common/utils.cc ### populate operator parameter ${LITE_DIR}/src/ops/populate/conv2d_populate.cc + ${LITE_DIR}/src/ops/populate/arithmetic_populate.cc + ${LITE_DIR}/src/ops/populate/add_populate.cc + ${LITE_DIR}/src/ops/populate/concat_populate.cc + ${LITE_DIR}/src/ops/populate/conv2d_populate.cc + ${LITE_DIR}/src/ops/populate/detection_post_process_populate.cc + ${LITE_DIR}/src/ops/populate/depthwise_conv2d_populate.cc + ${LITE_DIR}/src/ops/populate/full_connection_populate.cc + ${LITE_DIR}/src/ops/populate/pooling_populate.cc + ${LITE_DIR}/src/ops/populate/quant_dtype_cast_populate.cc + ${LITE_DIR}/src/ops/populate/resize_populate.cc + ${LITE_DIR}/src/ops/populate/reshape_populate.cc + ${LITE_DIR}/src/ops/populate/batch_norm_populate.cc + ${LITE_DIR}/src/ops/populate/slice_populate.cc + ${LITE_DIR}/src/ops/populate/while_populate.cc + ${LITE_DIR}/src/ops/populate/matmul_populate.cc + ${LITE_DIR}/src/ops/populate/bias_add_populate.cc + ${LITE_DIR}/src/ops/populate/activation_populate.cc ### tools ${LITE_DIR}/tools/common/flag_parser.cc ) set(LITE_KERNEL_SRC ### nnacl + ${LITE_DIR}/nnacl/common_func.c ${LITE_DIR}/nnacl/base/minimal_filtering_generator.c + ${LITE_DIR}/nnacl/base/arithmetic_base.c + ${LITE_DIR}/nnacl/base/slice_base.c ${LITE_DIR}/nnacl/fp32/winograd_utils.c ${LITE_DIR}/nnacl/fp32/pack_fp32.c ${LITE_DIR}/nnacl/int8/quantize.c @@ -128,13 +169,138 @@ set(LITE_KERNEL_SRC ${LITE_DIR}/nnacl/int8/matmul_int8.c ${LITE_DIR}/nnacl/int8/fixed_point.c ${LITE_DIR}/nnacl/fp32/matmul_fp32.c + ${LITE_DIR}/nnacl/int8/arithmetic_int8.c + ${LITE_DIR}/nnacl/int8/add_int8.c + ${LITE_DIR}/nnacl/int8/concat_int8.c + ${LITE_DIR}/nnacl/int8/conv_int8.c ${LITE_DIR}/nnacl/int8/conv3x3_int8.c ${LITE_DIR}/nnacl/int8/conv1x1_int8.c ${LITE_DIR}/nnacl/base/conv1x1_base.c + ${LITE_DIR}/nnacl/int8/conv_depthwise_int8.c ${LITE_DIR}/nnacl/int8/deconv_int8.c ${LITE_DIR}/nnacl/int8/common_func_int8.c + ${LITE_DIR}/nnacl/int8/slice_int8.c + ${LITE_DIR}/nnacl/int8/batchnorm_int8.c + ${LITE_DIR}/nnacl/int8/sub_int8.c + ${LITE_DIR}/nnacl/int8/quant_dtype_cast_int8.c + ${LITE_DIR}/nnacl/int8/sigmoid_int8.c + ${LITE_DIR}/nnacl/int8/resize_int8.c + ### infer + ${LITE_DIR}/nnacl/infer/adam_infer.c + ${LITE_DIR}/nnacl/infer/add_sub_grad_infer.c + ${LITE_DIR}/nnacl/infer/addn_infer.c + ${LITE_DIR}/nnacl/infer/apply_momentum_infer.c + ${LITE_DIR}/nnacl/infer/argmin_max_infer.c + ${LITE_DIR}/nnacl/infer/arithmetic_compare_infer.c + ${LITE_DIR}/nnacl/infer/arithmetic_grad_infer.c + ${LITE_DIR}/nnacl/infer/arithmetic_infer.c + ${LITE_DIR}/nnacl/infer/assert_op_infer.c + ${LITE_DIR}/nnacl/infer/assign_add_infer.c + ${LITE_DIR}/nnacl/infer/assign_infer.c + ${LITE_DIR}/nnacl/infer/audio_spectrogram_infer.c + ${LITE_DIR}/nnacl/infer/batch_to_space_infer.c + ${LITE_DIR}/nnacl/infer/bias_grad_infer.c + ${LITE_DIR}/nnacl/infer/binary_cross_entropy_infer.c + ${LITE_DIR}/nnacl/infer/bn_grad_infer.c + ${LITE_DIR}/nnacl/infer/broadcast_to_infer.c + ${LITE_DIR}/nnacl/infer/cast_infer.c + ${LITE_DIR}/nnacl/infer/common_infer.c + ${LITE_DIR}/nnacl/infer/concat_infer.c + ${LITE_DIR}/nnacl/infer/constant_of_shape_infer.c + 
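The populate sources registered above share one job: turn a schema primitive into the plain C OpParameter struct that nnacl kernels consume. A hedged sketch of that pattern (the function name and attribute handling are illustrative, not the real conv2d_populate.cc):

    #include <cstdlib>
    #include <cstring>
    #include "nnacl/op_base.h"

    // Sketch of the populate convention: allocate an OpParameter, zero it,
    // then fill the op type and attributes from the (opaque) primitive.
    OpParameter *PopulateExampleParameter(const void *prim) {
      auto *param = static_cast<OpParameter *>(std::malloc(sizeof(OpParameter)));
      if (param == nullptr) {
        return nullptr;
      }
      std::memset(param, 0, sizeof(OpParameter));
      (void)prim;  // real populate code reads the primitive's attributes here
      return param;
    }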
${LITE_DIR}/nnacl/infer/conv2d_grad_filter_infer.c + ${LITE_DIR}/nnacl/infer/conv2d_grad_input_infer.c + ${LITE_DIR}/nnacl/infer/conv2d_infer.c + ${LITE_DIR}/nnacl/infer/crop_and_resize_infer.c + ${LITE_DIR}/nnacl/infer/crop_infer.c + ${LITE_DIR}/nnacl/infer/custom_extract_features_infer.c + ${LITE_DIR}/nnacl/infer/custom_normalize_infer.c + ${LITE_DIR}/nnacl/infer/custom_predict_infer.c + ${LITE_DIR}/nnacl/infer/deconv2d_infer.c + ${LITE_DIR}/nnacl/infer/dedepthwise_conv2d_infer.c + ${LITE_DIR}/nnacl/infer/depth_to_space_infer.c + ${LITE_DIR}/nnacl/infer/depthwise_conv2d_infer.c + ${LITE_DIR}/nnacl/infer/detection_post_process_infer.c + ${LITE_DIR}/nnacl/infer/dropout_grad_infer.c + ${LITE_DIR}/nnacl/infer/dropout_infer.c + ${LITE_DIR}/nnacl/infer/embedding_lookup_infer.c + ${LITE_DIR}/nnacl/infer/expand_dims_infer.c + ${LITE_DIR}/nnacl/infer/fft_imag_infer.c + ${LITE_DIR}/nnacl/infer/fft_real_infer.c + ${LITE_DIR}/nnacl/infer/fill_infer.c + ${LITE_DIR}/nnacl/infer/flatten_grad_infer.c + ${LITE_DIR}/nnacl/infer/flatten_infer.c + ${LITE_DIR}/nnacl/infer/full_connection_infer.c + ${LITE_DIR}/nnacl/infer/fused_batchnorm_infer.c + ${LITE_DIR}/nnacl/infer/gather_infer.c + ${LITE_DIR}/nnacl/infer/gather_nd_infer.c + ${LITE_DIR}/nnacl/infer/group_conv2d_grad_input_infer.c + ${LITE_DIR}/nnacl/infer/gru_infer.c + ${LITE_DIR}/nnacl/infer/hashtable_lookup_infer.c + ${LITE_DIR}/nnacl/infer/invert_permutation_infer.c + ${LITE_DIR}/nnacl/infer/layer_norm_infer.c + ${LITE_DIR}/nnacl/infer/lin_space_infer.c + ${LITE_DIR}/nnacl/infer/lsh_projection_infer.c + ${LITE_DIR}/nnacl/infer/lstm_infer.c + ${LITE_DIR}/nnacl/infer/matmul_infer.c + ${LITE_DIR}/nnacl/infer/maximum_grad_infer.c + ${LITE_DIR}/nnacl/infer/mean_infer.c + ${LITE_DIR}/nnacl/infer/merge_infer.c + ${LITE_DIR}/nnacl/infer/mfcc_infer.c + ${LITE_DIR}/nnacl/infer/non_max_suppression_infer.c + ${LITE_DIR}/nnacl/infer/one_hot_infer.c + ${LITE_DIR}/nnacl/infer/pad_infer.c + ${LITE_DIR}/nnacl/infer/partial_infer.c + ${LITE_DIR}/nnacl/infer/pooling_grad_infer.c + ${LITE_DIR}/nnacl/infer/pooling_infer.c + ${LITE_DIR}/nnacl/infer/power_infer.c + ${LITE_DIR}/nnacl/infer/prior_box_infer.c + ${LITE_DIR}/nnacl/infer/quant_dtype_cast_infer.c + ${LITE_DIR}/nnacl/infer/random_standard_normal_infer.c + ${LITE_DIR}/nnacl/infer/range_infer.c + ${LITE_DIR}/nnacl/infer/rank_infer.c + ${LITE_DIR}/nnacl/infer/reduce_infer.c + ${LITE_DIR}/nnacl/infer/reshape_infer.c + ${LITE_DIR}/nnacl/infer/resize_infer.c + ${LITE_DIR}/nnacl/infer/rfft_infer.c + ${LITE_DIR}/nnacl/infer/roi_pooling_infer.c + ${LITE_DIR}/nnacl/infer/scatter_nd_infer.c + ${LITE_DIR}/nnacl/infer/select_infer.c + ${LITE_DIR}/nnacl/infer/sgd_infer.c + ${LITE_DIR}/nnacl/infer/shape_infer.c + ${LITE_DIR}/nnacl/infer/size_infer.c + ${LITE_DIR}/nnacl/infer/skip_gram_infer.c + ${LITE_DIR}/nnacl/infer/slice_infer.c + ${LITE_DIR}/nnacl/infer/softmax_cross_entropy_infer.c + ${LITE_DIR}/nnacl/infer/softmax_infer.c + ${LITE_DIR}/nnacl/infer/space_to_batch_infer.c + ${LITE_DIR}/nnacl/infer/space_to_batch_nd_infer.c + ${LITE_DIR}/nnacl/infer/space_to_depth_infer.c + ${LITE_DIR}/nnacl/infer/sparse_softmax_cross_entropy_infer.c + ${LITE_DIR}/nnacl/infer/sparse_to_dense_infer.c + ${LITE_DIR}/nnacl/infer/split_infer.c + ${LITE_DIR}/nnacl/infer/squeeze_infer.c + ${LITE_DIR}/nnacl/infer/stack_infer.c + ${LITE_DIR}/nnacl/infer/strided_slice_grad_infer.c + ${LITE_DIR}/nnacl/infer/strided_slice_infer.c + ${LITE_DIR}/nnacl/infer/switch_infer.c + ${LITE_DIR}/nnacl/infer/tensorlist_fromtensor_infer.c + 
${LITE_DIR}/nnacl/infer/tensorlist_getitem_infer.c + ${LITE_DIR}/nnacl/infer/tensorlist_reserve_infer.c + ${LITE_DIR}/nnacl/infer/tensorlist_setitem_infer.c + ${LITE_DIR}/nnacl/infer/tensorlist_stack_infer.c + ${LITE_DIR}/nnacl/infer/tile_infer.c + ${LITE_DIR}/nnacl/infer/topk_infer.c + ${LITE_DIR}/nnacl/infer/transpose_infer.c + ${LITE_DIR}/nnacl/infer/uniform_real_infer.c + ${LITE_DIR}/nnacl/infer/unique_infer.c + ${LITE_DIR}/nnacl/infer/unsorted_segment_sum_infer.c + ${LITE_DIR}/nnacl/infer/unsqueeze_infer.c + ${LITE_DIR}/nnacl/infer/unstack_infer.c + ${LITE_DIR}/nnacl/infer/where_infer.c + ${LITE_DIR}/nnacl/infer/while_infer.c + ${LITE_DIR}/nnacl/infer/splice_infer.c ) -list(APPEND FILE_SET ${CODER_SRC} ${CODER_UTILS_SRC} ${CODER_OPCODERS_SRC} ${CODER_GENERATOR_SRC} - ${CODER_ALLOCATOR_SRC} ${LITE_SRC} ${LITE_KERNEL_SRC}) +list(APPEND FILE_SET ${CODER_SRC} ${CODER_OPCODERS_SRC} ${CODER_GENERATOR_SRC} + ${CODER_ALLOCATOR_SRC} ${LITE_SRC} ${LITE_KERNEL_SRC} ${MINDSPORE_CORE}) diff --git a/mindspore/lite/micro/cmake/package_cmsis.cmake b/mindspore/lite/micro/cmake/package_cmsis.cmake new file mode 100644 index 0000000000..eaed65def9 --- /dev/null +++ b/mindspore/lite/micro/cmake/package_cmsis.cmake @@ -0,0 +1,21 @@ +set(CMSIS_DIR ${LITE_DIR}/micro/build/cmsis) +if(MICRO_CMSIS_X86) + message("build cmsis kernels") + include_directories(${CMSIS_DIR}/CMSIS/Core/Include) + include_directories(${CMSIS_DIR}/CMSIS/DSP/Include) + include_directories(${CMSIS_DIR}/CMSIS/NN/Include) + + file(REMOVE ${CMSIS_DIR}/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c) + + file(GLOB CMSIS_OPS + ${CMSIS_DIR}/CMSIS/NN/Source/BasicMathFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/ActivationFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/ConcatenationFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/ConvolutionFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/FullyConnectedFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/NNSupportFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/PoolingFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/ReshapeFunctions/*.c + ${CMSIS_DIR}/CMSIS/NN/Source/SoftmaxFunctions/*.c + ) +endif() diff --git a/mindspore/lite/micro/cmake/package_micro_ops.cmake b/mindspore/lite/micro/cmake/package_micro_ops.cmake deleted file mode 100644 index 1a43baed2d..0000000000 --- a/mindspore/lite/micro/cmake/package_micro_ops.cmake +++ /dev/null @@ -1,32 +0,0 @@ -include_directories(${NNACL_DIR}/..) 
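Each nnacl/infer source listed above exposes one C entry point that derives output shapes from inputs without touching any C++ IR. A minimal sketch of the convention (the helper names and return codes are assumptions based on common_infer.h, and the identity shape is purely illustrative):

    #include "nnacl/infer/common_infer.h"

    // Sketch: propagate dtype/format from input 0 and reuse its shape.
    int ExampleInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
                          size_t outputs_size, OpParameter *parameter) {
      if (inputs == NULL || outputs == NULL || inputs_size < 1 || outputs_size < 1) {
        return NNACL_NULL_PTR;
      }
      (void)parameter;
      SetDataTypeFormat(outputs[0], inputs[0]);
      SetShapeTensor(outputs[0], inputs[0]);
      return NNACL_OK;
    }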
- -set(CMSIS_SRC ${NNACL_DIR}/../micro/build/cmsis) -if(MICRO_CMSIS_X86) - message("*****build cmsis x86 codes****") - include_directories(${CMSIS_SRC}/CMSIS/Core/Include) - include_directories(${CMSIS_SRC}/CMSIS/DSP/Include) - include_directories(${CMSIS_SRC}/CMSIS/NN/Include) - file(GLOB RUNTIME_KERNEL_CMSIS_SRC - ${CMSIS_SRC}/CMSIS/NN/Source/BasicMathFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/ActivationFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/ConcatenationFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/ConvolutionFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/FullyConnectedFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/NNSupportFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/PoolingFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/ReshapeFunctions/*.c - ${CMSIS_SRC}/CMSIS/NN/Source/SoftmaxFunctions/*.c - ) -endif() - -########################### files ########################### -file(GLOB RUNTIME_KERNEL_SRC - ${NNACL_DIR}/kernel/fp32/*.c - ${NNACL_DIR}/kernel/int8/*.c - ) -if(MICRO_CMSIS_X86) - set(RUNTIME_OPS ${RUNTIME_KERNEL_SRC} ${RUNTIME_TRAIN_SRC} ${RUNTIME_KERNEL_CMSIS_SRC}) -else() - set(RUNTIME_OPS ${RUNTIME_KERNEL_SRC} ${RUNTIME_TRAIN_SRC}) -endif() - diff --git a/mindspore/lite/micro/cmake/package_nnacl.cmake b/mindspore/lite/micro/cmake/package_nnacl.cmake new file mode 100644 index 0000000000..f3f26fc46e --- /dev/null +++ b/mindspore/lite/micro/cmake/package_nnacl.cmake @@ -0,0 +1,20 @@ +include_directories(${LITE_DIR}) +set(NNACL_DIR ${LITE_DIR}/nnacl) +file(GLOB KERNEL_SRC + ${NNACL_DIR}/*.c + ${NNACL_DIR}/base/*.c + ${NNACL_DIR}/fp32/*.c + ${NNACL_DIR}/int8/*.c +) + +if(MICRO_BUILD_ARM64) + file(GLOB ASSEMBLY_SRC ${NNACL_DIR}/assembly/arm64/*.S) + set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C) +endif() + +if(MICRO_BUILD_ARM32A) + file(GLOB ASSEMBLY_SRC ${NNACL_DIR}/assembly/arm32/*.S) + set_property(SOURCE ${ASSEMBLY_SRC} PROPERTY LANGUAGE C) +endif() + +set(NNACL_OPS ${KERNEL_SRC} ${ASSEMBLY_SRC}) diff --git a/mindspore/lite/micro/cmake/package_wrapper.cmake b/mindspore/lite/micro/cmake/package_wrapper.cmake new file mode 100644 index 0000000000..fbaa20b567 --- /dev/null +++ b/mindspore/lite/micro/cmake/package_wrapper.cmake @@ -0,0 +1,25 @@ +include_directories(${LITE_DIR}/micro/coder/operator_library) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") +set(WRAPPER_DIR ${LITE_DIR}/micro/coder/operator_library/wrapper/) + +set(RUNTIME_SRC + ${LITE_DIR}/src/runtime/thread_pool.c + ) + +set(WRAPPER_SRC + ${WRAPPER_DIR}/base/detection_post_process_base_wrapper.c + ${WRAPPER_DIR}/fp32/matmul_fp32_wrapper.c + ${WRAPPER_DIR}/int8/matmul_int8_wrapper.c + ${WRAPPER_DIR}/int8/add_int8_wrapper.c + ${WRAPPER_DIR}/int8/concat_int8_wrapper.c + ${WRAPPER_DIR}/int8/convolution_int8_wrapper.c + ${WRAPPER_DIR}/int8/conv_init_int8_wrapper.c + ${WRAPPER_DIR}/int8/conv1x1_init_int8_wrapper.c + ${WRAPPER_DIR}/int8/conv1x1_run_int8_wrapper.c + ${WRAPPER_DIR}/int8/convolution_depthwise_int8_wrapper.c + ${WRAPPER_DIR}/int8/resize_int8_wrapper.c + ${WRAPPER_DIR}/int8/slice_int8_wrapper.c + ${WRAPPER_DIR}/int8/batchnorm_int8_wrapper.c + ) + +list(APPEND FILE_SET ${WRAPPER_SRC} ${RUNTIME_SRC}) diff --git a/mindspore/lite/micro/cmake/wrapper.cmake b/mindspore/lite/micro/cmake/wrapper.cmake deleted file mode 100644 index 5c2fd0c21f..0000000000 --- a/mindspore/lite/micro/cmake/wrapper.cmake +++ /dev/null @@ -1,12 +0,0 @@ -SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread") - -set(MICRO_WRAPPER_SRC - ${LITE_DIR}/src/runtime/thread_pool.c - ${MICRO_DIR}/wrapper/fp32/matmul_fp32_wrapper.c - 
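A side note on package_nnacl.cmake above: declaring the .S assembly sources as LANGUAGE C is deliberate. It routes them through the C compiler driver, whose preprocessor handles .S files, so the micro build needs no separately configured ASM toolchain.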
${MICRO_DIR}/wrapper/int8/matmul_int8_wrapper.c - ${MICRO_DIR}/wrapper/int8/conv_init_int8_wrapper.c - ${MICRO_DIR}/wrapper/int8/conv1x1_init_int8_wrapper.c - ${MICRO_DIR}/wrapper/int8/conv1x1_run_int8_wrapper.c - ) - -list(APPEND FILE_SET ${MICRO_WRAPPER_SRC}) \ No newline at end of file diff --git a/mindspore/lite/micro/coder/CMakeLists.txt b/mindspore/lite/micro/coder/CMakeLists.txt index 0cd7aefebf..97a05fe2e1 100644 --- a/mindspore/lite/micro/coder/CMakeLists.txt +++ b/mindspore/lite/micro/coder/CMakeLists.txt @@ -1,7 +1,10 @@ +add_definitions(-DUSE_GLOG) + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections ") -set(MICRO_DIR ${CMAKE_CURRENT_SOURCE_DIR}/..) -set(LITE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../..) set(3RD_DIR ${TOP_DIR}/third_party) +set(LITE_DIR ${TOP_DIR}/mindspore/lite) +set(MICRO_DIR ${LITE_DIR}/micro) + if(ENABLE_CONVERTER) set(CODEGEN_PATH ${CMAKE_BINARY_DIR}/micro/coder/codegen) else() @@ -13,17 +16,19 @@ include_directories(${3RD_DIR}) include_directories(${3RD_DIR}/flatbuffers/include) #include ms include_directories(${TOP_DIR}/) -include_directories(${LITE_DIR}) include_directories(${TOP_DIR}/mindspore/core/) - +include_directories(${LITE_DIR}) +include_directories(${MICRO_DIR}) #include coder -include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../) +include(${TOP_DIR}/cmake/external_libs/cmsis.cmake) include(${MICRO_DIR}/cmake/file_list.cmake) -include(${MICRO_DIR}/cmake/wrapper.cmake) +include(${MICRO_DIR}/cmake/package_wrapper.cmake) +add_subdirectory(operator_library) + add_executable(codegen main.cc ${FILE_SET}) add_dependencies(codegen fbs_src) add_dependencies(codegen fbs_inner_src) -target_link_libraries(codegen PRIVATE ${SECUREC_LIBRARY}) -if(NOT WIN32) - add_custom_command(TARGET codegen POST_BUILD COMMAND strip ${CODEGEN_PATH}) +target_link_libraries(codegen PRIVATE ${SECUREC_LIBRARY} mindspore::glog) +if(NOT WIN32 AND "${CMAKE_BUILD_TYPE}" STREQUAL "Release") + add_custom_command(TARGET codegen POST_BUILD COMMAND strip ${CODEGEN_PATH}) endif() diff --git a/mindspore/lite/micro/coder/allocator/allocator.cc b/mindspore/lite/micro/coder/allocator/allocator.cc index 71c4f5d72f..61674a3f77 100644 --- a/mindspore/lite/micro/coder/allocator/allocator.cc +++ b/mindspore/lite/micro/coder/allocator/allocator.cc @@ -22,11 +22,9 @@ namespace mindspore::lite::micro { void *MemoryAllocator::MallocWeightTensor(TypeId type_id, size_t size, MallocType type) { - static const std::map size_map = {{kNumberTypeFloat32, sizeof(float)}, - {kNumberTypeInt32, sizeof(int)}, - {kNumberTypeInt32, sizeof(int32_t)}, - {kNumberTypeInt16, sizeof(int16_t)}, - {kNumberTypeInt8, sizeof(int8_t)}}; + static const std::map size_map = { + {kNumberTypeFloat, sizeof(float)}, {kNumberTypeFloat32, sizeof(float)}, {kNumberTypeInt32, sizeof(int32_t)}, + {kNumberTypeInt16, sizeof(int16_t)}, {kNumberTypeInt8, sizeof(int8_t)}, {kNumberTypeUInt8, sizeof(uint8_t)}}; auto item = size_map.find(type_id); MS_CHECK_TRUE_RET_NULL(item != size_map.end(), "unsupported type idnex"); size_t type_size = item->second; diff --git a/mindspore/lite/micro/coder/allocator/allocator.h b/mindspore/lite/micro/coder/allocator/allocator.h index f4f6614ca0..660d48e121 100644 --- a/mindspore/lite/micro/coder/allocator/allocator.h +++ b/mindspore/lite/micro/coder/allocator/allocator.h @@ -73,7 +73,7 @@ class MemoryAllocator { if (type != kWorkspace) { return MallocWeightTensor(type_id, size, type); } - if (size == 0 && size >= UINT_MAX) { + if (size == 0 || size >= UINT_MAX) { return nullptr; } @@ -94,12 +94,12 
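One subtlety behind the size_map rewrite above: a std::map built from an initializer list keeps only the first of two entries with the same key, so the old duplicate kNumberTypeInt32 pair was redundant at best, while kNumberTypeFloat and kNumberTypeUInt8 were missing altogether. A self-contained demonstration of that behavior:

    #include <cstdio>
    #include <map>

    int main() {
      // Duplicate keys in an initializer list: the second pair is discarded.
      std::map<int, int> m = {{1, 10}, {1, 20}};
      std::printf("size=%zu value=%d\n", m.size(), m[1]);  // prints size=1 value=10
      return 0;
    }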
@@ class MemoryAllocator { template std::string GetRuntimeAddr(T t, bool is_const = false) { if (!t) { - return "NULL"; + return ""; } std::string type_info = is_const ? "const " : ""; std::string type_name; if (std::type_index(typeid(T)) == std::type_index(typeid(Tensor *))) { - type_name = GetTensorDataType(reinterpret_cast(t)->data_type()) + " *"; + type_name = GetTensorDataType(reinterpret_cast(t)->data_type()) + "*"; } else { type_name = GetVariableTypeName(); } diff --git a/mindspore/lite/micro/coder/coder.cc b/mindspore/lite/micro/coder/coder.cc index 74d99b26f2..44315114cf 100644 --- a/mindspore/lite/micro/coder/coder.cc +++ b/mindspore/lite/micro/coder/coder.cc @@ -34,19 +34,20 @@ namespace mindspore::lite::micro { class CoderFlags : public virtual FlagParser { public: CoderFlags() { - AddFlag(&CoderFlags::is_weight_file_, "isWeightFile", "whether generating weight .net file, true| false", false); + AddFlag(&CoderFlags::is_weight_file_, "isWeightFile", "whether generating weight binary file, true| false", false); AddFlag(&CoderFlags::model_path_, "modelPath", "Input model path", ""); AddFlag(&CoderFlags::code_path_, "codePath", "Input code path", "."); AddFlag(&CoderFlags::code_module_name_, "moduleName", "Input code module name", ""); - AddFlag(&CoderFlags::target_, "target", "generateed code target, x86| ARM32M| ARM32A| ARM64", "x86"); - AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Normal | Inference | Train", "Normal"); - AddFlag(&CoderFlags::debug_mode_, "debugMode", "dump perlayer's time cost and tensor, true | false", false); + AddFlag(&CoderFlags::target_, "target", "generated code target, x86| ARM32M| ARM32A| ARM64", "x86"); + AddFlag(&CoderFlags::code_mode_, "codeMode", "generated code mode, Inference | Train", "Inference"); + AddFlag(&CoderFlags::support_parallel_, "supportParallel", "whether support parallel launch, true | false", false); + AddFlag(&CoderFlags::debug_mode_, "debugMode", "dump the tensors data for debugging, true | false", false); } ~CoderFlags() override = default; - public: std::string model_path_; + bool support_parallel_{false}; bool is_weight_file_{false}; std::string code_module_name_; std::string code_path_; @@ -87,8 +88,7 @@ int Coder::Run(const std::string &model_path) { int Coder::Init(const CoderFlags &flags) const { static const std::map kTargetMap = { {"x86", kX86}, {"ARM32M", kARM32M}, {"ARM32A", kARM32A}, {"ARM64", kARM64}, {"All", kAllTargets}}; - static const std::map kCodeModeMap = { - {"Normal", Code_Normal}, {"Inference", Code_Inference}, {"Train", Code_Train}}; + static const std::map kCodeModeMap = {{"Inference", Inference}, {"Train", Train}}; Configurator *config = Configurator::GetInstance(); @@ -112,6 +112,11 @@ int Coder::Init(const CoderFlags &flags) const { return true; }); + parsers.emplace_back([&flags, config]() -> bool { + config->set_support_parallel(flags.support_parallel_); + return true; + }); + parsers.emplace_back([&flags, config]() -> bool { config->set_debug_mode(flags.debug_mode_); return true; diff --git a/mindspore/lite/micro/coder/coder_config.h b/mindspore/lite/micro/coder/coder_config.h index d1b89b6b36..949b420578 100644 --- a/mindspore/lite/micro/coder/coder_config.h +++ b/mindspore/lite/micro/coder/coder_config.h @@ -21,7 +21,7 @@ namespace mindspore::lite::micro { enum Target { kX86 = 0, kARM32M = 1, kARM32A = 2, kARM64 = 3, kAllTargets = 4, kTargetUnknown = 99 }; -enum CodeMode { Code_Normal = 0, Code_Inference = 1, Code_Train = 2, Code_Unknown = 99 }; +enum CodeMode { Inference = 
0, Train = 1, Code_Unknown = 99 }; class Configurator { public: @@ -36,9 +36,6 @@ class Configurator { void set_code_path(const std::string &code_path) { code_path_ = code_path; } std::string code_path() const { return code_path_; } - void set_subgraph_(const std::string &subgraph) { sub_graph_ = subgraph; } - std::string sub_graph() { return sub_graph_; } - void set_target(Target target) { target_ = target; } Target target() const { return target_; } @@ -51,16 +48,19 @@ class Configurator { void set_is_weight_file(bool flag) { is_weight_file_ = flag; } bool is_weight_file() const { return is_weight_file_; } + void set_support_parallel(bool parallel) { support_parallel_ = parallel; } + bool support_parallel() const { return support_parallel_; } + private: Configurator() = default; ~Configurator() = default; - bool is_weight_file_{false}; std::string module_name_; std::string code_path_; - std::string sub_graph_; Target target_{kTargetUnknown}; CodeMode code_mode_{Code_Unknown}; + bool is_weight_file_{false}; + bool support_parallel_{false}; bool debug_mode_{false}; }; } // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/context.cc b/mindspore/lite/micro/coder/context.cc index 065879d1cb..781312b2cc 100644 --- a/mindspore/lite/micro/coder/context.cc +++ b/mindspore/lite/micro/coder/context.cc @@ -14,9 +14,9 @@ * limitations under the License. */ -#include "micro/coder/context.h" -#include "micro/coder/coder_config.h" -#include "micro/coder/allocator/allocator.h" +#include "coder/context.h" +#include "coder/coder_config.h" +#include "coder/allocator/allocator.h" namespace mindspore::lite::micro { CoderContext::CoderContext() { diff --git a/mindspore/lite/micro/coder/generator/component/benchmark_component.cc b/mindspore/lite/micro/coder/generator/component/benchmark_component.cc index 71ab14bfdd..1b24efc4ed 100644 --- a/mindspore/lite/micro/coder/generator/component/benchmark_component.cc +++ b/mindspore/lite/micro/coder/generator/component/benchmark_component.cc @@ -108,7 +108,7 @@ void CodeBenchmarkSetBuffer(std::ofstream &ofs, const std::string &module_name) << "_SetBuffer(buffer);\n" " if (ret != RET_OK) {\n" " MICRO_ERROR(\"set inputs failed\");\n" - " return RET_ERROR;" + " return RET_ERROR;\n" " }\n"; } @@ -128,19 +128,6 @@ void CodeBenchmarkInitWeight(std::ofstream &ofs, const std::string &module_name) " weight_buffer = NULL;\n"; } -void CodeBenchmarkConfigThread(std::ofstream &ofs) { - ofs << " int thread_num = 4;\n" - " BindMode bind_mode = NO_BIND_MODE;\n" - " if (argc >= 6) {\n" - " thread_num = atoi(argv[4]);\n" - " bind_mode = atoi(argv[5]);\n" - " }\n" - " ret = ConfigThreadPool(THREAD_POOL_DEFAULT, thread_num, bind_mode);\n" - " if (ret != 0) {\n" - " MICRO_ERROR(\"create thread pool failed\");\n" - " }\n"; -} - void CodeBenchmarkInference(std::ofstream &ofs, const std::string &module_name) { ofs << " if (argc >= 4) {\n" << " " << module_name << "_WarmUp();\n" @@ -170,7 +157,6 @@ void CodeBenchmarkPrintOutputs(std::ofstream &ofs, const std::string &module_nam " PrintTensorData(tensor);\n" " }\n"; ofs << " printf(\"" << module_name << " inference success.\\n\");\n"; - ofs << " free(buffer);\n"; } /** diff --git a/mindspore/lite/micro/coder/generator/component/benchmark_component.h b/mindspore/lite/micro/coder/generator/component/benchmark_component.h index e73e916c2d..a19a5c9ef9 100644 --- a/mindspore/lite/micro/coder/generator/component/benchmark_component.h +++ b/mindspore/lite/micro/coder/generator/component/benchmark_component.h @@ -39,8 +39,6 @@ 
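Taken together, the reworked flags above imply an invocation along these lines (flag names come straight from the AddFlag calls; the binary name, paths, and values are illustrative, and --name=value syntax is assumed for the flag parser):

    ./codegen --modelPath=mobilenet.ms --moduleName=net --codePath=./output \
        --target=ARM64 --codeMode=Inference --supportParallel=true --isWeightFile=true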
void CodeBenchmarkSetBuffer(std::ofstream &ofs, const std::string &module_name); void CodeBenchmarkInitWeight(std::ofstream &ofs, const std::string &module_name); -void CodeBenchmarkConfigThread(std::ofstream &ofs); - void CodeBenchmarkInference(std::ofstream &ofs, const std::string &module_name); void CodeBenchmarkPrintOutputs(std::ofstream &ofs, const std::string &module_name); diff --git a/mindspore/lite/micro/coder/generator/component/cmake_component.cc b/mindspore/lite/micro/coder/generator/component/cmake_component.cc index 58cd691d8c..9096f3872e 100644 --- a/mindspore/lite/micro/coder/generator/component/cmake_component.cc +++ b/mindspore/lite/micro/coder/generator/component/cmake_component.cc @@ -24,10 +24,9 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, con Target target) { ofs << "include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../include/)\n"; if (target == kARM32M) { - ofs << "include_directories(${OP_HEADER_PATH}/cmsis)\n" - << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/NN/Include)\n" - << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/DSP/Include)\n" - << "include_directories(${OP_HEADER_PATH}/cmsis/CMSIS/Core/Include)\n"; + ofs << "include_directories(${OP_HEADER_PATH}/CMSIS/NN/Include)\n" + << "include_directories(${OP_HEADER_PATH}/CMSIS/DSP/Include)\n" + << "include_directories(${OP_HEADER_PATH}/CMSIS/Core/Include)\n"; } ofs << "set(OP_SRC\n"; for (const std::string &c_file : ctx->c_files()) { @@ -38,7 +37,7 @@ void CodeCMakeNetLibrary(std::ofstream &ofs, const std::string &module_name, con << ")\n"; std::set kernel_cmake_asm_set_files = ctx->asm_files(); - if (!kernel_cmake_asm_set_files.empty()) { + if (!kernel_cmake_asm_set_files.empty() && (target == kARM32A || target == kARM64)) { ofs << "set(ASSEMBLY_SRC\n"; for (const std::string &asm_file : kernel_cmake_asm_set_files) { ofs << " " << asm_file << ".o\n"; diff --git a/mindspore/lite/micro/coder/generator/component/common_component.cc b/mindspore/lite/micro/coder/generator/component/common_component.cc index fabf32e71f..feb585f21b 100644 --- a/mindspore/lite/micro/coder/generator/component/common_component.cc +++ b/mindspore/lite/micro/coder/generator/component/common_component.cc @@ -26,7 +26,7 @@ namespace mindspore::lite::micro { void CodeSourceFileInclude(std::ofstream &ofs, const std::string &weight_file, const std::string &header) { ofs << g_hwLicense << "#include \"microtensor.h\"\n" << "#include \"" << weight_file << "\"\n" - << "#include \"" << header << "\"\n"; + << "#include \"" << header << "\"\n\n"; } void CodeInputAndOutputState(std::ofstream &ofs, const std::string &module_name) { @@ -53,13 +53,13 @@ void PrintMicroTensors(std::ofstream &ofs, std::vector tensors, const MS_LOG(ERROR) << "nonexistent tensor"; break; } - ofs << " static int dim[] = {"; + ofs << " static int dim" << i << "[] = {"; for (size_t j = 0; j < tensor->shape().size(); ++j) { ofs << tensor->shape()[j] << ", "; } ofs << "};\n" << " " << name << "[" << i << "].ndim = " << tensor->shape().size() << ";\n" - << " " << name << "[" << i << "].dim = dim;\n" + << " " << name << "[" << i << "].dim = dim" << i << ";\n" << " " << name << "[" << i << "].type = " << EnumMicroTensorDataType(tensor->data_type()) << ";\n" << " " << name << "[" << i << "].format = " << std::to_string(tensor->format()) << ";\n" << " " << name << "[" << i << "].data =" << item->second << ";\n"; @@ -69,7 +69,6 @@ void PrintMicroTensors(std::ofstream &ofs, std::vector tensors, const void 
CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr &ctx) { // input tensors - ofs << "\n// input tensors\n"; std::vector inputs = ctx->graph_inputs(); for (size_t i = 0; i < inputs.size(); ++i) { ofs << "static const unsigned char *" << ctx->input_name() + std::to_string(i) << " = 0;\n"; @@ -88,7 +87,6 @@ void CodeInputAndOutputImplement(std::ofstream &ofs, const std::string &module_n ofs << " return RET_OK;\n}\n"; // output tensors - ofs << "\n// output tensors\n"; std::vector outputs = ctx->graph_outputs(); size_t output_num = outputs.size(); std::string output_name = ctx->output_name(); @@ -158,7 +156,7 @@ void CodeManageResourceState(std::ofstream &ofs, const std::string &module_name) void CodeInitResourceImplement(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr &ctx) { - ofs << "int " << module_name << "deconv_GetBufferSize() {\n" + ofs << "int " << module_name << "_GetBufferSize() {\n" << " return " << ctx->total_buffer_size() << ";\n" << "}\n"; ofs << "int " << module_name << "_SetBuffer( void *buffer) {\n"; diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.h b/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.h index 4facccfec8..3e0166c6f1 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.h +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/cmake_lists.h @@ -14,10 +14,10 @@ * limitations under the License. */ -#ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ #define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ -static const char bench_cmake_lists_txt[] = +const char *bench_cmake_lists_txt = "cmake_minimum_required(VERSION 3.14)\n" "project(${PROJ_NAME})\n" "\n" @@ -55,9 +55,9 @@ static const char bench_cmake_lists_txt[] = "link_directories(${MODEL_LIB_PATH})\n" "include(benchmark.cmake)\n" "add_executable(${PROJ_NAME}_bench ${SRC_FILES})\n" - "target_link_libraries(${PROJ_NAME}_bench ${MODEL_LIB_NAME} -lm)\n"; + "target_link_libraries(${PROJ_NAME}_bench ${MODEL_LIB_NAME} -lm -pthread)\n"; -static const char src_cmake_lists_txt[] = +const char *src_cmake_lists_txt = "cmake_minimum_required(VERSION 3.14)\n" "project(${PROJ_NAME})\n" "\n" @@ -112,4 +112,4 @@ static const char src_cmake_lists_txt[] = "string(CONCAT library_name \"lib\" ${PROJ_NAME} \".a\")\n" "create_library()\n"; -#endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_CMAKE_LISTS_CODE_H_ diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/debug_utils.h b/mindspore/lite/micro/coder/generator/component/const_blocks/debug_utils.h index 995b80b972..9af8eb6043 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/debug_utils.h +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/debug_utils.h @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
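The dim -> dim<i> change in PrintMicroTensors above fixes a genuine codegen bug: with more than one tensor, the emitted function redefined the same static int dim[] in a single scope and failed to compile. After the fix the emitter produces one array per tensor, along these lines (shapes invented):

    static int dim0[] = {1, 224, 224, 3, };
    static int dim1[] = {1, 1001, };
    // inputs[0].ndim = 4; inputs[0].dim = dim0; ...
    // inputs[1].ndim = 2; inputs[1].dim = dim1; ...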
*/ -#ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BEN_DEBUG_UTILS_H_ -#define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BEN_DEBUG_UTILS_H_ +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_DEBUG_UTILS_H_ +#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_DEBUG_UTILS_H_ -static const char debug_utils_h[] = +const char *debug_utils_h = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -50,7 +50,7 @@ static const char debug_utils_h[] = "\n" "#endif // MINDSPORE_LITE_MICRO_MICRODEBUGUTIL_H_\n"; -static const char debug_utils_c[] = +const char *debug_utils_c = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -239,7 +239,7 @@ static const char debug_utils_c[] = "}\n" "\n" "void PrintTensor(MicroTensor *tensor, FILE *output_file, const char *is_input) {\n" - " if (output_file != NULL) {\n" + " if (output_file == NULL) {\n" " MICRO_ERROR(\"output file is NULL\");\n" " return;\n" " }\n" @@ -269,4 +269,4 @@ static const char debug_utils_c[] = " return retval;\n" "}\n"; -#endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BEN_DEBUG_UTILS_H_ +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_DEBUG_UTILS_H_ diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/license.h b/mindspore/lite/micro/coder/generator/component/const_blocks/license.h index 6dcd967ca4..825f3b1721 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/license.h +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/license.h @@ -14,12 +14,12 @@ * limitations under the License. */ -#ifndef MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H -#define MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H +#ifndef MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H_ +#define MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H_ namespace mindspore::lite::micro { -const char g_hwLicense[] = +static const char *g_hwLicense = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -37,4 +37,4 @@ const char g_hwLicense[] = " */\n\n"; } // namespace mindspore::lite::micro -#endif // MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H +#endif // MINDSPORE_LITE_MICRO_GENERATOR_CONST_BLOCK_LICENSE_INFOS_H_ diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/load_input.h b/mindspore/lite/micro/coder/generator/component/const_blocks/load_input.h index 9870e0b229..119b5da6ac 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/load_input.h +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/load_input.h @@ -14,9 +14,9 @@ * limitations under the License. 
*/ -#ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ -#define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ -static const char load_input_h[] = +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ +#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ +const char *load_input_h = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -43,7 +43,7 @@ static const char load_input_h[] = "\n" "#endif // MICRO_EXAMPLE_LOAD_INPUT_LOAD_INPUT_H_\n"; -static const char load_input_c[] = +const char *load_input_c = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -131,11 +131,11 @@ static const char load_input_c[] = " int size = 0;\n" " buffers[i] = ReadInputData(inputs_path[i], &size);\n" " if (size != inputs_size[i] || buffers[i] == NULL) {\n" - " printf(\"size mismatch, %s, %d, %d\\n\", inputs_path[i], size, inputs_size[i]);\n" + " printf(\"size mismatch, %s, input: %d, needed: %d\\n\", inputs_path[i], size, inputs_size[i]);\n" " return -1;\n" " }\n" " }\n" " return 0;\n" "}\n"; -#endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_BENCH_LOAD_INPUT_H_ diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/micro_tensor.h b/mindspore/lite/micro/coder/generator/component/const_blocks/micro_tensor.h index eacb7416ed..79eb4484dc 100644 --- a/mindspore/lite/micro/coder/generator/component/const_blocks/micro_tensor.h +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/micro_tensor.h @@ -13,10 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#ifndef MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ -#define MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ +#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ -static const char micro_tensor_h[] = +const char *micro_tensor_h = "/**\n" " * Copyright 2021 Huawei Technologies Co., Ltd\n" " *\n" @@ -42,20 +42,8 @@ static const char micro_tensor_h[] = "#include \n" "#include \n" "\n" - "inline bool IsPrint() {\n" - " char *env = getenv(\"GLOG_v\");\n" - " if (env == NULL) {\n" - " return false;\n" - " }\n" - " return strcmp(env, \"1\") == 0;\n" - "}\n" - "\n" - "#define MICRO_INFO(content, args...) \\\n" - " { \\\n" - " if (IsPrint()) { \\\n" - " printf(\"[INFO] %s|%d: \" #content \"\\r\\n\", __func__, __LINE__, ##args); \\\n" - " } \\\n" - " }\n" + "#define MICRO_INFO(content, args...) \\\n" + " { printf(\"[INFO] %s|%d: \" #content \"\\r\\n\", __func__, __LINE__, ##args); }\n" "#define MICRO_ERROR(content, args...) 
\\\n" " { printf(\"[ERROR] %s|%d: \" #content \"\\r\\n\", __func__, __LINE__, ##args); }\n" "\n" @@ -115,4 +103,4 @@ static const char micro_tensor_h[] = "} GraphQuantArgs;\n" "\n" "#endif // MSMICRO_TENSOR_H\n"; -#endif // MICRO_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_MICRO_TENSOR_H_ diff --git a/mindspore/lite/micro/coder/generator/component/const_blocks/thread_pool.h b/mindspore/lite/micro/coder/generator/component/const_blocks/thread_pool.h new file mode 100644 index 0000000000..959eacdca4 --- /dev/null +++ b/mindspore/lite/micro/coder/generator/component/const_blocks/thread_pool.h @@ -0,0 +1,99 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_ +#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_ + +namespace mindspore::lite::micro { + +const char *thread_pool_h = + "/**\n" + " * Copyright 2021 Huawei Technologies Co., Ltd\n" + " *\n" + " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" + " * you may not use this file except in compliance with the License.\n" + " * You may obtain a copy of the License at\n" + " *\n" + " * http://www.apache.org/licenses/LICENSE-2.0\n" + " *\n" + " * Unless required by applicable law or agreed to in writing, software\n" + " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" + " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + " * See the License for the specific language governing permissions and\n" + " * limitations under the License.\n" + " */\n" + "\n" + "#ifndef MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_\n" + "#define MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_\n" + "\n" + "#include \n" + "\n" + "#define MAX_TASK_NUM (2)\n" + "\n" + "/// \\brief BindMode defined for holding bind cpu strategy argument.\n" + "typedef enum {\n" + " NO_BIND_MODE = 0, /**< no bind */\n" + " HIGHER_MODE = 1, /**< bind higher cpu first */\n" + " MID_MODE = 2 /**< bind middle cpu first */\n" + "} BindMode;\n" + "\n" + "struct ThreadPool;\n" + "\n" + "struct ThreadPool *CreateThreadPool(int thread_num, int mode);\n" + "\n" + "/**\n" + " *\n" + " * @param session_index, support multi session\n" + " * @param job\n" + " * @param content\n" + " * @param task_num\n" + " */\n" + "int ParallelLaunch(struct ThreadPool *thread_pool, int (*job)(void *, int), void *content, int task_num);\n" + "\n" + "/**\n" + " * bind each thread to specified cpu core\n" + " * @param is_bind\n" + " * @param mode\n" + " */\n" + "int BindThreads(struct ThreadPool *thread_pool, bool is_bind, int mode);\n" + "\n" + "/**\n" + " * activate the thread pool\n" + " * @param thread_pool_id\n" + " */\n" + "void ActivateThreadPool(struct ThreadPool *thread_pool);\n" + "\n" + "/**\n" + " * deactivate the thread pool\n" + " * @param thread_pool_id\n" + " */\n" + "void DeactivateThreadPool(struct ThreadPool 
*thread_pool);\n" + "\n" + "/**\n" + " *\n" + " * @return current thread num\n" + " */\n" + "int GetCurrentThreadNum(struct ThreadPool *thread_pool);\n" + "\n" + "/**\n" + " * destroy thread pool, and release resource\n" + " */\n" + "void DestroyThreadPool(struct ThreadPool *thread_pool);\n" + "\n" + "#endif // MINDSPORE_LITE_SRC_RUNTIME_THREAD_POOL_H_\n"; +} // namespace mindspore::lite::micro + +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_CONST_BLOCKS_THREAD_POOL_H_ diff --git a/mindspore/lite/micro/coder/generator/component/parallel_component.cc b/mindspore/lite/micro/coder/generator/component/parallel_component.cc new file mode 100644 index 0000000000..a4083498ab --- /dev/null +++ b/mindspore/lite/micro/coder/generator/component/parallel_component.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/generator/component/parallel_component.h" +#include + +namespace mindspore::lite::micro { + +void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name) { + ofs << " int thread_num = 4;\n" + " BindMode bind_mode = NO_BIND_MODE;\n" + " if (argc >= 6) {\n" + " thread_num = atoi(argv[4]);\n" + " bind_mode = atoi(argv[5]);\n" + " }\n" + " struct ThreadPool *thread_pool = CreateThreadPool(thread_num, bind_mode);\n" + " if (thread_pool == NULL) {\n" + " MICRO_ERROR(\"create thread pool failed\");\n" + " return RET_ERROR;\n" + " }\n" + << " ret = " << module_name << "_SetThreadPool(thread_pool);\n" + << " if (ret != RET_OK) {\n" + " MICRO_ERROR(\"set global thread pool failed\");\n" + " return RET_ERROR;\n" + " }\n" + " MICRO_INFO(\"config: ThreadNum: %d, BindMode: %d\", thread_num, bind_mode);\n"; +} + +void CodeDestroyThreadPool(std::ofstream &ofs) { ofs << " DestroyThreadPool(thread_pool);\n"; } + +void CodeSetGlobalThreadPoolState(std::ofstream &ofs, const std::string &module_name) { + ofs << "/*\n" + " * set global thread pool, which is created by user\n" + " */\n" + << "int " << module_name << "_SetThreadPool(struct ThreadPool *thread_pool);\n\n"; +} + +void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs, const std::string &module_name) { + ofs << "struct ThreadPool *g_thread_pool = NULL;\n" + << "int " << module_name << "_SetThreadPool(struct ThreadPool *thread_pool) {\n" + << " if (thread_pool == NULL) {\n" + " return RET_ERROR;\n" + " }\n" + " g_thread_pool = thread_pool;\n" + " return RET_OK;\n" + "}\n"; +} +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/generator/component/parallel_component.h b/mindspore/lite/micro/coder/generator/component/parallel_component.h new file mode 100644 index 0000000000..f92cad26ec --- /dev/null +++ b/mindspore/lite/micro/coder/generator/component/parallel_component.h @@ -0,0 +1,35 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_GENERATOR_PARALLEL_COMPONENT_H_ +#define MINDSPORE_LITE_MICRO_CODER_GENERATOR_PARALLEL_COMPONENT_H_ + +#include +#include + +namespace mindspore::lite::micro { + +void CodeCreateThreadPool(std::ofstream &ofs, const std::string &module_name); + +void CodeDestroyThreadPool(std::ofstream &ofs); + +void CodeSetGlobalThreadPoolState(std::ofstream &ofs, const std::string &module_name); + +void CodeSetGlobalThreadPoolImplement(std::ofstream &ofs, const std::string &module_name); + +} // namespace mindspore::lite::micro + +#endif // MINDSPORE_LITE_MICRO_CODER_GENERATOR_PARALLEL_COMPONENT_H_ diff --git a/mindspore/lite/micro/coder/generator/component/weight_component.cc b/mindspore/lite/micro/coder/generator/component/weight_component.cc index 6475e5f13c..28d2929c5b 100644 --- a/mindspore/lite/micro/coder/generator/component/weight_component.cc +++ b/mindspore/lite/micro/coder/generator/component/weight_component.cc @@ -74,10 +74,10 @@ void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std:: continue; } if (tensor->category() == Tensor::Category::CONST_TENSOR) { - hofs << "extern " << GetTensorDataType(tensor->data_type()) << name << " = [];\n"; - cofs << GetTensorDataType(tensor->data_type()) << name << " = [" << tensor->ElementsNum() << "];\n"; + hofs << "extern " << GetTensorDataType(tensor->data_type()) << name << "[];\n"; + cofs << GetTensorDataType(tensor->data_type()) << name << "[" << tensor->ElementsNum() << "];\n"; } else if (tensor->category() == Tensor::Category::VAR) { - hofs << "extern " << GetTensorDataType(tensor->data_type()) << " *" << name << ";\n"; + hofs << "extern " << GetTensorDataType(tensor->data_type()) << "*" << name << ";\n"; cofs << GetTensorDataType(tensor->data_type()) << "*" << name << " = NULL;\n"; } } @@ -87,7 +87,6 @@ void CodeModelParamsForNet(std::ofstream &hofs, std::ofstream &cofs, const std:: void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, const std::unique_ptr &ctx) { ofs << "int " << module_name << "_Init(void *weight_buffer, int weight_size) {\n" << " if (weight_buffer == NULL) {\n" - " MICRO_ERROR(\"weight buffer is NULL\");\n" << " return RET_ERROR;\n" << " }\n"; @@ -106,8 +105,9 @@ void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, cons if (tensor->category() != Tensor::Category::CONST_TENSOR) { continue; } - auto iter = ctx->tensors_map().find(tensor); - if (iter != ctx->tensors_map().end()) { + std::map ctx_tensor_map = ctx->tensors_map(); + auto iter = ctx_tensor_map.find(tensor); + if (iter != ctx_tensor_map.end()) { origins += " {" + name + ", " + std::to_string(tensor->Size()) + ", " + std::to_string(offset) + "},\n"; params_num++; } else { @@ -115,14 +115,14 @@ void CodeWeightInitFunc(std::ofstream &ofs, const std::string &module_name, cons params += " " + GetTensorDataType(data_type) + "*" + name + " = (weight_buffer + " + std::to_string(offset) + ");\n"; } + offset += tensor->Size(); } - ofs << " struct ModelParameter model_params[] = {\n" << origins << " };\n"; ofs << params << "\n"; + ofs << " struct 
ModelParameter model_params[] = {\n" << origins << " };\n"; ofs << "\n"; ofs << " for(int i = 0; i < " << params_num << "; ++i) {\n" << " if (model_params[i].offset + model_params[i].size > weight_size) {\n" - " MICRO_ERROR(\"buffer is invalid, size: %d, offset: %lu\", weight_size, model_params[i].offset);\n" " return RET_ERROR;\n" " }\n" << " memcpy(model_params[i].addr, (weight_buffer + model_params[i].offset), model_params[i].size);\n" diff --git a/mindspore/lite/micro/coder/generator/generator.cc b/mindspore/lite/micro/coder/generator/generator.cc index 7a8cf54d33..dce78c20db 100644 --- a/mindspore/lite/micro/coder/generator/generator.cc +++ b/mindspore/lite/micro/coder/generator/generator.cc @@ -24,8 +24,9 @@ #include "coder/generator/component/const_blocks/cmake_lists.h" #include "coder/generator/component/const_blocks/debug_utils.h" #include "coder/generator/component/const_blocks/load_input.h" +#include "coder/generator/component/const_blocks/thread_pool.h" #include "coder/generator/component/const_blocks/license.h" -#include "micro/coder/log.h" +#include "coder/log.h" namespace mindspore::lite::micro { int WriteContentToFile(const std::string &file, const std::string &content) { @@ -61,11 +62,13 @@ Generator::~Generator() { (void)umask(origin_umask_); } void Generator::CodeNetRunFunc(std::ofstream &ofs) { // generate net inference code ofs << "void " << config_->module_name() << "_Inference() {\n"; - if (config_->code_mode() == CodeMode::Code_Inference) { - ofs << "int thread_num = GetCurrentThreadNum(THREAD_POOL_DEFAULT);\n"; + if (config_->support_parallel()) { + ofs << " const int g_thread_num = GetCurrentThreadNum(g_thread_pool);\n"; + } else { + ofs << " const int g_thread_num = 1;\n"; } for (const auto &block : ctx_->code_blocks()) { - ofs << "\t{\n" << block << "\t}\n"; + ofs << " {\n" << block << " }\n"; } ofs << "}\n"; } @@ -98,7 +101,7 @@ int Generator::CodeSourceCMakeFile() { } int Generator::CodeStaticContent() { - const std::vector> static_blocks = { + std::vector> static_blocks = { {net_inc_file_path_ + "microtensor.h", micro_tensor_h}, {net_src_file_path_ + "CMakeLists.txt", src_cmake_lists_txt}, {net_main_file_path_ + "debug_utils.h", debug_utils_h}, @@ -106,12 +109,13 @@ int Generator::CodeStaticContent() { {net_main_file_path_ + "load_input.h", load_input_h}, {net_main_file_path_ + "load_input.c", load_input_c}, {net_main_file_path_ + "CMakeLists.txt", bench_cmake_lists_txt}}; + if (config_->support_parallel()) { + static_blocks.emplace_back(net_inc_file_path_ + "thread_pool.h", thread_pool_h); + } for (const auto &static_block : static_blocks) { std::string file_name = static_block.first; std::string content = static_block.second; - if (WriteContentToFile(file_name, content) != RET_OK) { - return RET_ERROR; - } + MS_CHECK_RET_CODE(WriteContentToFile(file_name, content), "write file failed"); } return RET_OK; } diff --git a/mindspore/lite/micro/coder/generator/inference/inference_generator.cc b/mindspore/lite/micro/coder/generator/inference/inference_generator.cc index cfa4c57830..5a29bd7582 100644 --- a/mindspore/lite/micro/coder/generator/inference/inference_generator.cc +++ b/mindspore/lite/micro/coder/generator/inference/inference_generator.cc @@ -18,6 +18,7 @@ #include #include #include "coder/generator/component/common_component.h" +#include "coder/generator/component/parallel_component.h" #include "coder/generator/component/benchmark_component.h" #include "coder/generator/component/const_blocks/license.h" @@ -28,14 +29,17 @@ int 
InferenceGenerator::CodeNetHFile() { MS_CHECK_TRUE(!ofs.bad(), "filed to open file"); MS_LOG(INFO) << "write " << net_include_file; ofs << g_hwLicense; - if (config_->code_mode() == CodeMode::Code_Inference) { - ofs << "#include \"src/runtime/thread_pool.h\"\n"; + if (config_->support_parallel()) { + ofs << "#include \"thread_pool.h\"\n"; } ofs << "#include \"microtensor.h\"\n\n"; CodeInputAndOutputState(ofs, config_->module_name()); if (is_get_quant_args_) { CodeGraphQuantArgsState(ofs, config_->module_name()); } + if (config_->support_parallel()) { + CodeSetGlobalThreadPoolState(ofs, config_->module_name()); + } if (config_->is_weight_file()) { CodeInitWeightState(ofs, config_->module_name()); } @@ -50,6 +54,9 @@ int InferenceGenerator::CodeNetCFile() { MS_CHECK_TRUE(!ofs.bad(), "filed to open file"); MS_LOG(INFO) << "write " << net_impl_file; CodeSourceFileInclude(ofs, net_weight_hfile_, net_inc_hfile_); + if (config_->support_parallel()) { + CodeSetGlobalThreadPoolImplement(ofs, config_->module_name()); + } CodeInputAndOutputImplement(ofs, config_->module_name(), ctx_); CodeInitResourceImplement(ofs, config_->module_name(), ctx_); CodeFreeResourceImplement(ofs, config_->module_name(), ctx_); @@ -78,12 +85,14 @@ int InferenceGenerator::CodeBenchmarkFile() { if (config_->is_weight_file()) { CodeBenchmarkInitWeight(ofs, config_->module_name()); } - if (config_->code_mode() == CodeMode::Code_Inference) { - CodeBenchmarkConfigThread(ofs); + if (config_->support_parallel()) { + CodeCreateThreadPool(ofs, config_->module_name()); } CodeBenchmarkInference(ofs, config_->module_name()); CodeBenchmarkPrintOutputs(ofs, config_->module_name()); - + if (config_->support_parallel()) { + CodeDestroyThreadPool(ofs); + } CodeBenchmarkFreeResourse(ofs, config_->module_name(), inputs_num); ofs.close(); return RET_OK; diff --git a/mindspore/lite/micro/coder/generator/inference/inference_generator.h b/mindspore/lite/micro/coder/generator/inference/inference_generator.h index 60f8092a93..78c8dca218 100644 --- a/mindspore/lite/micro/coder/generator/inference/inference_generator.h +++ b/mindspore/lite/micro/coder/generator/inference/inference_generator.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/generator/generator.h" +#include "coder/generator/generator.h" namespace mindspore::lite::micro { class InferenceGenerator : public Generator { diff --git a/mindspore/lite/micro/coder/generator/train/train_generator.cc b/mindspore/lite/micro/coder/generator/train/train_generator.cc index bfb7d87d0b..fb908c4d9d 100644 --- a/mindspore/lite/micro/coder/generator/train/train_generator.cc +++ b/mindspore/lite/micro/coder/generator/train/train_generator.cc @@ -39,7 +39,7 @@ int TrainGenerator::CodeNetHFile() { MS_CHECK_TRUE(!ofs.bad(), "filed to open file"); MS_LOG(INFO) << "write " << net_include_file; ofs << g_hwLicense; - if (config_->code_mode() == CodeMode::Code_Inference) { + if (config_->code_mode() == CodeMode::Inference) { ofs << "#include \"src/runtime/thread_pool.h\"\n"; } ofs << "#include \"microtensor.h\"\n\n"; diff --git a/mindspore/lite/micro/coder/generator/train/train_generator.h b/mindspore/lite/micro/coder/generator/train/train_generator.h index c18fd2b5b1..e22d4f7f91 100644 --- a/mindspore/lite/micro/coder/generator/train/train_generator.h +++ b/mindspore/lite/micro/coder/generator/train/train_generator.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/generator/generator.h" +#include "coder/generator/generator.h" namespace mindspore::lite::micro { class TrainGenerator : public 
Generator { diff --git a/mindspore/lite/micro/coder/graph.cc b/mindspore/lite/micro/coder/graph.cc index 27980c9e2b..bec25bc076 100644 --- a/mindspore/lite/micro/coder/graph.cc +++ b/mindspore/lite/micro/coder/graph.cc @@ -14,7 +14,7 @@ * limitations under the License. */ -#include "micro/coder/graph.h" +#include "coder/graph.h" #include #include #include @@ -23,7 +23,6 @@ #include #include "coder/log.h" #include "schema/inner/model_generated.h" -#include "src/ops/primitive_c.h" #include "securec/include/securec.h" namespace mindspore::lite::micro { @@ -92,8 +91,15 @@ int CoderGraph::ConvertTensors() { if (quant_params != nullptr) { for (int j = 0; j < static_cast(quant_params->size()); j++) { QuantArg quant_arg{}; + quant_arg.bitNum = quant_params->Get(j)->numBits(); quant_arg.scale = quant_params->Get(j)->scale(); quant_arg.zeroPoint = quant_params->Get(j)->zeroPoint(); + quant_arg.var_corr = quant_params->Get(j)->varCorr(); + quant_arg.mean_corr = quant_params->Get(j)->meanCorr(); + quant_arg.inited = quant_params->Get(j)->inited(); + quant_arg.roundType = quant_params->Get(j)->roundType(); + quant_arg.multiplier = quant_params->Get(j)->multiplier(); + quant_arg.dstDtype = quant_params->Get(j)->dstDtype(); dstTensor->AddQuantParam(quant_arg); } } diff --git a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc index a1aa621a85..4b898da8ee 100644 --- a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.cc @@ -14,12 +14,12 @@ * limitations under the License. */ -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include #include #include "nnacl/fp32/winograd_utils.h" #include "nnacl/int8/quantize.h" -#include "micro/coder/log.h" +#include "coder/log.h" namespace { int MallocConvQuantParams(ConvQuantArg *quant_arg, size_t input_arg_num, size_t filter_arg_num, size_t output_arg_num) { @@ -37,8 +37,8 @@ int MallocConvQuantParams(ConvQuantArg *quant_arg, size_t input_arg_num, size_t } // namespace namespace mindspore::lite::micro { -string Conv2DBaseCoder::LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { - string ret; +std::string Conv2DBaseCoder::LayoutTransformFp32(schema::Format src_format, schema::Format dst_format) { + std::string ret; if (src_format == schema::Format_NHWC && dst_format == schema::Format_NC4HW4) { ret = "PackNHWCToNC4HW4Fp32"; } else if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { @@ -56,8 +56,8 @@ string Conv2DBaseCoder::LayoutTransformFp32(schema::Format src_format, schema::F return ret; } -string Conv2DBaseCoder::LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { - string ret; +std::string Conv2DBaseCoder::LayoutTransformInt8(schema::Format src_format, schema::Format dst_format) { + std::string ret; if (src_format == schema::Format_NHWC && dst_format == schema::Format_NHWC4) { ret = "PackNHWCToNHWC4Int8"; } else { @@ -67,8 +67,8 @@ string Conv2DBaseCoder::LayoutTransformInt8(schema::Format src_format, schema::F return ret; } -string Conv2DBaseCoder::LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) { - string ret; +std::string Conv2DBaseCoder::LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format) { + std::string ret; switch (data_type) { case kNumberTypeInt8: ret = LayoutTransformInt8(src_format, dst_format); 
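These layout helpers return the name of a packing routine as a string rather than calling it; the conv coders splice that name into the generated source. A minimal usage sketch of that pattern (the serializer object, pointer names, and shape values below are illustrative, not taken from this patch):

  // Inside a derived conv coder: pick the pack routine by format pair.
  std::string convert_func = LayoutTransform(kNumberTypeFloat32, schema::Format_NHWC, schema::Format_NC4HW4);
  if (convert_func.empty()) {
    return RET_ERROR;  // unsupported pair; LayoutTransform already logged the error
  }
  int batch = 1, plane = 16, channel = 4;  // illustrative shape arguments
  NNaclFp32Serializer code;
  // String arguments are emitted verbatim, so this generates:
  //   PackNHWCToNC4HW4Fp32(input_ptr, packed_ptr, 1, 16, 4);
  code.CodeFunction(convert_func, "input_ptr", "packed_ptr", batch, plane, channel);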
@@ -197,7 +197,7 @@ int Conv2DBaseCoder::SetQuantMultiplier() { return RET_OK; } -int Conv2DBaseCoder::CheckResizeValid() { +int Conv2DBaseCoder::CheckResizeValid() const { // ===============check in channel================= // int32_t filter_in_channel = filter_tensor_->Channel(); int32_t resize_in_channel = input_tensor_->Channel(); @@ -206,12 +206,39 @@ int Conv2DBaseCoder::CheckResizeValid() { return RET_OK; } +void Conv2DBaseCoder::SetRoundingAndMultipilerMode() { + auto input_quant_arg = input_tensor_->quant_params().front(); + int round_type = input_quant_arg.roundType; + switch (round_type) { + case 1: + conv_quant_arg_->round_mode_ = Rounding_Away_from_zero; + break; + case 2: + conv_quant_arg_->round_mode_ = Rounding_Up; + break; + default: + conv_quant_arg_->round_mode_ = Rounding_No; + } + int cal_multiplier_type = input_quant_arg.multiplier; + switch (cal_multiplier_type) { + case 0: + conv_quant_arg_->quant_multiplier_mode_ = Method_SinglePrecision; + break; + case 1: + conv_quant_arg_->quant_multiplier_mode_ = Method_DoublePrecision; + break; + default: + conv_quant_arg_->quant_multiplier_mode_ = Method_No; + } +} + int Conv2DBaseCoder::SetQuantParam() { MS_CHECK_RET_CODE(MallocQuantParam(), "Malloc quant param failed."); MS_CHECK_RET_CODE(SetInputTensorQuantParam(), "Set Input Tensor Quant Param Failed."); MS_CHECK_RET_CODE(SetFilterTensorQuantParam(), "Set Filter Tensor Quant Param Failed."); MS_CHECK_RET_CODE(SetOutputTensorQuantParam(), "Set Output Tensor Quant Param Failed."); MS_CHECK_RET_CODE(SetIfPerChannel(), "Set if per tensor channel failed."); + SetRoundingAndMultipilerMode(); MS_CHECK_RET_CODE(SetQuantMultiplier(), "Set Quant Multiplier Failed."); // now only consider per tensor for output MS_CHECK_PTR(conv_param_->conv_quant_arg_.out_act_min_); diff --git a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h index 4ae00fc6e5..982b28f7a7 100644 --- a/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/conv2d_base_coder.h @@ -21,13 +21,11 @@ #include #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "src/runtime/kernel/arm/base/layout_transform.h" #include "nnacl/conv_parameter.h" namespace mindspore::lite::micro { -using std::string; - class Conv2DBaseCoder : public OperatorCoder { public: Conv2DBaseCoder(const std::vector &in_tensors, const std::vector &out_tensors, @@ -47,10 +45,14 @@ class Conv2DBaseCoder : public OperatorCoder { free(conv_quant_arg_->input_quant_args_); free(conv_quant_arg_->filter_quant_args_); free(conv_quant_arg_->output_quant_args_); + conv_param_ = nullptr; + conv_quant_arg_ = nullptr; + filter_tensor_ = nullptr; + bias_tensor_ = nullptr; } protected: - int Init(); + virtual int Init(); int SetQuantParam(); @@ -62,19 +64,21 @@ class Conv2DBaseCoder : public OperatorCoder { int SetOutputTensorQuantParam(); + void SetRoundingAndMultipilerMode(); + int SetQuantMultiplier(); - int CheckResizeValid(); + int CheckResizeValid() const; int SetIfPerChannel(); int CheckLayout(lite::Tensor *input_tensor); - string LayoutTransformFp32(schema::Format src_format, schema::Format dst_format); + std::string LayoutTransformFp32(schema::Format src_format, schema::Format dst_format); - string LayoutTransformInt8(schema::Format src_format, schema::Format dst_format); + std::string LayoutTransformInt8(schema::Format src_format, schema::Format dst_format); - string 
LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format); + std::string LayoutTransform(TypeId data_type, schema::Format src_format, schema::Format dst_format); ConvParameter *conv_param_{nullptr}; @@ -84,7 +88,7 @@ class Conv2DBaseCoder : public OperatorCoder { Tensor *bias_tensor_{nullptr}; - string convert_func_; + std::string convert_func_; }; } // namespace mindspore::lite::micro #endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_BASE_CONV2D_BASE_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.cc new file mode 100644 index 0000000000..cd0684d2d9 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.cc @@ -0,0 +1,153 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/base/detection_post_process_base_coder.h" + +#include "nnacl/int8/quant_dtype_cast_int8.h" + +#include "coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "include/errorcode.h" + +namespace mindspore::lite::micro { + +int DetectionPostProcessBaseCoder::Prepare(CoderContext *const context) { + MS_CHECK_PTR(parameter_); + params_ = reinterpret_cast(parameter_); + params_->anchors_ = nullptr; + params_->decoded_boxes_ = nullptr; + params_->nms_candidate_ = nullptr; + params_->indexes_ = nullptr; + params_->scores_ = nullptr; + params_->all_class_indexes_ = nullptr; + params_->all_class_scores_ = nullptr; + params_->single_class_indexes_ = nullptr; + params_->selected_ = nullptr; + + Tensor *anchor_tensor = input_tensors_.at(2); + MS_CHECK_PTR(anchor_tensor); + if (anchor_tensor->data_type() == kNumberTypeInt8) { + QuantArg quant_param = anchor_tensor->quant_params().at(0); + auto anchor_int8 = reinterpret_cast(anchor_tensor->data_c()); + MS_CHECK_PTR(anchor_int8); + auto anchor_fp32 = static_cast( + allocator_->Malloc(kNumberTypeFloat, anchor_tensor->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(anchor_fp32); + DoDequantizeInt8ToFp32(anchor_int8, anchor_fp32, quant_param.scale, quant_param.zeroPoint, + anchor_tensor->ElementsNum()); + params_->anchors_ = anchor_fp32; + } else if (anchor_tensor->data_type() == kNumberTypeUInt8) { + QuantArg quant_param = anchor_tensor->quant_params().front(); + auto anchor_uint8 = reinterpret_cast(anchor_tensor->data_c()); + MS_CHECK_PTR(anchor_uint8); + auto anchor_fp32 = static_cast( + allocator_->Malloc(kNumberTypeFloat, anchor_tensor->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(anchor_fp32); + DoDequantizeUInt8ToFp32(anchor_uint8, anchor_fp32, quant_param.scale, quant_param.zeroPoint, + anchor_tensor->ElementsNum()); + params_->anchors_ = anchor_fp32; + } else if (anchor_tensor->data_type() == kNumberTypeFloat32 || anchor_tensor->data_type() == kNumberTypeFloat) { + params_->anchors_ = static_cast( + allocator_->Malloc(kNumberTypeFloat, 
anchor_tensor->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(params_->anchors_); + memcpy(params_->anchors_, anchor_tensor->data_c(), anchor_tensor->Size()); + } else { + MS_LOG(ERROR) << "unsupported anchor data type " << anchor_tensor->data_type(); + return RET_ERROR; + } + MS_CHECK_RET_CODE(AllocateBuffer(), "AllocateBuffer failed"); + MS_CHECK_RET_CODE(MallocInputsBuffer(), "malloc inputs buffer failed"); + return RET_OK; +} + +int DetectionPostProcessBaseCoder::AllocateBuffer() { + MS_CHECK_PTR(input_tensors_.at(0)); + MS_CHECK_PTR(input_tensors_.at(1)); + num_boxes_ = input_tensors_.at(0)->shape().at(1); + num_classes_with_bg_ = input_tensors_.at(1)->shape().at(2); + params_->decoded_boxes_ = allocator_->Malloc(kNumberTypeFloat, num_boxes_ * 4 * sizeof(float), kWorkspace); + MS_CHECK_PTR(params_->decoded_boxes_); + params_->nms_candidate_ = allocator_->Malloc(kNumberTypeUInt8, num_boxes_ * sizeof(uint8_t), kWorkspace); + MS_CHECK_PTR(params_->nms_candidate_); + params_->selected_ = allocator_->Malloc(kNumberTypeInt, num_boxes_ * sizeof(int), kWorkspace); + MS_CHECK_PTR(params_->selected_); + params_->single_class_indexes_ = allocator_->Malloc(kNumberTypeInt, num_boxes_ * sizeof(int), kWorkspace); + MS_CHECK_PTR(params_->single_class_indexes_); + + if (params_->use_regular_nms_) { + params_->scores_ = + allocator_->Malloc(kNumberTypeFloat, (num_boxes_ + params_->max_detections_) * sizeof(float), kWorkspace); + MS_CHECK_PTR(params_->scores_); + params_->indexes_ = + allocator_->Malloc(kNumberTypeInt, (num_boxes_ + params_->max_detections_) * sizeof(int), kWorkspace); + MS_CHECK_PTR(params_->indexes_); + params_->all_class_scores_ = + allocator_->Malloc(kNumberTypeFloat, (num_boxes_ + params_->max_detections_) * sizeof(float), kWorkspace); + MS_CHECK_PTR(params_->all_class_scores_); + params_->all_class_indexes_ = + allocator_->Malloc(kNumberTypeInt, (num_boxes_ + params_->max_detections_) * sizeof(int), kWorkspace); + MS_CHECK_PTR(params_->all_class_indexes_); + } else { + params_->scores_ = allocator_->Malloc(kNumberTypeFloat, num_boxes_ * sizeof(float), kWorkspace); + MS_CHECK_PTR(params_->scores_); + params_->indexes_ = + allocator_->Malloc(kNumberTypeFloat, num_boxes_ * params_->num_classes_ * sizeof(int), kWorkspace); + MS_CHECK_PTR(params_->indexes_); + } + return RET_OK; +} + +int DetectionPostProcessBaseCoder::DoCode(CoderContext *const context) { + Collect(context, {"nnacl/detection_post_process_parameter.h", "wrapper/base/detection_post_process_base_wrapper.h"}, + {"detection_post_process_fp32.c", "detection_post_process_base_wrapper.c"}); + + Serializer code; + MS_CHECK_RET_CODE(GetInputData(context, &code), "GetInputData failed"); + Tensor *output_boxes = output_tensors_.at(0); + Tensor *output_classes = output_tensors_.at(1); + Tensor *output_scores = output_tensors_.at(2); + Tensor *output_num = output_tensors_.at(3); + + code.CodeBaseStruct("DetectionPostProcessParameter", "params", params_->op_parameter_, params_->h_scale_, + params_->w_scale_, params_->x_scale_, params_->y_scale_, params_->nms_iou_threshold_, + params_->nms_score_threshold_, params_->max_detections_, params_->detections_per_class_, + params_->max_classes_per_detection_, params_->num_classes_, params_->use_regular_nms_, + params_->out_quantized_, params_->anchors_, params_->decoded_boxes_, params_->nms_candidate_, + params_->indexes_, params_->scores_, params_->all_class_indexes_, params_->all_class_scores_, + params_->single_class_indexes_, params_->selected_); + + 
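+ // Note: CodeBaseStruct above serializes this DetectionPostProcessParameter into the generated
+ // source under the fixed name "params", so the "&params" strings passed to the CodeFunction
+ // calls below are spliced in verbatim. The emitted C looks roughly like this (tensor symbols
+ // and box count illustrative):
+ //   DetectionPostProcessParameter params = {/* op_parameter_, h_scale_, w_scale_, ... */};
+ //   DecodeBoxes(1917, g_input0, g_anchors, &params);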
code.CodeFunction("DecodeBoxes", num_boxes_, input_boxes_, params_->anchors_, "¶ms"); + + if (params_->use_regular_nms_) { + code.CodeFunction("DetectionPostProcessRegular", num_boxes_, num_classes_with_bg_, input_scores_, output_boxes, + output_classes, output_scores, output_num, "PartialArgSort", "¶ms"); + } else { + int task_id = 0; + int thread_num = 1; + code.CodeFunction("NmsMultiClassesFastCore", num_boxes_, num_classes_with_bg_, input_scores_, "PartialArgSort", + "¶ms", task_id, thread_num); + + code.CodeFunction("DetectionPostProcessFast", num_boxes_, num_classes_with_bg_, input_scores_, + "(float *)(params.decoded_boxes_)", output_boxes, output_classes, output_scores, output_num, + "PartialArgSort", "¶ms"); + } + + context->AppendCode(code.str()); + + return RET_OK; +} + +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h new file mode 100644 index 0000000000..dceaaf562e --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/base/detection_post_process_base_coder.h @@ -0,0 +1,54 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_BASE_DETECTION_POST_PROCESS_BASE_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_BASE_DETECTION_POST_PROCESS_BASE_CODER_H_ + +#include +#include +#include +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/detection_post_process_parameter.h" +#include "coder/opcoders/serializers/serializer.h" + +namespace mindspore::lite::micro { + +class DetectionPostProcessBaseCoder : public OperatorCoder { + public: + DetectionPostProcessBaseCoder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~DetectionPostProcessBaseCoder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + protected: + int AllocateBuffer(); + virtual int GetInputData(CoderContext *const context, Serializer *const coder) = 0; + virtual int MallocInputsBuffer() = 0; + + int num_boxes_{0}; + int num_classes_with_bg_{0}; + float *input_boxes_{nullptr}; + float *input_scores_{nullptr}; + DetectionPostProcessParameter *params_{nullptr}; +}; +} // namespace mindspore::lite::micro +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_BASE_DETECTION_POST_PROCESS_BASE_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.cc b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.cc index 409cbb4bbb..f962d75311 100644 --- a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.cc @@ -15,7 +15,7 @@ */ #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "micro/coder/opcoders/file_collector.h" #include "micro/coder/opcoders/base/dtype_cast_coder.h" #include "micro/coder/opcoders/serializers/serializer.h" diff --git a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h index 7e14d21bd0..86087e61dc 100644 --- a/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/dtype_cast_coder.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/int8/quant_dtype_cast_int8.h" namespace mindspore::lite::micro { diff --git a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.cc index 18923e9c65..04c1869066 100644 --- a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.cc @@ -14,10 +14,14 @@ * limitations under the License. 
*/ -#include "micro/coder/opcoders/base/full_connection_base_coder.h" +#include "coder/opcoders/base/full_connection_base_coder.h" namespace mindspore::lite::micro { -FullConnectionBaseCoder::~FullConnectionBaseCoder() { fc_param_ = nullptr; } +FullConnectionBaseCoder::~FullConnectionBaseCoder() { + fc_param_ = nullptr; + filter_tensor_ = nullptr; + bias_tensor_ = nullptr; +} int FullConnectionBaseCoder::Init() { this->fc_param_ = reinterpret_cast(parameter_); diff --git a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h index efff937f11..41a2f2e25f 100644 --- a/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/full_connection_base_coder.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_BASE_FULLY_CONNECTED_BASE_CODER_H_ #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/matmul_parameter.h" namespace mindspore::lite::micro { @@ -29,7 +29,8 @@ class FullConnectionBaseCoder : public OperatorCoder { : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} ~FullConnectionBaseCoder() override; - int Init(); + + virtual int Init(); protected: MatMulParameter *fc_param_{nullptr}; diff --git a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.cc b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.cc index 5598638a27..707b39a534 100644 --- a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.cc @@ -14,61 +14,72 @@ * limitations under the License. */ -#include -#include "micro/coder/opcoders/op_coder.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/opcoders/base/quant_dtype_cast_coder.h" -#include "micro/coder/opcoders/serializers/serializer.h" +#include "coder/opcoders/op_coder.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/base/quant_dtype_cast_coder.h" +#include "coder/opcoders/serializers/serializer.h" +#include "coder/utils/type_cast.h" using mindspore::schema::PrimitiveType_QuantDTypeCast; namespace mindspore::lite::micro { - int QuantDTypeCastCoder::Prepare(CoderContext *const context) { - this->cast_param_ = reinterpret_cast(parameter_); - - if (cast_param_->srcT == kNumberTypeFloat32 && cast_param_->dstT == kNumberTypeInt8) { - if (input_tensor_->data_type() != kNumberTypeFloat32 || output_tensor_->data_type() != kNumberTypeInt8) { - MS_LOG(ERROR) << "cast_param_ data type and tensor data type do not match."; - return RET_ERROR; - } - inverse_ = false; - } else if (cast_param_->srcT == kNumberTypeInt8 && cast_param_->dstT == kNumberTypeFloat32) { - if (input_tensor_->data_type() != kNumberTypeInt8 || output_tensor_->data_type() != kNumberTypeFloat32) { - MS_LOG(ERROR) << "cast_param_ data type and tensor data type do not match."; - return RET_ERROR; - } - inverse_ = true; - } else { - MS_LOG(ERROR) << "cast_param_ data type not supported:" - << " src: " << cast_param_->srcT << " dst: " << cast_param_->dstT; - return RET_PARAM_INVALID; + auto *param = reinterpret_cast(parameter_); + if (input_tensor_->data_type() != static_cast(param->srcT) || + output_tensor_->data_type() != static_cast(param->dstT)) { + MS_LOG(ERROR) << "param data type not supported:" + << " src: " << param->srcT << " dst: " << param->dstT; + return RET_ERROR; } + src_dtype = static_cast(param->srcT); + dst_dtype = 
static_cast(param->dstT); return RET_OK; } int QuantDTypeCastCoder::DoCode(CoderContext *const context) { - // get quant params - QuantArg in_quant_arg = input_tensor_->quant_params().at(0); - - // single thread for now + if (input_tensor_->quant_params().empty() && output_tensor_->quant_params().empty()) { + MS_LOG(ERROR) << "QuantDTypeCast need quantization parameters which is not found."; + return RET_ERROR; + } + auto quant_arg = (!output_tensor_->quant_params().empty() && output_tensor_->quant_params().at(0).inited) + ? output_tensor_->quant_params().at(0) + : input_tensor_->quant_params().at(0); int num_unit_thread = input_tensor_->ElementsNum(); - // generate code .h .c Collect(context, {"nnacl/int8/quant_dtype_cast_int8.h"}, {"quant_dtype_cast_int8.c"}); - Serializer code; code.precision(kPrecision); - std::string function = inverse_ ? "DoDequantizeInt8ToFp32" : "DoQuantizeFp32ToInt8"; - code.CodeFunction(function, input_tensor_, output_tensor_, in_quant_arg.scale, in_quant_arg.zeroPoint, - num_unit_thread); - + if (src_dtype == TypeId::kNumberTypeInt8 && dst_dtype == TypeId::kNumberTypeFloat32) { + code.CodeFunction("DoDequantizeInt8ToFp32", input_tensor_, output_tensor_, quant_arg.scale, quant_arg.zeroPoint, + num_unit_thread); + } else if (src_dtype == TypeId::kNumberTypeFloat32 && dst_dtype == TypeId::kNumberTypeInt8) { + bool from_uint8_src = false; + if (quant_arg.dstDtype == TypeId::kNumberTypeUInt8) { + from_uint8_src = true; + } + code.CodeFunction("DoQuantizeFp32ToInt8", input_tensor_, output_tensor_, quant_arg.scale, quant_arg.zeroPoint, + num_unit_thread, from_uint8_src); + } else if (src_dtype == TypeId::kNumberTypeInt8 && dst_dtype == TypeId::kNumberTypeUInt8) { + code.CodeFunction("Int8ToUInt8", input_tensor_, output_tensor_, num_unit_thread); + } else if (src_dtype == TypeId::kNumberTypeUInt8 && dst_dtype == TypeId::kNumberTypeFloat32) { + code.CodeFunction("DoDequantizeUInt8ToFp32", input_tensor_, output_tensor_, quant_arg.scale, quant_arg.zeroPoint, + num_unit_thread); + } else if (src_dtype == TypeId::kNumberTypeFloat32 && dst_dtype == TypeId::kNumberTypeUInt8) { + code.CodeFunction("DoQuantizeFp32ToUInt8", input_tensor_, output_tensor_, quant_arg.scale, quant_arg.zeroPoint, + num_unit_thread); + } else if (src_dtype == TypeId::kNumberTypeUInt8 && dst_dtype == TypeId::kNumberTypeInt8) { + code.CodeFunction("UInt8ToInt8", input_tensor_, output_tensor_, num_unit_thread); + } else { + MS_LOG(INFO) << "unsupported type cast, src: " << EnumNameDataType(src_dtype) + << ", dst: " << EnumNameDataType(dst_dtype); + return RET_ERROR; + } context->AppendCode(code.str()); - return RET_OK; } REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_QuantDTypeCast, CPUOpCoderCreator) REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_QuantDTypeCast, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeUInt8, PrimitiveType_QuantDTypeCast, CPUOpCoderCreator) } // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h index 7968c82563..276f6c967d 100644 --- a/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/quant_dtype_cast_coder.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/int8/quant_dtype_cast_int8.h" namespace mindspore::lite::micro { @@ -36,10 +36,8 @@ class 
QuantDTypeCastCoder final : public OperatorCoder { int DoCode(CoderContext *const context) override; private: - QuantDTypeCastParameter *cast_param_{nullptr}; - std::vector inputs_; - std::vector outputs_; - bool inverse_{false}; + TypeId src_dtype{kTypeUnknown}; + TypeId dst_dtype{kTypeUnknown}; int thread_num_{0}; int thread_n_num_{0}; int thread_n_stride_{0}; diff --git a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.cc index 862f096f9f..1224f7288d 100644 --- a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.cc @@ -14,16 +14,16 @@ * limitations under the License. */ -#include "micro/coder/opcoders/base/reduce_base_coder.h" +#include "coder/opcoders/base/reduce_base_coder.h" #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" namespace mindspore::lite::micro { namespace { constexpr size_t kInputNum = 1; constexpr size_t kOutputNum = 1; } // namespace -int ReduceBaseCoder::CheckInputsOutputs() { +int ReduceBaseCoder::CheckInputsOutputs() const { if (input_tensors_.size() < kInputNum) { MS_LOG(ERROR) << "Reduce inputs size should be at least " << kInputNum << " but got " << input_tensors_.size(); return RET_ERROR; diff --git a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h index 1b9ee3b5fe..b942c31bb9 100644 --- a/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/reduce_base_coder.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/reduce_parameter.h" namespace mindspore::lite::micro { @@ -31,11 +31,10 @@ class ReduceBaseCoder : public OperatorCoder { ~ReduceBaseCoder() override = default; - int Init(); - virtual int ReSize(); + virtual int Init(); private: - int CheckInputsOutputs(); + int CheckInputsOutputs() const; int CheckParameters(); protected: @@ -54,6 +53,7 @@ class ReduceBaseCoder : public OperatorCoder { int outer_size_{0}; int inner_size_{0}; int axis_size_{0}; + virtual int ReSize(); }; } // namespace mindspore::lite::micro #endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_REDUCE_BASE_CODER_H diff --git a/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.cc new file mode 100644 index 0000000000..737759a868 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.cc @@ -0,0 +1,104 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "coder/opcoders/base/resize_base_coder.h" +#include "coder/opcoders/op_coder.h" + +namespace mindspore::lite::micro { +constexpr int kMaxInputNum = 2; +constexpr int kOutputNum = 1; +constexpr int kSingleNum = 1; +constexpr int kDoubleNum = 2; +constexpr int kQuadrupleNum = 4; + +int ResizeBaseCoder::CheckParameters() { + auto parameter = reinterpret_cast(parameter_); + if (parameter == nullptr) { + MS_LOG(ERROR) << "cast ResizeParameter failed."; + return RET_NULL_PTR; + } + method_ = parameter->method_; + if (method_ != static_cast(schema::ResizeMethod_LINEAR) && + method_ != static_cast(schema::ResizeMethod_NEAREST)) { + MS_LOG(ERROR) << "Resize method should be bilinear or nearest_neighbor, but got " << method_; + return RET_INVALID_OP_ATTR; + } + if (this->input_tensors_.size() == kSingleNum) { + new_height_ = parameter->new_height_; + if (new_height_ < 1) { + MS_LOG(ERROR) << "Resize new_height should >= 1, but got " << new_height_; + return RET_INVALID_OP_ATTR; + } + new_width_ = parameter->new_width_; + if (new_width_ < 1) { + MS_LOG(ERROR) << "Resize new_width should >= 1, but got " << new_width_; + return RET_INVALID_OP_ATTR; + } + } else if (this->input_tensors_.size() == kDoubleNum) { + auto out_shape = this->input_tensors_.at(1)->data_c(); + if (out_shape == nullptr) { + MS_LOG(INFO) << "Out shape is not assigned"; + const_shape_ = false; + } else { + const_shape_ = true; + } + } + coordinate_transform_mode_ = parameter->coordinate_transform_mode_; + preserve_aspect_ratio_ = parameter->preserve_aspect_ratio_; + if (preserve_aspect_ratio_) { + MS_LOG(ERROR) << "Resize currently not support preserve_aspect_ratio true"; + return RET_ERROR; + } + return RET_OK; +} + +int ResizeBaseCoder::CheckInputsOuputs() { + if (input_tensors_.size() <= kQuadrupleNum) { + if (std::any_of(input_tensors_.begin(), input_tensors_.end(), [](const Tensor *t) { return t == nullptr; })) { + return RET_NULL_PTR; + } + } else { + MS_LOG(ERROR) << "Resize input num should be no more than" << kMaxInputNum << ", but got " << input_tensors_.size(); + return RET_ERROR; + } + if (output_tensors_.size() != kOutputNum) { + MS_LOG(ERROR) << "Resize output num should be " << kOutputNum << ", but got " << output_tensors_.size(); + return RET_ERROR; + } + auto output = output_tensors_.at(0); + if (output == nullptr) { + return RET_NULL_PTR; + } + return RET_OK; +} + +int ResizeBaseCoder::Init() { + auto ret = CheckParameters(); + if (ret != RET_OK) { + return ret; + } + ret = CheckInputsOuputs(); + if (ret != RET_OK) { + return ret; + } + auto input_shape = input_tensor_->shape(); + if (!input_shape.empty() && input_shape.size() != COMM_SHAPE_SIZE) { + MS_LOG(ERROR) << "Resize op support input rank 4, got " << input_shape.size(); + return RET_ERROR; + } + return RET_OK; +} +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h new file mode 100644 index 0000000000..1d2ccdc05a --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/base/resize_base_coder.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESIZE_BASE_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESIZE_BASE_CODER_H_ + +#include +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/resize_parameter.h" + +namespace mindspore::lite::micro { +class ResizeBaseCoder : public OperatorCoder { + public: + ResizeBaseCoder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~ResizeBaseCoder() override = default; + + int Init(); + + protected: + int method_{0}; + int new_height_{0}; + int new_width_{0}; + int coordinate_transform_mode_{0}; + bool preserve_aspect_ratio_{false}; + bool const_shape_{false}; + + private: + int CheckParameters(); + int CheckInputsOuputs(); +}; +} // namespace mindspore::lite::micro +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_RESIZE_BASE_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.cc b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.cc index 6b4bd63694..f635c02f2b 100644 --- a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.cc @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "micro/coder/opcoders/base/softmax_base_coder.h" +#include "coder/opcoders/base/softmax_base_coder.h" #include #include diff --git a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h index f446ae44f6..79941dfbfc 100644 --- a/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h +++ b/mindspore/lite/micro/coder/opcoders/base/softmax_base_coder.h @@ -19,14 +19,12 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/softmax_parameter.h" #include "nnacl/int8/quantize.h" namespace mindspore::lite::micro { -using std::string; - class SoftmaxBaseCoder : public OperatorCoder { public: SoftmaxBaseCoder(const std::vector &in_tensors, const std::vector &out_tensors, diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc index e8683ff368..7be14eeb1e 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/add_int8_coder.cc @@ -17,13 +17,13 @@ #include "coder/opcoders/cmsis-nn/int8/add_int8_coder.h" #include #include -#include "micro/coder/opcoders/serializers/serializer.h" +#include "coder/opcoders/serializers/serializer.h" #include "nnacl/arithmetic.h" #include "nnacl/int8/quantize.h" #include "coder/opcoders/file_collector.h" #include "coder/log.h" -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; namespace mindspore::lite::micro::cmsis { @@ -85,5 +85,5 @@ int AddInt8Coder::DoCode(CoderContext *const context) { context->AppendCode(code.str()); return RET_OK; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Add, CPUOpCoderCreator) +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_AddFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc index 2e9ff730b9..5349603e3a 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.cc @@ -15,14 +15,13 @@ */ #include "coder/opcoders/cmsis-nn/int8/conv2d_int8_coder.h" -#include #include #include -#include "coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.h" #include "coder/opcoders/serializers/serializer.h" #include "coder/opcoders/file_collector.h" +#include "src/common/prim_util.h" -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; namespace mindspore::lite::micro::cmsis { @@ -40,13 +39,11 @@ int Conv2DInt8Coder::Prepare(CoderContext *const context) { int Conv2DInt8Coder::DoCode(CoderContext *const context) { Serializer code; code.precision(kPrecision); - std::vector h_files; - std::vector c_files; + std::vector h_files; + std::vector c_files; h_files.emplace_back("CMSIS/NN/Include/arm_nnfunctions.h"); - string buffer_str = "NULL"; if (opt_ != Convolve_1x1_fast) { - buffer_str = allocator_->GetRuntimeAddr(buffer_); - code << " memset(" << buffer_str << ", 0, " << buffer_size_ << ");\n"; + code.CodeFunction("memset", buffer_, 0, buffer_size_); } code.CodeArray("output_shift", output_shift_, output_ch_); code.CodeArray("output_mult", output_mult_, output_ch_); @@ -57,7 +54,7 @@ int Conv2DInt8Coder::DoCode(CoderContext *const context) { 
code.CodeFunction("arm_convolve_s8", input_tensor_, input_x_, input_y_, input_ch_, input_batches_, filter_tensor_, output_ch_, kernel_x_, kernel_y_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, output_tensor_, "output_shift", "output_mult", out_offset_, input_offset_, out_activation_min_, - out_activation_max_, output_x_, output_y_, buffer_str); + out_activation_max_, output_x_, output_y_, buffer_); break; case Convolve_1_x_n: c_files = {"arm_convolve_1_x_n_s8.c", "arm_nn_mat_mul_core_1x_s8.c"}; @@ -65,7 +62,7 @@ int Conv2DInt8Coder::DoCode(CoderContext *const context) { code.CodeFunction("arm_convolve_1_x_n_s8", input_tensor_, input_x_, input_ch_, input_batches_, filter_tensor_, output_ch_, kernel_x_, pad_x_, stride_x_, bias_tensor_, output_tensor_, "output_shift", "output_mult", out_offset_, input_offset_, out_activation_min_, out_activation_max_, output_x_, - buffer_str); + buffer_); break; case Convolve_1x1_fast: c_files = {"arm_convolve_1x1_s8_fast.c", "arm_nn_mat_mult_nt_t_s8.c", "arm_nn_mat_mul_core_4x_s8.c", @@ -74,7 +71,7 @@ int Conv2DInt8Coder::DoCode(CoderContext *const context) { code.CodeFunction("arm_convolve_1x1_s8_fast", input_tensor_, input_x_, input_y_, input_ch_, input_batches_, filter_tensor_, output_ch_, pad_x_, pad_y_, stride_x_, stride_y_, bias_tensor_, output_tensor_, "output_shift", "output_mult", out_offset_, input_offset_, out_activation_min_, - out_activation_max_, output_x_, output_y_, buffer_str); + out_activation_max_, output_x_, output_y_, buffer_); break; default: MS_LOG(ERROR) << "opt enum value is not defined"; @@ -159,5 +156,20 @@ int Conv2DInt8Coder::InitTmpBuffer() { return RET_OK; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Conv2D, CPUOpCoderCreator) +std::unique_ptr CmsisConv2DInt8OpCoderCreator(const std::vector &in_tensors, + const std::vector &out_tensors, + const Model::Node *node, size_t node_index, + Target target) { + MS_CHECK_PTR_RET_NULL(node); + int pt = GetPrimitiveType(node->primitive_); + if (pt != schema::PrimitiveType::PrimitiveType_Conv2DFusion) { + MS_LOG(ERROR) << "unmatched primitive type " << PrimitiveTypeName(pt); + return nullptr; + } + std::unique_ptr coder = + std::make_unique(in_tensors, out_tensors, node, node_index, target); + return coder; +} + +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc index 04322376fc..b6182aab92 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/dwconv_int8_coder.cc @@ -20,8 +20,6 @@ #include "coder/opcoders/file_collector.h" #include "coder/log.h" -using mindspore::schema::PrimitiveType_DepthwiseConv2D; - namespace mindspore::lite::micro::cmsis { int DWConvInt8Coder::Prepare(CoderContext *const context) { @@ -153,6 +151,4 @@ int DWConvInt8Coder::InitTmpBuffer() { return 0; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_DepthwiseConv2D, CPUOpCoderCreator) - } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h index a5a1d3340d..e0370dc4d7 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h +++ 
b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/fullconnection_int8_coder.h @@ -19,8 +19,8 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" -#include "micro/coder/opcoders/base/full_connection_base_coder.h" +#include "coder/opcoders/op_coder.h" +#include "coder/opcoders/base/full_connection_base_coder.h" #include "nnacl/int8/quantize.h" namespace mindspore::lite::micro::cmsis { class FullConnectionInt8Coder final : public FullConnectionBaseCoder { diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc index f999aa4f95..a0b68f92f7 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/mul_int8_coder.cc @@ -20,7 +20,7 @@ #include "nnacl/int8/quantize.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; namespace mindspore::lite::micro::cmsis { @@ -69,5 +69,5 @@ int MulInt8Coder::DoCode(CoderContext *const context) { context->AppendCode(code.str()); return RET_OK; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Mul, CPUOpCoderCreator) +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_MulFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc index 342841ab00..5e786afe27 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/pooling_int8_coder.cc @@ -20,7 +20,8 @@ #include "coder/opcoders/serializers/serializer.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::lite::micro::cmsis { int PoolingInt8Coder::Prepare(CoderContext *const context) { @@ -39,14 +40,12 @@ int PoolingInt8Coder::Prepare(CoderContext *const context) { int PoolingInt8Coder::DoCode(CoderContext *const context) { // init struct PoolingParameters - std::string buffer_str = "NULL"; std::string pooling_func; std::vector cFiles; if (pooling_parameter_->pool_mode_ == PoolMode_AvgPool) { cFiles = {"arm_avgpool_s8.c"}; pooling_func = "arm_avgpool_s8"; - buffer_str = allocator_->GetRuntimeAddr(buffer_); } else if (pooling_parameter_->pool_mode_ == PoolMode_MaxPool) { cFiles = {"arm_max_pool_s8.c"}; pooling_func = "arm_max_pool_s8"; @@ -59,11 +58,9 @@ int PoolingInt8Coder::DoCode(CoderContext *const context) { Serializer code; code.precision(kPrecision); - code.CodeFunction(pooling_func, "&nn_context", "&pool_params", "&input_dims", input_tensor_, "&filter_dims", - "&output_dims", output_tensor_); code.CodeFunction(pooling_func, dim_src_height_, dim_src_width_, dim_dst_height_, dim_dst_width_, stride_height_, stride_width_, dim_kernel_height_, dim_kernel_width_, padding_height_, padding_width_, act_min_, - act_max_, ch_src_, input_tensor_, buffer_str, output_tensor_); + act_max_, ch_src_, input_tensor_, buffer_, output_tensor_); context->AppendCode(code.str()); return RET_OK; } @@ -97,6 +94,7 @@ int PoolingInt8Coder::SetParameters() { return RET_OK; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Pooling, CPUOpCoderCreator) +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, 
+REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_AvgPoolFusion, CPUOpCoderCreator<PoolingInt8Coder>) +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_MaxPoolFusion, CPUOpCoderCreator<PoolingInt8Coder>) } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc index 2083945a6d..8a47a8a1c6 100644 --- a/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/softmax_int8_coder.cc @@ -19,7 +19,7 @@ #include "coder/opcoders/serializers/serializer.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::lite::micro::cmsis { int SoftMaxInt8Coder::Prepare(CoderContext *const context) { @@ -76,6 +76,6 @@ int SoftMaxInt8Coder::DoCode(CoderContext *const context) { context->AppendCode(code.str()); return RET_OK; } -REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_SoftMax, CPUOpCoderCreator<SoftMaxInt8Coder>) +REG_OPERATOR_CODER(kARM32M, kNumberTypeInt8, PrimitiveType_Softmax, CPUOpCoderCreator<SoftMaxInt8Coder>) } // namespace mindspore::lite::micro::cmsis diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.cc b/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.cc new file mode 100644 index 0000000000..0e3b9a7475 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.cc @@ -0,0 +1,143 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/dequant/de_quant.h" +#include +#include +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" + +static constexpr int kPerTensor = 1; +static constexpr size_t kPerBatch = 3; namespace mindspore::lite::micro::nnacl { + +void Dequant::set_de_quant_buffer_str(const std::string &dequant_buffer_str) { + de_quant_buffer_str_ = "(float *)(" + dequant_buffer_str + ")"; +} + +void Dequant::DequantRecordWorkspace(size_t curr_workspace) { + de_quant_max_workspace_ = de_quant_max_workspace_ > curr_workspace ? de_quant_max_workspace_ : curr_workspace; +}
+bool Dequant::CheckDequantFlag(const Tensor *weight_tensor) { + if (weight_tensor == nullptr) { + return false; + } + return !weight_tensor->quant_params().empty() && weight_tensor->quant_params().front().inited && + weight_tensor->data_c() != nullptr; +} + +void Dequant::DeQuantFunctionPerChannel(const Tensor *quant_tensor, const std::vector<DeQuantArg> &de_quant_args, + const std::string &de_quant_arg_base_str, + NNaclFp32Serializer *const de_quant_code) { + int quant_arg_dims = static_cast<int>(quant_tensor->quant_params().size()); + int de_quant_nums = quant_tensor->ElementsNum(); + for (int i = 0; i < quant_arg_dims; ++i) { + auto de_quant_arg = de_quant_args.at(i); + std::string de_quant_arg_str = de_quant_arg_base_str + std::to_string(i); + de_quant_code->CodeStruct(de_quant_arg_str, de_quant_arg); + } + std::string de_quant_args_name = "de_quant_args"; + *de_quant_code << "const DeQuantArg *" << de_quant_args_name << "[" << quant_arg_dims << "] = {\n"; + for (int i = 0; i < quant_arg_dims - 1; ++i) { + *de_quant_code << "&" << de_quant_arg_base_str << std::to_string(i) << ", "; + } + *de_quant_code << "&" << de_quant_arg_base_str << std::to_string(quant_arg_dims - 1); + *de_quant_code << "};\n"; + size_t per_batch_size = quant_tensor->shape().at(0); + std::string quant_tensor_addr_str = "(int8_t *)(" + quant_tensor_addr_ + ")"; + de_quant_code->CodeFunction("DequantDataPerChannel", quant_tensor_addr_str, de_quant_args_name, de_quant_nums, + per_batch_size, de_quant_buffer_str_); +} + +void Dequant::DeQuantFunction(const Tensor *quant_tensor, const std::vector<DeQuantArg> &de_quant_args, + const std::string &de_quant_arg_base_str, NNaclFp32Serializer *const de_quant_code) { + int quant_arg_dims = static_cast<int>(quant_tensor->quant_params().size()); + int de_quant_nums = quant_tensor->ElementsNum(); + for (int i = 0; i < quant_arg_dims; ++i) { + auto de_quant_arg = de_quant_args.at(i); + std::string de_quant_arg_str = de_quant_arg_base_str + std::to_string(i); + de_quant_code->CodeStruct(de_quant_arg_str, de_quant_arg); + } + std::string de_quant_args_name = "de_quant_args"; + *de_quant_code << "const DeQuantArg *" << de_quant_args_name << "[" << quant_arg_dims << "] = {\n"; + for (int i = 0; i < quant_arg_dims - 1; ++i) { + *de_quant_code << "&" << de_quant_arg_base_str << std::to_string(i) << ", "; + } + *de_quant_code << "&" << de_quant_arg_base_str << std::to_string(quant_arg_dims - 1); + *de_quant_code << "};\n"; + auto channels = static_cast<size_t>(quant_tensor->Batch()); + std::string quant_tensor_addr_str = "(int8_t *)(" + quant_tensor_addr_ + ")"; + de_quant_code->CodeFunction("DequantData", quant_tensor_addr_str, de_quant_args_name, de_quant_nums, channels, + de_quant_buffer_str_); +} + +void Dequant::DeQuantFunctionPerTensor(const Tensor *quant_tensor, const std::vector<DeQuantArg> &de_quant_args, + const std::string &de_quant_arg_base_str, + NNaclFp32Serializer *const de_quant_code) { + size_t de_quant_nums = quant_tensor->ElementsNum(); + auto de_quant_arg = de_quant_args.at(0); + std::string de_quant_arg_str = de_quant_arg_base_str + std::to_string(0); + de_quant_code->CodeStruct(de_quant_arg_str, de_quant_arg); + std::string de_quant_args_name = "de_quant_args"; + *de_quant_code << "const DeQuantArg *" << de_quant_args_name << "[" << 1 << "] = {\n"; + *de_quant_code << "&" << de_quant_arg_base_str << std::to_string(0); + *de_quant_code << "};\n"; + std::string quant_tensor_addr_str = "(int8_t *)(" + quant_tensor_addr_ + ")"; + de_quant_code->CodeFunction("DequantDataPerTensor", quant_tensor_addr_str, de_quant_args_name, de_quant_nums, + de_quant_buffer_str_); +}
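For orientation, the three emitters above splice a braced C block into the generated source. For a single per-tensor argument it comes out roughly like the snippet below. This is a reconstruction, not verbatim generator output: the DeQuantArg layout follows the initializer order used later in GetMicroDeQuantFunction, the DequantDataPerTensor signature is assumed from the call site, and the buffer/weight names and sizes are made up; the local stubs exist only so the snippet stands alone.

```cpp
#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct {
  float scale; int zeroPoint; float var_corr; float mean_corr;
  float *clusters; int clusters_nums; int bitNum;
} DeQuantArg;  // local stand-in; the real struct comes from nnacl headers
void DequantDataPerTensor(const int8_t *src, const DeQuantArg **args, size_t n, float *dst);  // assumed signature

int8_t g_weight_int8[1024];      // hypothetical quantized weight blob
float g_dequant_buffer[1024];    // hypothetical shared dequant workspace

void emitted_block_example(void) {
  // body mirrors the CodeStruct / operator<< / CodeFunction sequence above
  const DeQuantArg de_quant_arg_0 = {0.0039f, 0, 0.0f, 0.0f, NULL, 0, 8};
  const DeQuantArg *de_quant_args[1] = {&de_quant_arg_0};
  memset((float *)(g_dequant_buffer), 0, 1024 * sizeof(float));
  DequantDataPerTensor((int8_t *)(g_weight_int8), de_quant_args, 1024, (float *)(g_dequant_buffer));
}
```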
de_quant_code->CodeFunction("DequantDataPerTensor", quant_tensor_addr_str, de_quant_args_name, de_quant_nums, + de_quant_buffer_str_); +} + +std::string Dequant::GetMicroDeQuantFunction(const Tensor *quant_tensor, const std::string &quant_tensor_addr) { + std::string de_quant_block; + if (quant_tensor == nullptr || de_quant_buffer_str_.empty()) { + return de_quant_block; + } + quant_tensor_addr_ = quant_tensor_addr; + size_t de_quant_nums = quant_tensor->ElementsNum(); + size_t quant_arg_dims = quant_tensor->quant_params().size(); + DequantRecordWorkspcae(static_cast(de_quant_nums * sizeof(float))); + NNaclFp32Serializer de_quant_code; + de_quant_code << "{\n"; + size_t quant_tensor_dims = quant_tensor->shape().size(); + std::vector de_quant_args; + std::string de_quant_arg_base_str = "de_quant_arg_"; + for (size_t i = 0; i < quant_arg_dims; ++i) { + auto curr_quant_param = quant_tensor->quant_params().at(i); + DeQuantArg de_quant_arg = { + .scale = static_cast(curr_quant_param.scale), + .zeroPoint = curr_quant_param.zeroPoint, + .var_corr = curr_quant_param.var_corr, + .mean_corr = curr_quant_param.mean_corr, + // this clusters is meaningless which will be supported in future + .clusters = {}, + .clusters_nums = static_cast(curr_quant_param.clusters.size()), + .bitNum = quant_tensor->quant_params().at(i).bitNum, + }; + de_quant_args.emplace_back(de_quant_arg); + } + de_quant_code.CodeFunction("memset", de_quant_buffer_str_, 0, de_quant_nums * sizeof(float)); + if (quant_tensor_dims == kPerBatch && quant_arg_dims == static_cast(quant_tensor->shape().at(0))) { + DeQuantFunctionPerChannel(quant_tensor, de_quant_args, de_quant_arg_base_str, &de_quant_code); + } else if (quant_arg_dims != kPerTensor) { + DeQuantFunction(quant_tensor, de_quant_args, de_quant_arg_base_str, &de_quant_code); + } else { + DeQuantFunctionPerTensor(quant_tensor, de_quant_args, de_quant_arg_base_str, &de_quant_code); + } + de_quant_code << "}\n"; + de_quant_block = de_quant_code.str(); + return de_quant_block; +} +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.h b/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.h new file mode 100644 index 0000000000..bf86b7b490 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/dequant/de_quant.h @@ -0,0 +1,63 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_DEQUANT_DEQUANT_H_ +#define MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_DEQUANT_DEQUANT_H_ + +#include +#include +#include "src/tensor.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +namespace mindspore::lite::micro::nnacl { +class Dequant { + public: + Dequant(const Dequant &) = delete; + Dequant &operator=(const Dequant &) = delete; + static Dequant *GetInstance() { + static Dequant dequant; + return &dequant; + } + + void set_de_quant_buffer_str(const std::string &de_quant_buffer_str); + + const size_t de_quant_max_workspace() const { return de_quant_max_workspace_; } + + const std::string de_quant_buffer_str() const { return de_quant_buffer_str_; } + + bool CheckDequantFlag(const Tensor *quant_tensor); + + std::string GetMicroDeQuantFunction(const Tensor *quant_tensor, const std::string &quant_tensor_addr); + + private: + void DeQuantFunctionPerTensor(const Tensor *quant_tensor, const std::vector &de_quant_args, + const std::string &de_quant_arg_base_str, NNaclFp32Serializer *de_quant_code); + + void DeQuantFunction(const Tensor *quant_tensor, const std::vector &de_quant_args, + const std::string &de_quant_arg_base_str, NNaclFp32Serializer *de_quant_code); + + void DeQuantFunctionPerChannel(const Tensor *quant_tensor, const std::vector &de_quant_args, + const std::string &de_quant_arg_base_str, NNaclFp32Serializer *de_quant_code); + + Dequant() = default; + ~Dequant() = default; + void DequantRecordWorkspcae(size_t curr_workspace); + + std::string de_quant_buffer_str_; + std::string quant_tensor_addr_; + size_t de_quant_max_workspace_{0}; +}; +} // namespace mindspore::lite::micro::nnacl +#endif // MICRO_LITE_MICRO_CODER_OPCODERS_NNACL_DEQUANT_DEQUANT_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc index 3e617f7708..f560221ebe 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.cc @@ -13,12 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "micro/coder/opcoders/nnacl/fp32/activation_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/activation_fp32_coder.h" #include #include "nnacl/fp32/activation_fp32.h" #include "nnacl/op_base.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -#include "micro/coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/file_collector.h" using mindspore::schema::PrimitiveType_Activation; @@ -34,9 +34,9 @@ int ActivationFP32Coder::DoCode(CoderContext *const context) { int count = MSMIN(stride, length - stride * task_id); if (activation_parameter->type_ == schema::ActivationType_SIGMOID) { - Collect(context, {"runtime/kernel/fp32/sigmoid.h"}, {"sigmoid.c"}); + Collect(context, {"runtime/kernel/fp32/sigmoid_fp32.h"}, {"sigmoid_fp32.c"}); } else { - Collect(context, {"nnacl/fp32/activation.h"}, {"activation.c"}); + Collect(context, {"nnacl/fp32/activation_fp32.h"}, {"activation_fp32.c"}); } NNaclFp32Serializer code; switch (activation_parameter->type_) { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc index c5a4132c0d..d780247d11 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.cc @@ -14,10 +14,10 @@ * limitations under the License. */ -#include "micro/coder/opcoders/nnacl/fp32/addn_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/addn_fp32_coder.h" #include -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -#include "micro/coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/file_collector.h" using mindspore::schema::PrimitiveType_AddN; namespace mindspore::lite::micro::nnacl { @@ -28,15 +28,12 @@ int AddNFP32Coder::DoCode(CoderContext *const context) { int elements_num = input0->ElementsNum(); // Get Tensor Pointer - std::string input0_str = allocator_->GetRuntimeAddr(input0); - std::string input1_str = allocator_->GetRuntimeAddr(input1); - Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"}); + Collect(context, {"nnacl/kernel/fp32/add_fp32.h"}, {"add_fp32.c"}); NNaclFp32Serializer code; - code.CodeFunction("ElementAdd", input0_str, input1_str, output_tensor_, elements_num); + code.CodeFunction("ElementAdd", input0, input1, output_tensor_, elements_num); if (input_tensors_.size() > 2) { for (size_t i = 2; i < input_tensors_.size(); ++i) { - std::string input_str = allocator_->GetRuntimeAddr(input_tensors_.at(i)); - code.CodeFunction("ElementAdd", input_str, output_tensor_, elements_num); + code.CodeFunction("ElementAdd", input_tensors_.at(i), output_tensor_, elements_num); } } context->AppendCode(code.str()); diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc index 1c70ebd6f4..6f9a3047c7 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.cc @@ -61,7 +61,7 @@ int ArithmeticFP32Coder::Init(CoderContext *const context) { if (arithmetic_parameter_->in_elements_num0_ == 1 || arithmetic_parameter_->in_elements_num1_ == 1) { switch (arithmetic_parameter_->op_parameter_.type_) { - case PrimitiveType_Mul: + case 
PrimitiveType_MulFusion: switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_parameter_->broadcasting_ = false; @@ -80,7 +80,7 @@ int ArithmeticFP32Coder::Init(CoderContext *const context) { break; } break; - case PrimitiveType_Add: + case PrimitiveType_AddFusion: switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_parameter_->broadcasting_ = false; @@ -99,7 +99,7 @@ int ArithmeticFP32Coder::Init(CoderContext *const context) { break; } break; - case PrimitiveType_Sub: + case PrimitiveType_SubFusion: switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: arithmetic_parameter_->broadcasting_ = false; @@ -157,7 +157,7 @@ int ArithmeticFP32Coder::Prepare(CoderContext *const context) { } arithmetic_parameter_ = reinterpret_cast<ArithmeticParameter *>(parameter_); std::map<int, std::function<void()>> type_setters = { - {PrimitiveType_Mul, + {PrimitiveType_MulFusion, [this]() { switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: @@ -174,7 +174,7 @@ int ArithmeticFP32Coder::Prepare(CoderContext *const context) { break; } }}, - {PrimitiveType_Add, + {PrimitiveType_AddFusion, [this]() { switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: @@ -191,7 +191,7 @@ int ArithmeticFP32Coder::Prepare(CoderContext *const context) { break; } }}, - {PrimitiveType_Sub, + {PrimitiveType_SubFusion, [this]() { switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: @@ -205,7 +205,7 @@ int ArithmeticFP32Coder::Prepare(CoderContext *const context) { break; } }}, - {PrimitiveType_Div, + {PrimitiveType_DivFusion, [this]() { switch (arithmetic_parameter_->activation_type_) { case schema::ActivationType_RELU: @@ -275,15 +275,16 @@ int ArithmeticFP32Coder::DoCode(CoderContext *const context) { * this solution is not suitable for micro, for the size of package.
 * */ if (arithmetic_opt_run_ == "ElementOptSub" || arithmetic_run_ == "ElementSub") { - Collect(context, {"nnacl/kernel/fp32/sub.h"}, {"sub.c"}); + Collect(context, {"nnacl/fp32/sub_fp32.h"}, {"sub_fp32.c"}); } else if (arithmetic_opt_run_ == "ElementOptAdd" || arithmetic_run_ == "ElementAdd") { - Collect(context, {"nnacl/kernel/fp32/add_fp32_slim.h"}, {"add_fp32_slim.c"}); + Collect(context, {"nnacl/fp32/add_fp32.h"}, {"add_fp32.c"}); } else if (arithmetic_opt_run_ == "ElementOptMul" || arithmetic_run_ == "ElementMul") { - Collect(context, {"nnacl/kernel/fp32/mul.h"}, {"mul.c"}); + Collect(context, {"nnacl/fp32/mul_fp32.h"}, {"mul_fp32.c"}); } else if (arithmetic_run_ == "ElementAddRelu") { - Collect(context, {"nnacl/kernel/fp32/add_relu.h"}, {"add_relu.c"}); + Collect(context, {"nnacl/fp32/add_relu_fp32.h"}, {"add_relu_fp32.c"}); } else { - Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic.h"}, {"arithmetic_common.c", "arithmetic.c"}); + Collect(context, {"nnacl/arithmetic_common.h", "nnacl/fp32/arithmetic_fp32.h"}, + {"arithmetic_common.c", "arithmetic_fp32.c"}); } if (arithmetic_parameter_->broadcasting_) { @@ -330,15 +331,15 @@ int ArithmeticFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_Add, CPUOpCoderCreator<ArithmeticFP32Coder>) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt32, PrimitiveType_AddFusion, CPUOpCoderCreator<ArithmeticFP32Coder>) -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Mul, CPUOpCoderCreator<ArithmeticFP32Coder>) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_MulFusion, CPUOpCoderCreator<ArithmeticFP32Coder>) -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Add, CPUOpCoderCreator<ArithmeticFP32Coder>) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AddFusion, CPUOpCoderCreator<ArithmeticFP32Coder>) -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Sub, CPUOpCoderCreator<ArithmeticFP32Coder>) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_SubFusion, CPUOpCoderCreator<ArithmeticFP32Coder>) -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Div, CPUOpCoderCreator<ArithmeticFP32Coder>) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_DivFusion, CPUOpCoderCreator<ArithmeticFP32Coder>) REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_LogicalAnd, CPUOpCoderCreator<ArithmeticFP32Coder>) diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h index 7c2ed43242..a2f6948842 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_fp32_coder.h @@ -25,9 +25,9 @@ #define DEFAULT_ARITHMETIC_NDIMS 10 namespace mindspore::lite::micro::nnacl { -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_DivFusion; using mindspore::schema::PrimitiveType_Equal; @@ -51,7 +51,7 @@ using mindspore::schema::PrimitiveType_Maximum; using mindspore::schema::PrimitiveType_Minimum; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; @@ -59,7 +59,7 @@ using mindspore::schema::PrimitiveType_RealDiv; using mindspore::schema::PrimitiveType_SquaredDifference; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; using mindspore::schema::PrimitiveType_Eltwise; diff --git
a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h index 239173907e..2f0754d699 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/arithmetic_self_fp32_coder.h @@ -27,7 +27,7 @@ namespace mindspore::lite::micro::nnacl { using mindspore::schema::PrimitiveType_Abs; -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; using mindspore::schema::PrimitiveType_AddN; @@ -37,7 +37,7 @@ using mindspore::schema::PrimitiveType_Ceil; using mindspore::schema::PrimitiveType_Cos; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_DivFusion; using mindspore::schema::PrimitiveType_Equal; @@ -67,7 +67,7 @@ using mindspore::schema::PrimitiveType_Maximum; using mindspore::schema::PrimitiveType_Minimum; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; @@ -81,7 +81,7 @@ using mindspore::schema::PrimitiveType_Sqrt; using mindspore::schema::PrimitiveType_SquaredDifference; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; using mindspore::schema::PrimitiveType_Sin; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc index 8d7ac73fd7..fa97188414 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.cc @@ -14,10 +14,10 @@ * limitations under the License. */ -#include "micro/coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/assign_add_fp32_coder.h" #include #include "schema/inner/ops_generated.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" namespace mindspore::lite::micro::nnacl { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc index 4ddf38d561..2185714a14 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.cc @@ -17,7 +17,6 @@ #include #include #include "nnacl/fp32/batchnorm_fp32.h" -#include "src/ops/batch_norm.h" #include "nnacl/op_base.h" #include "coder/opcoders/file_collector.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" @@ -27,10 +26,7 @@ using mindspore::schema::PrimitiveType_BatchNorm; namespace mindspore::lite::micro::nnacl { int BatchnormFP32Coder::Init() { - auto bn_parameter = reinterpret_cast<BatchNormParameter *>(parameter_); - auto bn_prim = reinterpret_cast(OperatorCoder::primitive()); - bn_parameter->epsilon_ = bn_prim->GetEpsilon(); - + auto bn_parameter = reinterpret_cast<BatchNormParameter *>(OperatorCoder::parameter_); std::vector<int> input_shapes = input_tensor_->shape(); if (input_shapes.empty()) { return RET_ERROR; @@ -41,7 +37,9 @@ int BatchnormFP32Coder::Init() { for (int i = 0; i < n_dim - 1; i++) { bn_parameter->unit_ *= input_shapes.at(i); } - bn_parameter->op_parameter_.thread_num_ = MSMIN(bn_parameter->op_parameter_.thread_num_, bn_parameter->unit_); + if (default_momentum_ < 0.0f) { + default_momentum_ =
bn_parameter->momentum_; + } return RET_OK; } @@ -59,7 +57,7 @@ int BatchnormFP32Coder::DoCode(CoderContext *const context) { Collect(context, {"nnacl/fp32/batchnorm.h"}, {"nnacl/fp32/batchnorm.c"}); NNaclFp32Serializer code; code.CodeStruct("bn_parameter", *bn_parameter); - code.CodeFunction("BatchNorm", output_tensor_, input_tensor_, mean_tensor, var_tensor, task_id, "&bn_parameter"); + code.CodeFunction("BatchNormFp32", input_tensor_, mean_tensor, var_tensor, "&bn_parameter", task_id, output_tensor_); MS_LOG(INFO) << "BatchnormFP32Code has been called"; context->AppendCode(code.str()); return lite::RET_OK; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h index 9b77799a04..5d222c487f 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/batchnorm_fp32_coder.h @@ -36,6 +36,12 @@ class BatchnormFP32Coder final : public OperatorCoder { private: int Init(); + + float default_momentum_{-1.0f}; + + float *mean_{nullptr}; + + float *variance_{nullptr}; }; } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.cc new file mode 100644 index 0000000000..5ff71219e6 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.cc @@ -0,0 +1,77 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h" +#include +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" + +using mindspore::schema::PrimitiveType_BiasAdd; + +namespace mindspore::lite::micro::nnacl { + +int BiasAddFP32Coder::Prepare(CoderContext *context) { + arithmetic_parameter_ = reinterpret_cast(parameter_); + size_t data_size = input_tensors_.at(0)->ElementsNum(); + tile_in_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, data_size * sizeof(float), kWorkspace)); + tile_bias_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, data_size * sizeof(float), kWorkspace)); + return RET_OK; +} + +int BiasAddFP32Coder::DoCode(CoderContext *ctx) { + if (input_tensors_.size() < kBiasIndex) { + return RET_ERROR; + } + size_t data_size = input_tensor_->ElementsNum(); + std::string bias_str = allocator_->GetRuntimeAddr(input_tensors_.at(kWeightIndex)); + Collect(ctx, + {"nnacl/arithmetic.h", "nnacl/nnacl_utils.h", "nnacl/nnacl_common.h", "nnacl/base/arithmetic_base.h", + "nnacl/fp32/add_fp32.h", "nnacl/fp32/arithmetic_fp32.h"}, + {"arithmetic_base.c", "arithmetic_fp32.c", "add_fp32.c"}); + nnacl::NNaclFp32Serializer code; + std::vector dims = input_tensor_->shape(); + arithmetic_parameter_->broadcasting_ = false; + arithmetic_parameter_->ndim_ = dims.size(); + arithmetic_parameter_->activation_type_ = 0; + for (size_t i = 0; i < dims.size(); i++) { + arithmetic_parameter_->in_shape0_[i] = dims[i]; + } + arithmetic_parameter_->in_elements_num0_ = 0; + + for (size_t i = 0; i < dims.size(); i++) { + if (i == dims.size() - 1) { + arithmetic_parameter_->in_shape1_[i] = dims[dims.size() - 1]; + continue; + } + arithmetic_parameter_->in_shape1_[i] = 1; + } + arithmetic_parameter_->in_elements_num1_ = 0; + + for (size_t i = 0; i < dims.size(); i++) { + arithmetic_parameter_->out_shape_[i] = dims[i]; + } + arithmetic_parameter_->out_elements_num_ = 0; + // other rest elements is not sure + + code.CodeStruct("arith_param", *arithmetic_parameter_); + code.CodeFunction("BroadcastAdd", input_tensor_, bias_str, tile_in_, tile_bias_, output_tensor_, data_size, + "(ArithmeticParameter *)&arith_param"); + ctx->AppendCode(code.str()); + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_BiasAdd, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h new file mode 100644 index 0000000000..60064f2af8 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/biasadd_fp32_coder.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_BIASADD_FP32_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_BIASADD_FP32_CODER_H_ + +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/arithmetic.h" + +namespace mindspore::lite::micro::nnacl { +class BiasAddFP32Coder final : public OperatorCoder { + public: + BiasAddFP32Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~BiasAddFP32Coder() override = default; + + int Prepare(CoderContext *context) override; + + int DoCode(CoderContext *context) override; + + private: + ArithmeticParameter *arithmetic_parameter_{nullptr}; + float *tile_in_{nullptr}; + float *tile_bias_{nullptr}; +}; +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_BIASADD_FP32_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc index be9041267b..27e45cfc98 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.cc @@ -14,13 +14,12 @@ * limitations under the License. */ -#include "micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h" #include -#include "micro/coder/log.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/log.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::lite::micro::nnacl { int ConvolutionDepthwiseFP32Coder::Prepare(CoderContext *const context) { Conv2DBaseCoder::Init(); @@ -73,6 +72,4 @@ int ConvolutionDepthwiseFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_DepthwiseConv2D, - CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h index 6946459d42..f38a25b24a 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_FP32_CONVOLUTION_DEPTHWISE_FP32_CODER_H_ #include -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include "src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h" namespace mindspore::lite::micro::nnacl { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.cc index 54a6e86efe..2efa19c302 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.cc @@ -14,17 +14,21 @@ * limitations under the License. 
 */ -#include "micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/convolution_fp32_coder.h" #include #include #include -#include "micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/convolution_depthwise_fp32_coder.h" #include "nnacl/fp32/winograd_utils.h" #include "src/ops/populate/populate_register.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/log.h" +#include "coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "src/common/prim_util.h" +#include "src/common/version_manager.h" +#include "coder/opcoders/nnacl/dequant/de_quant.h" -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; namespace mindspore::lite::micro::nnacl { int ConvolutionFP32Coder::InitTmpBuffer() { int in_channel = conv_param_->input_channel_; @@ -43,20 +47,16 @@ int ConvolutionFP32Coder::InitTmpBuffer() { } int ConvolutionFP32Coder::Prepare(CoderContext *const context) { - int ret = Conv2DBaseCoder::Init(); - MS_CHECK_RET_CODE(ret, "Conv2DBaseCoder::Init() failed."); - ret = InitWeightBias(context); - MS_CHECK_RET_CODE(ret, "Init weight bias failed."); + MS_CHECK_RET_CODE(Conv2DBaseCoder::Init(), "Conv2DBaseCoder::Init() failed."); + de_quant_flag_ = Dequant::GetInstance()->CheckDequantFlag(filter_tensor_); + MS_CHECK_RET_CODE(InitWeightBias(context), "Init weight bias failed."); return Resize(); } int ConvolutionFP32Coder::Resize() { - int ret = Conv2DBaseCoder::CheckResizeValid(); - MS_CHECK_RET_CODE(ret, "Resize is invalid."); - ret = Conv2DBaseCoder::Init(); - MS_CHECK_RET_CODE(ret, "init failed."); - ret = InitTmpBuffer(); - MS_CHECK_RET_CODE(ret, "init tmp buffer failed."); + MS_CHECK_RET_CODE(Conv2DBaseCoder::CheckResizeValid(), "Resize is invalid."); + MS_CHECK_RET_CODE(Conv2DBaseCoder::Init(), "init failed."); + MS_CHECK_RET_CODE(InitTmpBuffer(), "init tmp buffer failed."); return RET_OK; } @@ -71,36 +71,43 @@ int ConvolutionFP32Coder::InitWeightBias(CoderContext *const context) { const int oc_block = C8NUM; int oc_block_num = UP_DIV(out_channel, C8NUM); int pack_weight_size = oc_block_num * oc_block * in_channel * kernel_plane; - + pack_weight_size_ = pack_weight_size * sizeof(float); auto origin_weight = reinterpret_cast<float *>(filter_tensor_->MutableData()); MS_CHECK_PTR(origin_weight); - packed_weight_ = reinterpret_cast<float *>( - allocator_->Malloc(kNumberTypeFloat32, pack_weight_size * sizeof(float), kOnlinePackWeight)); + packed_weight_ = reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, kOnlineSize, kOnlinePackWeight)); MS_CHECK_PTR(packed_weight_); auto out_channel_size = static_cast<size_t>(out_channel); - NNaclFp32Serializer code; - code.CodeMallocExpression(packed_weight_, pack_weight_size * sizeof(float)); - code.CodeFunction("memset", packed_weight_, 0, pack_weight_size * sizeof(float)); - code.CodeFunction("RowMajor2Col8Major", filter_tensor_, packed_weight_, out_channel_size, in_channel * kernel_plane); + NNaclFp32Serializer init_code; + std::string ori_weight_addr = allocator_->GetRuntimeAddr(filter_tensor_); + std::string init_weight_str = ori_weight_addr; + if (de_quant_flag_) { + init_weight_str = Dequant::GetInstance()->de_quant_buffer_str(); + std::string de_quant_function = Dequant::GetInstance()->GetMicroDeQuantFunction(filter_tensor_, ori_weight_addr); + init_code << de_quant_function; + } + init_code.CodeMallocExpression(packed_weight_, pack_weight_size_);
+ init_code.CodeFunction("memset", packed_weight_, 0, pack_weight_size_); + init_code.CodeFunction("RowMajor2Col8Major", init_weight_str, packed_weight_, out_channel_size, + in_channel * kernel_plane); auto bias_data_size = static_cast<size_t>(oc_block_num * oc_block * sizeof(float)); - bias_data_ = reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, bias_data_size, kOnlinePackWeight)); + bias_data_ = reinterpret_cast<float *>(allocator_->Malloc(kNumberTypeFloat32, kOnlineSize, kOnlinePackWeight)); MS_CHECK_PTR(bias_data_); if (input_tensors_.size() == kInputSize2) { - code.CodeMallocExpression(bias_data_, bias_data_size); - code.CodeFunction("memset", bias_data_, 0, bias_data_size); - code.CodeFunction("memcpy", bias_data_, bias_tensor_, out_channel_size * sizeof(float)); + init_code.CodeMallocExpression(bias_data_, bias_data_size); + init_code.CodeFunction("memset", bias_data_, 0, bias_data_size); + init_code.CodeFunction("memcpy", bias_data_, bias_tensor_, out_channel_size * sizeof(float)); } else { return RET_ERROR; } - context->AppendInitCode(code.str()); + context->AppendInitCode(init_code.str()); return RET_OK; } int ConvolutionFP32Coder::DoCode(CoderContext *const context) { { - std::vector<std::string> asmFiles; if (target_ == kARM32A) { asmFiles = {"MatmulFp32.S", "MatmulFp32Opt.S", @@ -112,9 +119,14 @@ int ConvolutionFP32Coder::DoCode(CoderContext *const context) { asmFiles = {"MatmulFp32.S", "MatmulFp32Opt.S", "PreSum4x16Int8Peroc.S", "MatVecMulFp32.S", "PreSum4x16Int8Peroc.S", "PreSum4x16Int8Pert.S", "IndirectGemmInt16to32_8x4.S", "MatmulInt8.S"}; } - Collect(context, - {"nnacl/kernel/fp32/conv_fp32_slim.h", "nnacl/fp32/matmul.h", "nnacl/conv_parameter.h", "nnacl/op_base.h"}, - {"common_func.c", "conv_fp32_slim.c", "matmul.c"}, asmFiles); + std::vector<std::string> h_files = {"nnacl/fp32/conv_common_fp32.h", "nnacl/fp32/matmul.h", + "nnacl/conv_parameter.h", "nnacl/op_base.h"}; + std::vector<std::string> c_files = {"common_func.c", "conv_common_fp32.c", "matmul.c"}; + if (de_quant_flag_) { + h_files.emplace_back("wrapper/fp32/dequant_int8_to_fp32_wrapper.h"); + c_files.emplace_back("dequant_int8_to_fp32_wrapper.c"); + } + Collect(context, h_files, c_files, asmFiles); } NNaclFp32Serializer code; // call the op function @@ -122,7 +134,7 @@ int ConvolutionFP32Coder::DoCode(CoderContext *const context) { code.CodeFunction("memset", col_major_input_, "0", col_major_input_size_); code.CodeStruct("conv_parameter", *conv_param_); int task_id = 0; - code.CodeFunction("ConvFp32Slim", input_tensor_, packed_input_, packed_weight_, bias_data_, col_major_input_, + code.CodeFunction("ConvFp32", input_tensor_, packed_input_, packed_weight_, bias_data_, col_major_input_, output_tensor_, task_id, "(ConvParameter *)&conv_parameter"); context->AppendCode(code.str());
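Note how InitWeightBias now routes everything through AppendInitCode while DoCode uses AppendCode: the generated program separates one-time weight packing from the per-inference body. A simplified stand-in for that two-stream emission; the class, method, and function names below are illustrative, not the real CoderContext API:

```cpp
#include <string>

class TwoPhaseEmitter {
 public:
  void AppendInitCode(const std::string &fragment) { init_body_ += fragment; }
  void AppendCode(const std::string &fragment) { inference_body_ += fragment; }
  // Each stream is concatenated into its own C function in the emitted source.
  std::string Emit() const {
    return "void net_init(void) {\n" + init_body_ + "}\n\n" +
           "void net_inference(void) {\n" + inference_body_ + "}\n";
  }

 private:
  std::string init_body_;       // e.g. malloc + RowMajor2Col8Major for packed_weight_
  std::string inference_body_;  // e.g. the ConvFp32 call
};
```

This split is also why the dequant block is emitted into init code: int8-to-float weight expansion happens once at load time, never on the inference path.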
@@ -135,18 +147,18 @@ std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderCreator(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, const Model::Node *node, size_t node_index, Target target) { std::vector<Tensor *> inputs = in_tensors; std::vector<Tensor *> outputs = out_tensors; - auto primitive = node->primitive_; - if (!primitive) { + const void *primitive = node->primitive_; + if (primitive == nullptr) { return nullptr; } - OpParameter *parameter = - PopulateRegistry::GetInstance()->GetParameterCreator((schema::PrimitiveType(primitive->Type())))(primitive); - if (parameter == nullptr) { - MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)(primitive->Type())); + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + ParameterGen paramGen = PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version); + if (paramGen == nullptr) { + MS_LOG(ERROR) << "parameter generator is null"; + return nullptr; + } - auto conv_param = reinterpret_cast<ConvParameter *>(parameter); + auto conv_param = reinterpret_cast<ConvParameter *>(paramGen(node->primitive_)); bool use_winograd = false; int out_unit = 0; int kernel_h = conv_param->kernel_h_; @@ -159,7 +171,7 @@ std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderCreator(const std::vector<Tensor *> &in_tensors, conv_param->output_channel_ = outputs.at(kOutputIndex)->Channel(); conv_param->op_parameter_.thread_num_ = 1; CheckIfUseWinograd(&use_winograd, &out_unit, conv_param); - free(parameter); + free(conv_param); // weight de quant std::unique_ptr<OperatorCoder> coder; if (kernel_h == 1 && kernel_w == 1) { @@ -175,5 +187,32 @@ std::unique_ptr<OperatorCoder> CPUConvolutionFP32CoderCreator(const std::vector<Tensor *> &in_tensors, return coder; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Conv2D, CPUConvolutionFP32CoderCreator) +std::unique_ptr<OperatorCoder> CPUConv2DFusionFP32CoderCreator(const std::vector<Tensor *> &in_tensors, + const std::vector<Tensor *> &out_tensors, + const Model::Node *node, size_t node_index, + Target target) { + const void *primitive = node->primitive_; + if (primitive == nullptr) { + return nullptr; + } + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + ParameterGen paramGen = + PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version); + if (paramGen == nullptr) { + MS_LOG(ERROR) << "parameter generator is null"; + return nullptr; + } + auto conv_param = reinterpret_cast<ConvParameter *>(paramGen(node->primitive_)); + std::unique_ptr<OperatorCoder> coder; + if (conv_param->group_ == 1) { + coder = CPUConvolutionFP32CoderCreator(in_tensors, out_tensors, node, node_index, target); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + coder = CPUOpCoderCreator<ConvolutionDepthwiseFP32Coder>(in_tensors, out_tensors, node, node_index, target); + } else { + // GroupConv + } + return coder; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Conv2DFusion, CPUConv2DFusionFP32CoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h index eaabfb841e..42c2ee583b 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_fp32_coder.h @@ -14,14 +14,14 @@ * limitations under the License.
*/ -#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_FP32_CONVOLUTION_FP32_CODER_H_ -#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_FP32_CONVOLUTION_FP32_CODER_H_ +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_CONVOLUTION_FP32_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_CONVOLUTION_FP32_CODER_H_ #include #include #include "nnacl/conv_parameter.h" -#include "micro/coder/opcoders/base/conv2d_base_coder.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" namespace mindspore::lite::micro::nnacl { class ConvolutionFP32Coder final : public Conv2DBaseCoder { @@ -51,12 +51,14 @@ class ConvolutionFP32Coder final : public Conv2DBaseCoder { size_t packed_input_size_{0}; - int thread_stride_{0}; + bool de_quant_flag_{false}; int thread_count_{0}; float *col_major_input_{nullptr}; size_t col_major_input_size_{0}; + + size_t pack_weight_size_{0}; }; } // namespace mindspore::lite::micro::nnacl -#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_FP32_CONVOLUTION_FP32_CODER_H_ +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_CONVOLUTION_FP32_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.cc index fbf6867252..69c973a24f 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.cc @@ -13,12 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h" #include #include "nnacl/base/minimal_filtering_generator.h" -#include "micro/coder/log.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/log.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" namespace mindspore::lite::micro::nnacl { const std::array InputTransFuncList = { @@ -222,10 +222,11 @@ int ConvolutionWinogradFP32Coder::DoCode(CoderContext *const context) { asmFiles = {"MatmulFp32.S", "MatmulFp32Opt.S", "PreSum4x16Int8Peroc.S", "MatVecMulFp32.S", "PreSum4x16Int8Peroc.S", "PreSum4x16Int8Pert.S", "IndirectGemmInt16to32_8x4.S", "MatmulInt8.S"}; } - Collect(context, {"nnacl/fp32/conv.h", "nnacl/common_func.h"}, - {"common_func.c", "conv_int8.c", "matmul_int8.c", "pack.c", "conv.c", "winograd_transform.c", - "common_func_fp32.c", "fixed_point.c", "winograd_utils.c", "minimal_filtering_generator.c"}, - asmFiles); + Collect( + context, {"nnacl/fp32/conv_winograd_fp32.h", "nnacl/common_func.h"}, + {"common_func.c", "conv_int8.c", "matmul_int8.c", "pack_fp32.c", "conv_winograd_fp32.c", "winograd_transform.c", + "common_func_fp32.c", "fixed_point.c", "winograd_utils.c", "minimal_filtering_generator.c"}, + asmFiles); NNaclFp32Serializer code; // call the op function diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h index 8bce785ad0..1e236ae622 100644 --- 
a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/convolution_winograd_fp32_coder.h @@ -20,7 +20,7 @@ #include #include #include -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include "nnacl/conv_parameter.h" namespace mindspore::lite::micro::nnacl { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.cc index d9137398af..d84b208e48 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.cc @@ -22,6 +22,7 @@ #include "coder/opcoders/file_collector.h" #include "nnacl/fp32/matmul_fp32.h" #include "wrapper/fp32/matmul_fp32_wrapper.h" +#include "coder/opcoders/nnacl/dequant/de_quant.h" using mindspore::schema::PrimitiveType_MatMul; @@ -31,6 +32,13 @@ int MatMulFP32BaseCoder::ReSize() { ResizeParameter(); thread_count_ = MSMIN(thread_num_, UP_DIV(params_->col_align_, col_tile_)); thread_stride_ = UP_DIV(UP_DIV(params_->col_align_, col_tile_), thread_count_); + // can not call Malloc in DoCode,so move this runtime init to final resize + if (!params_->a_const_) { + MS_CHECK_RET_CODE(InitBufferA(), "InitBufferA failed"); + } + if (!params_->b_const_) { + MS_CHECK_RET_CODE(InitBufferB(), "InitBufferB failed"); + } return RET_OK; } @@ -45,17 +53,16 @@ int MatMulFP32BaseCoder::InitBiasData() { } void MatMulFP32BaseCoder::InitParameter() { + row_tile_ = C12NUM; if (target_ == kARM32A) { - row_tile_ = C12NUM; col_tile_ = C4NUM; } else { - row_tile_ = C12NUM; col_tile_ = C8NUM; } } void MatMulFP32BaseCoder::ResizeParameter() { - if (params_->row_ == 1 && !params_->b_const_) { + if (params_->row_ == 1) { vec_matmul_ = true; } params_->row_align_ = vec_matmul_ ? 
1 : UP_ROUND(params_->row_, row_tile_); @@ -66,12 +73,11 @@ int MatMulFP32BaseCoder::InitBufferA() { if (a_pack_ptr_ != nullptr) { return RET_OK; } + a_pack_ptr_size_ = static_cast(params_->batch * params_->row_align_ * params_->deep_ * sizeof(float)); if (params_->a_const_) { a_pack_ptr_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, kOnlineSize, kOnlinePackWeight)); } else { - a_pack_ptr_size_ = static_cast(params_->batch * params_->row_align_ * params_->deep_ * sizeof(float)); - a_pack_ptr_ = - reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, a_pack_ptr_size_, kOfflinePackWeight)); + a_pack_ptr_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, a_pack_ptr_size_, kWorkspace)); } MS_CHECK_PTR(a_pack_ptr_); return RET_OK; @@ -81,12 +87,11 @@ int MatMulFP32BaseCoder::InitBufferB() { if (b_pack_ptr_ != nullptr) { return RET_OK; } + b_pack_ptr_size_ = static_cast(params_->batch * params_->col_align_ * params_->deep_ * sizeof(float)); if (params_->b_const_) { b_pack_ptr_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, kOnlineSize, kOnlinePackWeight)); } else { - b_pack_ptr_size_ = static_cast(params_->batch * params_->col_align_ * params_->deep_ * sizeof(float)); - b_pack_ptr_ = - reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, b_pack_ptr_size_, kOfflinePackWeight)); + b_pack_ptr_ = reinterpret_cast(allocator_->Malloc(kNumberTypeFloat32, b_pack_ptr_size_, kWorkspace)); } MS_CHECK_PTR(b_pack_ptr_); return RET_OK; @@ -108,12 +113,9 @@ int MatMulFP32BaseCoder::Init() { MS_CHECK_RET_CODE(InitBiasData(), "InitBiasData failed"); if (params_->a_const_) { MS_CHECK_RET_CODE(InitBufferA(), "InitBufferA failed"); - MS_CHECK_RET_CODE(InitMatrixA(reinterpret_cast(input_tensor_->data_c())), "InitMatrixA failed"); } - if (params_->b_const_) { MS_CHECK_RET_CODE(InitBufferB(), "InitBufferB failed"); - MS_CHECK_RET_CODE(InitMatrixB(reinterpret_cast(filter_tensor_->data_c())), "InitMatrixB failed"); } return RET_OK; } @@ -124,12 +126,17 @@ int MatMulFP32BaseCoder::DoCode(CoderContext *const context) { // generate code .h .c std::vector asm_files; if (target_ == kARM32A) { - asm_files = {"MatmulFp32.S", "MatmulFp32Opt.S"}; + asm_files = {"MatmulFp32.S", "MatmulFp32Opt.S", "MatmulFp32Opt12x4.S"}; } else if (target_ == kARM64) { - asm_files = {"arm64/MatmulFp32.S", "MatmulFp32Opt.S", "arm64/MatVecMulFp32.S"}; + asm_files = {"MatmulFp32.S", "MatmulFp32Opt.S", "MatVecMulFp32.S"}; + } + std::vector h_files = {"nnacl/fp32/matmul_fp32.h", "wrapper/fp32/matmul_fp32_wrapper.h"}; + std::vector c_files = {"matmul_fp32.c", "matmul_fp32_wrapper.c"}; + if (de_quant_flag_) { + h_files.emplace_back("wrapper/fp32/dequant_int8_to_fp32_wrapper.h"); + c_files.emplace_back("dequant_int8_to_fp32_wrapper.c"); } - Collect(context, {"nnacl/fp32/matmul.h", "adapter/fp32/matmul_fp32_adapter.h"}, {"matmul.c", "matmul_fp32_adapter.c"}, - asm_files); + Collect(context, h_files, c_files, asm_files); NNaclFp32Serializer code; NNaclFp32Serializer init_code; code.CodeStruct("mat_mul_parameter", *params_); @@ -137,9 +144,12 @@ int MatMulFP32BaseCoder::DoCode(CoderContext *const context) { // do bias packing to init if (bias_ptr_) { init_code.CodeMallocExpression(bias_ptr_, bias_pack_ptr_size_); - init_code.CodeFunction("memcpy", bias_ptr_, bias_tensor_->data_c(), bias_pack_ptr_size_); + init_code.CodeFunction("memcpy", bias_ptr_, bias_tensor_, bias_pack_ptr_size_); } + // Get Tensor Pointer + std::string a_str = allocator_->GetRuntimeAddr(input_tensor_); + std::string b_str = 
allocator_->GetRuntimeAddr(filter_tensor_); std::string c_str = allocator_->GetRuntimeAddr(output_tensor_); std::string a_pack_str = allocator_->GetRuntimeAddr(a_pack_ptr_); std::string b_pack_str = allocator_->GetRuntimeAddr(b_pack_ptr_); @@ -147,12 +157,28 @@ int MatMulFP32BaseCoder::DoCode(CoderContext *const context) { // do const value packing to init if (!params_->a_const_) { code.CodeFunction("InitMatrixA", input_tensor_, a_pack_ptr_, "&mat_mul_parameter", vec_matmul_); + init_code.CodeMallocExpression(b_pack_ptr_, b_pack_ptr_size_); + std::string b_src_str = b_str; + if (de_quant_flag_) { + // reuse to b_pack_str + b_src_str = Dequant::GetInstance()->de_quant_buffer_str(); + std::string de_quant_function = Dequant::GetInstance()->GetMicroDeQuantFunction(filter_tensor_, b_str); + init_code << de_quant_function; + } // b_pack_str has been memset, no need to memset - init_code.CodeFunction("InitMatrixB", filter_tensor_, b_pack_ptr_, "&mat_mul_parameter", vec_matmul_); + init_code.CodeFunction("InitMatrixB", b_src_str, b_pack_ptr_, "&mat_mul_parameter", vec_matmul_); } if (!params_->b_const_) { + init_code.CodeMallocExpression(a_pack_str, a_pack_ptr_size_); + std::string a_src_str = a_str; + if (de_quant_flag_) { + // reuse to a_pack_str + a_src_str = Dequant::GetInstance()->de_quant_buffer_str(); + std::string de_quant_function = Dequant::GetInstance()->GetMicroDeQuantFunction(input_tensor_, a_str); + init_code << de_quant_function; + } // a_pack_str has been memset, no need to memset - init_code.CodeFunction("InitMatrixA", input_tensor_, a_pack_ptr_, "&mat_mul_parameter", vec_matmul_); + init_code.CodeFunction("InitMatrixA", a_src_str, a_pack_ptr_, "&mat_mul_parameter", vec_matmul_); code.CodeFunction("InitMatrixB", filter_tensor_, b_pack_ptr_, "&mat_mul_parameter", vec_matmul_); } @@ -165,13 +191,13 @@ int MatMulFP32BaseCoder::DoCode(CoderContext *const context) { } code << "for (int i = 0; i < " << params_->batch << "; ++i) {\n"; if (vec_matmul_) { - code << "\t\tbatch_a_ptr = " << a_pack_str << " + i * " << params_->deep_ << ";\n"; - code << "\t\tbatch_b_ptr = " << b_pack_str << " + i * " << params_->deep_ * params_->col_ << ";\n"; - code << "\t\tbatch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n"; + code << "\t\tfloat *batch_a_ptr = " << a_pack_str << " + i * " << params_->deep_ << ";\n"; + code << "\t\tfloat *batch_b_ptr = " << b_pack_str << " + i * " << params_->deep_ * params_->col_ << ";\n"; + code << "\t\tfloat *batch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n"; } else { - code << "\t\tbatch_a_ptr = " << a_pack_str << " + i * " << params_->row_align_ * params_->deep_ << ";\n"; - code << "\t\tbatch_b_ptr = " << b_pack_str << " + i * " << params_->deep_ * params_->col_align_ << ";\n"; - code << "\tbatch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n"; + code << "\t\tfloat *batch_a_ptr = " << a_pack_str << " + i * " << params_->row_align_ * params_->deep_ << ";\n"; + code << "\t\tfloat *batch_b_ptr = " << b_pack_str << " + i * " << params_->deep_ * params_->col_align_ << ";\n"; + code << "\t\tfloat *batch_c_ptr = " << c_str << " + i * " << params_->row_ * params_->col_ << ";\n"; } if (vec_matmul_) { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h index 7d8a85fe53..088bee45a0 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h +++ 
b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_base_coder.h @@ -56,6 +56,7 @@ class MatMulFP32BaseCoder : public OperatorCoder { float *b_pack_ptr_ = nullptr; float *bias_ptr_{nullptr}; bool vec_matmul_{false}; + bool de_quant_flag_{false}; private: int col_tile_{0}; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.cc index c58f4025cd..5f10e3fa8e 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/matmul_fp32_coder.cc @@ -18,6 +18,7 @@ #include #include "coder/log.h" #include "coder/opcoders/file_collector.h" +#include "coder/opcoders/nnacl/dequant/de_quant.h" using mindspore::schema::PrimitiveType_MatMul; @@ -77,10 +78,12 @@ int MatMulFP32Coder::Prepare(CoderContext *const context) { params_->b_const_ = (filter_tensor_->data_c() != nullptr); MatMulFP32BaseCoder::InitParameter(); if (params_->a_const_) { - InitShapeA(); + de_quant_flag_ = Dequant::GetInstance()->CheckDequantFlag(input_tensor_); + MS_CHECK_RET_CODE(InitShapeA(), "MatMulFP32Coder init_shape_a failed"); } if (params_->b_const_) { - InitShapeB(); + de_quant_flag_ = Dequant::GetInstance()->CheckDequantFlag(filter_tensor_); + MS_CHECK_RET_CODE(InitShapeB(), "MatMulFP32Coder init_shape_b failed"); } MS_CHECK_RET_CODE(MatMulFP32BaseCoder::Init(), "MatMulFP32Coder init failed"); return ReSize(); diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc index ae51dc35b4..30fb30616f 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.cc @@ -14,14 +14,14 @@ * limitations under the License. 
*/ -#include "micro/coder/opcoders/nnacl/fp32/pad_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/pad_fp32_coder.h" #include #include -#include "micro/coder/log.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -#include "micro/coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Pad; +using mindspore::schema::PrimitiveType_PadFusion; namespace mindspore::lite::micro::nnacl { @@ -99,5 +99,5 @@ int PadFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pad, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_PadFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc index 7ce3b2c343..d06f5eef3f 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.cc @@ -21,7 +21,8 @@ #include "coder/log.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::lite::micro::nnacl { @@ -46,7 +47,7 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) { float minf = -FLT_MAX; float maxf = FLT_MAX; if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) { - Collect(context, {"nnacl/kernel/fp32/max_pooling_fp32_slim.h"}, {"max_pooling_fp32_slim.c"}); + Collect(context, {"nnacl/fp32/pooling_fp32.h"}, {"pooling_fp32.c"}); switch (pooling_parameter->act_type_) { case ActType_Relu: { minf = 0.f; @@ -63,14 +64,9 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) { } } - if (thread_num_ > 1) { - code.CodeBaseStruct("PoolingFp32Args", "args", input_tensor_, output_tensor_, "&pooling_parameter", minf, maxf); - CODE_PARALLEL_FUNC("MaxPoolingFp32Run"); - } else { - code.CodeFunction("MaxPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf); - } + code.CodeFunction("MaxPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf); } else { - Collect(context, {"nnacl/fp32/pooling.h"}, {"pooling.c"}); + Collect(context, {"nnacl/fp32/pooling_fp32.h"}, {"pooling_fp32.c"}); switch (pooling_parameter->act_type_) { case ActType_Relu: { minf = 0.f; @@ -86,12 +82,7 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) { break; } } - if (thread_num_ > 1) { - code.CodeBaseStruct("PoolingFp32Args", "args", input_tensor_, output_tensor_, "&pooling_parameter", minf, maxf); - CODE_PARALLEL_FUNC("AvgPoolingFp32Run"); - } else { - code.CodeFunction("AvgPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf); - } + code.CodeFunction("AvgPooling", input_tensor_, output_tensor_, "&pooling_parameter", task_id, minf, maxf); } MS_LOG(INFO) << "PoolingFp32Code has been called"; @@ -99,5 +90,6 @@ int PoolingFP32Coder::DoCode(CoderContext *const context) { return lite::RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Pooling, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_AvgPoolFusion, CPUOpCoderCreator) 
+REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_MaxPoolFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h index 30429acbdd..ae66ea8a0f 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/pooling_fp32_coder.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_POOLFP32_CODER_H_ #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" namespace mindspore::lite::micro::nnacl { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc index 46999a64f2..52222fe6c8 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/power_fp32_coder.cc @@ -20,7 +20,7 @@ #include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Power; +using mindspore::schema::PrimitiveType_PowFusion; namespace mindspore::lite::micro::nnacl { @@ -55,6 +55,6 @@ int PowerFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Power, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_PowFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.cc index 55c71f371c..fb005efd5a 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/reduce_fp32_coder.cc @@ -20,7 +20,7 @@ #include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; namespace mindspore::lite::micro::nnacl { int ReduceFP32Coder::Prepare(CoderContext *const context) { @@ -116,6 +116,6 @@ int ReduceFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Reduce, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ReduceFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc index 4e901cb237..239ddf67ef 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/scale_fp32_coder.cc @@ -18,8 +18,9 @@ #include "coder/log.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" #include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::lite::micro::nnacl { ScaleFP32Coder::~ScaleFP32Coder() { @@ -131,34 +132,26 @@ int ScaleFP32Coder::DoCode(CoderContext *const context) { NNaclFp32Serializer code; code.CodeStruct("scale_parameter", *scale_param_); - if (thread_num_ > 1) { -
code.CodeBaseStruct("ScaleFp32Args", "args", input_tensor_, output_tensor_, scale_tensor, offset_tensor, + switch (scale_param_->activation_type_) { + case schema::ActivationType_RELU6: + code.CodeFunction("DoScaleRelu6", input_tensor_, output_tensor_, scale_tensor, offset_tensor, kDefaultTaskId, "&scale_parameter"); - CODE_PARALLEL_FUNC("ScaleFp32Run"); - } else { - int task_id = 0; - switch (scale_param_->activation_type_) { - case schema::ActivationType_RELU6: - code.CodeFunction("DoScaleRelu6", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id, - "&scale_parameter"); - break; - case schema::ActivationType_RELU: - code.CodeFunction("DoScaleRelu", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id, - "&scale_parameter"); - break; - case schema::ActivationType_NO_ACTIVATION: - code.CodeFunction("DoScale", input_tensor_, output_tensor_, scale_tensor, offset_tensor, task_id, - "&scale_parameter"); - break; - default: - MS_LOG(ERROR) << "Scale does not support activation type " << scale_param_->activation_type_; - return RET_ERROR; - } + break; + case schema::ActivationType_RELU: + code.CodeFunction("DoScaleRelu", input_tensor_, output_tensor_, scale_tensor, offset_tensor, kDefaultTaskId, + "&scale_parameter"); + break; + case schema::ActivationType_NO_ACTIVATION: + code.CodeFunction("DoScale", input_tensor_, output_tensor_, scale_tensor, offset_tensor, kDefaultTaskId, + "&scale_parameter"); + break; + default: + MS_LOG(ERROR) << "Scale does not support activation type " << scale_param_->activation_type_; + return RET_ERROR; } - MS_LOG(INFO) << "ScaleFP32Code has been called"; context->AppendCode(code.str()); return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Scale, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_ScaleFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc deleted file mode 100644 index 15d31e4b41..0000000000 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "coder/opcoders/nnacl/fp32/slice_fp32_coder.h" -#include -#include "nnacl/slice_parameter.h" -#include "src/ops/slice.h" -#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -#include "coder/opcoders/file_collector.h" - -using mindspore::schema::PrimitiveType_Slice; -namespace mindspore::lite::micro::nnacl { -int SliceFP32Coder::Prepare(CoderContext *const context) { return RET_OK; } - -int SliceFP32Coder::DoCode(CoderContext *const context) { - // generate code .h .c - Collect(context, {"nnacl/slice_parameter.h", "nnacl/fp32/slice.h"}, {"slice.c"}); - - auto param = reinterpret_cast(parameter_); - auto primitive_slice = reinterpret_cast(OperatorCoder::primitive()); - std::vector begin = primitive_slice->GetPostProcessBegin(); - std::vector size = primitive_slice->GetPostProcessSize(); - std::vector input_shape = input_tensor_->shape(); - NNaclFp32Serializer code; - for (int i = 0; i < param->param_length_; i++) { - param->shape_[i] = input_shape.at(i); - } - - for (int i = 0; i < param->param_length_; i++) { - param->begin_[i] = begin.at(i); - } - - for (int i = 0; i < param->param_length_; i++) { - int tmp_size = size.at(i); - if (size.at(i) < 0) { - tmp_size = input_shape.at(i) - begin.at(i); - } - param->end_[i] = (begin.at(i) + tmp_size); - } - - for (int i = 0; i < param->param_length_; i++) { - if (size.at(i) < 0) { - param->size_[i] = (input_shape.at(i) - begin.at(i)); - continue; - } - param->size_[i] = size.at(i); - } - - code.CodeStruct("slice_parameter", *param); - - // call the op function - if (param->param_length_ < DIMENSION_4D) { - code.CodeFunction("PadSliceParameterTo4D", "&slice_parameter"); - } - code.CodeFunction("DoSliceNoParallel", input_tensor_, output_tensor_, "&slice_parameter"); - context->AppendCode(code.str()); - return RET_OK; -} - -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Slice, CPUOpCoderCreator) -} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h deleted file mode 100644 index 97acb64263..0000000000 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/slice_fp32_coder.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SLICE_FP32_CODER_H_ -#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_SLICE_FP32_CODER_H_ - -#include <vector> -#include "coder/opcoders/op_coder.h" - -namespace mindspore::lite::micro::nnacl { -class SliceFP32Coder final : public OperatorCoder { - public: - SliceFP32Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, - const Model::Node *node, size_t node_index, Target target) - : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} - - ~SliceFP32Coder() override = default; - - int Prepare(CoderContext *const context) override; - - int DoCode(CoderContext *const context) override; -}; -} // namespace mindspore::lite::micro::nnacl -#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SLICE_FP32_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.cc index b949590ebb..7ffd6e63e2 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.cc @@ -20,7 +20,7 @@ #include "schema/inner/ops_generated.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::lite::micro::nnacl { @@ -48,7 +48,7 @@ int SoftMaxFP32Coder::Prepare(CoderContext *const context) { } int SoftMaxFP32Coder::DoCode(CoderContext *const context) { - Collect(context, {"nnacl/fp32/softmax.h"}, {"softmax.c"}); + Collect(context, {"nnacl/fp32/softmax_fp32.h"}, {"softmax_fp32.c", "exp_fp32.c"}); NNaclFp32Serializer code; code.CodeStruct("softmax_parameter", *softmax_param_); code.CodeFunction("memset", sum_data_, "0", sum_data_size_); @@ -58,6 +58,6 @@ int SoftMaxFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_SoftMax, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Softmax, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h index 92414d6737..322d4d1c51 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/softmax_fp32_coder.h @@ -17,7 +17,7 @@ #define MINDSPORE_LITE_MICRO_CODER_SOFTMAX_CODER_H_ #include -#include "micro/coder/opcoders/base/softmax_base_coder.h" +#include "coder/opcoders/base/softmax_base_coder.h" namespace mindspore::lite::micro::nnacl { class SoftMaxFP32Coder final : public SoftmaxBaseCoder { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.cc new file mode 100644 index 0000000000..739287893f --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/fp32/splice_fp32_coder.h" +#include +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "src/common/log_adapter.h" +#include "nnacl/splice_parameter.h" +using mindspore::schema::PrimitiveType_Splice; +namespace mindspore::lite::micro::nnacl { +int SpliceFP32Coder::DoCode(CoderContext *const context) { + auto splice_parameter = reinterpret_cast(parameter_); + // to make forward_indexes nullptr + splice_parameter->forward_indexes_ = nullptr; + std::vector src_shape = input_tensor_->shape(); + std::vector dst_shape = output_tensor_->shape(); + if (src_shape.size() != dst_shape.size() || src_shape.size() != kInputSize2 || dst_shape.size() != kInputSize2) { + MS_LOG(ERROR) << "SpliceFP32Coder src_shape size not equal to dst_shape"; + return RET_ERROR; + } + int src_row = src_shape.at(kInputIndex); + int dst_row = dst_shape.at(kInputIndex); + int src_col = src_shape.at(kBiasIndex); + int dst_col = dst_shape.at(kBiasIndex); + if (src_row != dst_row) { + MS_LOG(ERROR) << "SpliceFP32Coder src_row not equal to dst_row"; + return RET_ERROR; + } + if (src_col * splice_parameter->context_dim_ != dst_col) { + MS_LOG(ERROR) << "SpliceFP32Coder src_col not match to dst_col"; + return RET_ERROR; + } + Collect(context, {"nnacl/splice_parameter.h", "nnacl/fp32/splice_fp32.h"}, {"splice_fp32.c"}); + NNaclFp32Serializer code; + code.CodeStruct("splice_parameter", *splice_parameter); + code.CodeFunction("SpliceFp32", input_tensor_, src_row, src_col, "&splice_parameter", output_tensor_, dst_row, + dst_col); + context->AppendCode(code.str()); + MS_LOG(DEBUG) << "SpliceFP32Coder do_code ok"; + return RET_OK; +} +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Splice, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h new file mode 100644 index 0000000000..a60b58fae6 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/splice_fp32_coder.h @@ -0,0 +1,35 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
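SpliceFP32Coder::DoCode above validates shapes before emitting the SpliceFp32 call: row counts must match, and the output column count must equal the input column count times context_dim_. The same contract restated as a standalone check:

```cpp
#include <cstdio>

// The splice shape contract enforced above: rows must match, and the
// output columns are the input columns stacked context_dim_ times.
bool CheckSpliceShapes(int src_row, int src_col, int dst_row, int dst_col,
                       int context_dim) {
  if (src_row != dst_row) return false;        // row counts must agree
  return src_col * context_dim == dst_col;     // cols scale by context size
}

int main() {
  // e.g. 40 input features with 3 context frames -> 120 output features
  std::printf("%s\n", CheckSpliceShapes(10, 40, 10, 120, 3) ? "ok" : "bad");
}
```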
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_SPLICE_FP32_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_SPLICE_FP32_CODER_H_ +#include +#include "coder/opcoders/op_coder.h" +namespace mindspore::lite::micro::nnacl { +class SpliceFP32Coder final : public OperatorCoder { + public: + SpliceFP32Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~SpliceFP32Coder() override = default; + + int Prepare(CoderContext *const context) override { return RET_OK; } + + int DoCode(CoderContext *const context) override; +}; +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_FP32_SPLICE_FP32_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc index 0376e42bac..48d071f599 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/tile_fp32_coder.cc @@ -20,7 +20,7 @@ #include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" #include "coder/opcoders/file_collector.h" -using mindspore::schema::PrimitiveType_Tile; +using mindspore::schema::PrimitiveType_TileFusion; namespace mindspore::lite::micro::nnacl { void TileFP32Coder::ComputeStrides(const int *shape, int *strides, int ndim) const { @@ -63,6 +63,6 @@ int TileFP32Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_Tile, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeFloat32, PrimitiveType_TileFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc index efcab9f252..98e4dcca01 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.cc @@ -14,11 +14,11 @@ * limitations under the License. 
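TileFP32Coder::ComputeStrides above (only its signature is visible in this hunk) computes per-dimension strides; assuming the usual row-major layout, a self-contained version of that computation looks like this:

```cpp
#include <cstdio>

// Row-major stride computation as tile/transpose-style coders use it:
// the stride of dim i is the product of all dimension sizes after i.
void ComputeStrides(const int *shape, int *strides, int ndim) {
  int stride = 1;
  for (int i = ndim - 1; i >= 0; --i) {
    strides[i] = stride;  // elements skipped when index i increases by 1
    stride *= shape[i];
  }
}

int main() {
  int shape[3] = {2, 3, 4}, strides[3];
  ComputeStrides(shape, strides, 3);
  std::printf("%d %d %d\n", strides[0], strides[1], strides[2]);  // 12 4 1
}
```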
*/ -#include "micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h" +#include "coder/opcoders/nnacl/fp32/transpose_fp32_coder.h" #include #include -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" -#include "micro/coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h" +#include "coder/opcoders/file_collector.h" using mindspore::schema::PrimitiveType_Transpose; namespace mindspore::lite::micro::nnacl { @@ -83,8 +83,8 @@ int TransposeFp32Coder::DoCode(CoderContext *const context) { NNaclFp32Serializer code; code.CodeStruct("transpose_parameter", *transpose_parameter_); - code.CodeFunction("DoTransposeFp32", input_tensor_, output_tensor_, in_shape_, out_shape_, "&transpose_parameter", - task_id, num_unit_thread, dim_size_, position_); + code.CodeFunction("DoTransposeFp32", input_tensor_, output_tensor_, in_shape_, out_shape_, + "(TransposeParameter *)&transpose_parameter", task_id, num_unit_thread, dim_size_, position_); context->AppendCode(code.str()); return RET_OK; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h index e4a150bcc7..01ec048e5c 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/fp32/transpose_fp32_coder.h @@ -39,14 +39,14 @@ class TransposeFp32Coder final : public OperatorCoder { private: TransposeParameter *transpose_parameter_ = nullptr; - int thread_num_ = 1; - int thread_h_stride_ = 0; - int thread_h_num_ = 0; - int num_unit_ = 0; - int *in_shape_ = nullptr; - int *out_shape_ = nullptr; - int *dim_size_ = nullptr; - int *position_ = nullptr; + int thread_num_{1}; + int thread_h_stride_{0}; + int thread_h_num_{0}; + int num_unit_{0}; + int *in_shape_{nullptr}; + int *out_shape_{nullptr}; + int *dim_size_{nullptr}; + int *position_{nullptr}; }; } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc new file mode 100644 index 0000000000..ff7ba5d327 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/activation_int8_coder.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "coder/opcoders/nnacl/int8/sigmoid_int8_coder.h" +#include "coder/opcoders/nnacl/int8/relux_int8_coder.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/activation_fp32.h" +#include "schema/model_generated.h" +#include "src/common/version_manager.h" + +using mindspore::schema::PrimitiveType_Activation; + +namespace mindspore::lite::micro::nnacl { + +std::unique_ptr CPUActivationINT8CoderCreator(const std::vector &in_tensors, + const std::vector &out_tensors, + const Model::Node *node, size_t node_index, + Target target) { + const void *primitive_c = node->primitive_; + if (primitive_c == nullptr) { + return nullptr; + } + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + ParameterGen parameter_gen = + PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version); + if (parameter_gen == nullptr) { + MS_LOG(ERROR) << "parameter generator is nullptr"; + return nullptr; + } + OpParameter *parameter = parameter_gen(node->primitive_); + if (parameter == nullptr) { + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " + << schema::EnumNamePrimitiveType((schema::PrimitiveType)GetPrimitiveType(node->primitive_)); + return nullptr; + } + auto type = (reinterpret_cast(parameter))->type_; + + std::unique_ptr coder; + switch (static_cast(type)) { + case schema::ActivationType_SIGMOID: + coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); + break; + case schema::ActivationType_RELU: + coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); + break; + case schema::ActivationType_RELU6: + coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); + break; + default: + break; + } + + if (coder == nullptr) { + MS_LOG(ERROR) << "create conv2d int8 coder failed"; + return nullptr; + } + return coder; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Activation, CPUActivationINT8CoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc index afe7085610..830be1a660 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.cc @@ -14,17 +14,18 @@ * limitations under the License. 
*/ -#include "micro/coder/opcoders/nnacl/int8/add_int8_coder.h" +#include "coder/opcoders/nnacl/int8/add_int8_coder.h" #include #include #include "nnacl/int8/quantize.h" -#include "micro/coder/log.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" -#include "micro/coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; -namespace mindspore::lite::micro { +namespace mindspore::lite::micro::nnacl { int AddInt8Coder::Prepare(CoderContext *const context) { input0 = input_tensors().at(0); @@ -38,26 +39,8 @@ int AddInt8Coder::Prepare(CoderContext *const context) { return RET_OK; } -int AddInt8Coder::DoCode(CoderContext *const context) { - Collect(context, {"wrapper/int8/conv1x1_init_int8.h"}, {"add_int8_wrapper.c", "add_int8.c", "thread_pool.c"}); - - nnacl::NNaclInt8Serializer code; - - code.CodeStruct("para", para_); - code.CodeStruct("arith_para", *arith_para_); - code.CodeBaseStruct("AddArgs", "args", "para", "arith_para", in_size_, out_size_, thread_num_s_, elements_num_, - support_opt_add_, input0, input1, output_tensor_); - - if (arith_para_->broadcasting_) { - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddBroadcastRun", "&args", thread_num_s_); - } else { - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AddRun", "&args", thread_num_s_); - } - - return RET_OK; -} - int AddInt8Coder::Init() { + arith_para_ = reinterpret_cast(parameter_); para_.in0_args_.zp_ = input0->quant_params().front().zeroPoint * -1; para_.in1_args_.zp_ = input1->quant_params().front().zeroPoint * -1; para_.out_zp_ = output_tensor_->quant_params().front().zeroPoint; @@ -152,5 +135,32 @@ int AddInt8Coder::ReSize() { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Add, CPUOpCoderCreator) -} // namespace mindspore::lite::micro +int AddInt8Coder::DoCode(CoderContext *const context) { + Collect(context, {"wrapper/int8/add_int8_wrapper.h"}, + {"add_int8_wrapper.c", "add_int8.c", "arithmetic_base.c", "arithmetic_int8.c", "thread_pool.c"}); + + nnacl::NNaclInt8Serializer code; + + code.CodeStruct("para", para_); + code.CodeStruct("arith_para", *arith_para_); + code.CodeBaseStruct("AddInt8Args", kRunArgs, "¶", "&arith_para", in_size_, out_size_, gThreadNum, elements_num_, + support_opt_add_, input0, input1, output_tensor_); + if (support_parallel_) { + if (arith_para_->broadcasting_) { + code.CodeFunction(kParallelLaunch, gThreadPool, "AddBroadcastInt8Run", kRunArgsAddr, gThreadNum); + } else { + code.CodeFunction(kParallelLaunch, gThreadPool, "AddInt8Run", kRunArgsAddr, gThreadNum); + } + } else { + if (arith_para_->broadcasting_) { + code.CodeFunction("AddBroadcastInt8Run", kRunArgsAddr, kDefaultTaskId); + } else { + code.CodeFunction("AddInt8Run", kRunArgsAddr, kDefaultTaskId); + } + } + context->AppendCode(code.str()); + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_AddFusion, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h index c1064a89a9..27eb507c8e 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h +++ 
b/mindspore/lite/micro/coder/opcoders/nnacl/int8/add_int8_coder.h @@ -18,17 +18,15 @@ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_ #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/int8/add_int8.h" -namespace mindspore::lite::micro { -class AddInt8Coder : public OperatorCoder { +namespace mindspore::lite::micro::nnacl { +class AddInt8Coder final : public OperatorCoder { public: AddInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, const Model::Node *node, size_t node_index, Target target) - : OperatorCoder(in_tensors, out_tensors, node, node_index, target) { - arith_para_ = reinterpret_cast(parameter_); - } + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} ~AddInt8Coder() override = default; @@ -49,5 +47,5 @@ class AddInt8Coder : public OperatorCoder { int elements_num_{0}; bool support_opt_add_{false}; }; -} // namespace mindspore::lite::micro +} // namespace mindspore::lite::micro::nnacl #endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_ADD_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.cc new file mode 100644 index 0000000000..7f22f79ef4 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.cc @@ -0,0 +1,162 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
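AddInt8Coder::DoCode above emits either a ParallelLaunch call or a single-task call depending on support_parallel_. A sketch of that emission logic, using g_thread_pool and g_thread_num as stand-ins for the gThreadPool and gThreadNum symbols referenced in the diff:

```cpp
#include <iostream>
#include <sstream>
#include <string>

// Sketch of the support_parallel_ branch: with a thread pool the generated
// code launches <Func>Run through ParallelLaunch; otherwise it calls the
// same runner once with task id 0 (kDefaultTaskId in the real coder).
std::string EmitRun(bool support_parallel, bool broadcasting) {
  std::ostringstream code;
  std::string runner = broadcasting ? "AddBroadcastInt8Run" : "AddInt8Run";
  if (support_parallel) {
    code << "ParallelLaunch(g_thread_pool, " << runner
         << ", &args, g_thread_num);\n";
  } else {
    code << runner << "(&args, 0);\n";  // single-threaded fallback
  }
  return code.str();
}

int main() { std::cout << EmitRun(false, true) << EmitRun(true, false); }
```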
+ */ + +#include "coder/opcoders/nnacl/int8/batchnorm_int8_coder.h" +#include +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" + +using mindspore::schema::PrimitiveType_BatchNorm; + +namespace mindspore::lite::micro::nnacl { + +int BatchNormInt8Coder::Prepare(CoderContext *const context) { + std::vector input_shapes = input_tensor_->shape(); + size_t n_dim = input_shapes.size(); + batchnorm_param_->channel_ = input_shapes[n_dim - 1]; + batchnorm_param_->units_ = 1; + for (size_t i = 0; i < n_dim - 1; i++) { + batchnorm_param_->units_ *= input_shapes[i]; + } + batchnorm_param_->op_parameter_.thread_num_ = + MSMIN(batchnorm_param_->op_parameter_.thread_num_, batchnorm_param_->channel_); + if (target_ == kARM32M) { + batchnorm_param_->unit_ = batchnorm_param_->units_; + } else { + batchnorm_param_->unit_ = UP_DIV(batchnorm_param_->units_, kMaxThreadNumSupported); + } + if (batchnorm_param_->fused_) { + MS_CHECK_RET_CODE(InitFusedConstTensor(), "InitFusedConstTensor failed"); + } else { + MS_CHECK_RET_CODE(InitConstTensor(), "InitConstTensor failed"); + } + + return RET_OK; +} +int BatchNormInt8Coder::DoCode(CoderContext *context) { + std::vector headers = {"nnacl/slice_parameter.h"}; + std::vector cFiles = {"batchnorm_int8.c"}; + NNaclInt8Serializer code; + + code.CodeStruct("param", *batchnorm_param_); + code.CodeFunction("BatchNormInt8", output_tensor_, input_tensor_, alpha_addr_, beta_addr_, kDefaultTaskId, "¶m"); + + Collect(context, headers, cFiles); + context->AppendCode(code.str()); + + return RET_OK; +} + +int BatchNormInt8Coder::InitConstTensor() { + MS_CHECK_TRUE(input_tensors_.size() >= kInputSize2, "input tensors number not match"); + Tensor *input = input_tensor_; + Tensor *mean = input_tensors_.at(1); + Tensor *variance = input_tensors_.at(2); + Tensor *output = output_tensor_; + + auto mean_ptr = reinterpret_cast(mean->MutableData()); + auto var_ptr = reinterpret_cast(variance->MutableData()); + + MS_CHECK_PTR(mean_ptr); + MS_CHECK_PTR(var_ptr); + + alpha_addr_ = reinterpret_cast( + allocator_->Malloc(kNumberTypeFloat, mean->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(alpha_addr_); + beta_addr_ = reinterpret_cast( + allocator_->Malloc(kNumberTypeFloat, variance->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(beta_addr_); + // compute alpha, beta; + auto eps = batchnorm_param_->epsilon_; + int32_t zp_in = input->quant_params().at(0).zeroPoint; + int32_t zp_mean = mean->quant_params().at(0).zeroPoint; + int32_t zp_var = variance->quant_params().at(0).zeroPoint; + int32_t zp_out = output->quant_params().at(0).zeroPoint; + auto s_in = static_cast(input->quant_params().at(0).scale); + auto s_mean = static_cast(mean->quant_params().at(0).scale); + auto s_var = static_cast(variance->quant_params().at(0).scale); + auto s_out = static_cast(output->quant_params().at(0).scale); + + for (int i = 0; i < batchnorm_param_->channel_; ++i) { + float tmp = s_out * sqrt(eps + s_var * (var_ptr[i] - zp_var)); + float tmp_a = s_in / tmp; + float tmp_b = zp_out - tmp_a * zp_in - (s_mean * (mean_ptr[i] - zp_mean)) / tmp; + alpha_addr_[i] = tmp_a; + beta_addr_[i] = tmp_b; + } + + return RET_OK; +} + +int BatchNormInt8Coder::InitFusedConstTensor() { + MS_CHECK_TRUE(input_tensors_.size() >= 5, "input tensors number not match"); + Tensor *input = input_tensors_.at(0); + Tensor *scale = input_tensors_.at(1); + Tensor *offset = 
input_tensors_.at(2); + Tensor *mean = input_tensors_.at(3); + Tensor *variance = input_tensors_.at(4); + Tensor *output = output_tensor_; + + auto scale_ptr = reinterpret_cast(scale->MutableData()); + auto offset_ptr = reinterpret_cast(offset->MutableData()); + auto mean_ptr = reinterpret_cast(mean->MutableData()); + auto var_ptr = reinterpret_cast(variance->MutableData()); + + MS_CHECK_PTR(scale_ptr); + MS_CHECK_PTR(offset_ptr); + MS_CHECK_PTR(mean_ptr); + MS_CHECK_PTR(var_ptr); + + alpha_addr_ = reinterpret_cast( + allocator_->Malloc(kNumberTypeFloat, mean->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(alpha_addr_); + beta_addr_ = reinterpret_cast( + allocator_->Malloc(kNumberTypeFloat, variance->ElementsNum() * sizeof(float), kOfflinePackWeight)); + MS_CHECK_PTR(beta_addr_); + // compute alpha, beta; + float eps = batchnorm_param_->epsilon_; + int32_t zp_in = input->quant_params().at(0).zeroPoint; + int32_t zp_scale = scale->quant_params().at(0).zeroPoint; + int32_t zp_offset = offset->quant_params().at(0).zeroPoint; + int32_t zp_mean = mean->quant_params().at(0).zeroPoint; + int32_t zp_var = variance->quant_params().at(0).zeroPoint; + int32_t zp_out = output->quant_params().at(0).zeroPoint; + auto s_in = static_cast(input->quant_params().at(0).scale); + auto s_scale = static_cast(scale->quant_params().at(0).scale); + auto s_offset = static_cast(offset->quant_params().at(0).scale); + auto s_mean = static_cast(mean->quant_params().at(0).scale); + auto s_var = static_cast(variance->quant_params().at(0).scale); + auto s_out = static_cast(output->quant_params().at(0).scale); + + float mul_12 = s_in * s_scale; + float mul_24 = s_scale * s_mean; + float div_36 = s_offset / s_out; + for (int i = 0; i < batchnorm_param_->channel_; ++i) { + float tmp = s_out * sqrt(eps + s_var * (var_ptr[i] - zp_var)); + float tmp_a = (mul_12 * (scale_ptr[i] - zp_scale)) / tmp; + float tmp_b = zp_out + div_36 * (offset_ptr[i] - zp_offset) - tmp_a * zp_in - + (mul_24 * (scale_ptr[i] - zp_scale) * (mean_ptr[i] - zp_mean)) / tmp; + alpha_addr_[i] = tmp_a; + beta_addr_[i] = tmp_b; + } + + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_BatchNorm, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h new file mode 100644 index 0000000000..0fdd560824 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/batchnorm_int8_coder.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
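The non-fused InitConstTensor above folds the int8 BatchNorm into per-channel alpha and beta so the generated kernel reduces to a single multiply-add per element. The same arithmetic restated for one channel, with arbitrary sample quant values:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Per-channel fold used above:
//   tmp   = s_out * sqrt(eps + s_var * (var - zp_var))
//   alpha = s_in / tmp
//   beta  = zp_out - alpha * zp_in - s_mean * (mean - zp_mean) / tmp
// so inference is y = alpha * x + beta over the quantized input.
int main() {
  float eps = 1e-5f;
  float s_in = 0.02f, s_mean = 0.01f, s_var = 0.015f, s_out = 0.03f;
  int zp_in = 3, zp_mean = 0, zp_var = 1, zp_out = -2;
  int8_t mean = 20, var = 50;  // one channel's quantized statistics

  float tmp = s_out * std::sqrt(eps + s_var * (var - zp_var));
  float alpha = s_in / tmp;
  float beta = zp_out - alpha * zp_in - (s_mean * (mean - zp_mean)) / tmp;
  std::printf("alpha=%f beta=%f\n", alpha, beta);
}
```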
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_BATCHNORM_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_BATCHNORM_INT8_CODER_H_ + +#include +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/batchnorm_parameter.h" + +namespace mindspore::lite::micro::nnacl { +class BatchNormInt8Coder final : public OperatorCoder { + public: + BatchNormInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) { + batchnorm_param_ = reinterpret_cast(parameter_); + } + + ~BatchNormInt8Coder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *context) override; + + private: + int InitConstTensor(); + int InitFusedConstTensor(); + + float *alpha_addr_{nullptr}; + float *beta_addr_{nullptr}; + BatchNormParameter *batchnorm_param_; +}; +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_BATCHNORM_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc index 99c217c990..17a131fe18 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.cc @@ -21,6 +21,7 @@ #include "nnacl/int8/quantize.h" #include "coder/opcoders/file_collector.h" #include "coder/log.h" +#include "coder/opcoders/parallel.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" int MallocQuantArgForConcat(ConcatQuantArg *quant_arg, size_t input_num) { @@ -37,7 +38,6 @@ int ConcatInt8Coder::Prepare(CoderContext *const context) { concat_param_->input_shapes_ = nullptr; size_t input_num = input_tensors().size(); - MS_CHECK_PTR(input_data_); MS_CHECK_RET_CODE(MallocQuantArgForConcat(&concat_param_->quant_arg_, input_num), "Null pointer reference: quant_concat_parm_->in_quant_args_."); for (int i = 0; i < static_cast(input_num); i++) { @@ -60,7 +60,10 @@ int ConcatInt8Coder::Prepare(CoderContext *const context) { concat_param_->input_shapes_ = reinterpret_cast(malloc(sizeof(int *) * input_num)); MS_CHECK_PTR(concat_param_->input_shapes_); for (int i = 0; i < static_cast(input_num); i++) { - concat_param_->input_shapes_[i] = reinterpret_cast(input_tensors().at(i)->shape().data()); + auto in_shape = input_tensors_.at(i)->shape(); + concat_param_->input_shapes_[i] = reinterpret_cast(malloc(in_shape.size() * sizeof(int))); + MS_CHECK_PTR(concat_param_->input_shapes_[i]); + memcpy(reinterpret_cast(concat_param_->input_shapes_[i]), in_shape.data(), sizeof(int) * in_shape.size()); } before_axis_size = 1; @@ -70,7 +73,10 @@ int ConcatInt8Coder::Prepare(CoderContext *const context) { int64_t after_axis_size = 1; int output_dim = static_cast(output_tensor_->shape().size()); - concat_param_->output_shapes_ = output_tensor_->shape().data(); + concat_param_->output_shapes_ = reinterpret_cast(malloc(output_dim * sizeof(int))); + MS_CHECK_PTR(concat_param_->output_shapes_); + memcpy(reinterpret_cast(concat_param_->output_shapes_), output_tensor_->shape().data(), + sizeof(int) * output_dim); for (int i = axis_ + 1; i < output_dim; i++) { after_axis_size *= concat_param_->output_shapes_[i]; } @@ -84,7 +90,8 @@ int ConcatInt8Coder::DoCode(CoderContext *const context) { count_unit_ = thread_num_ > 1 ? 
UP_DIV(before_axis_size, thread_num_) : before_axis_size; concat_param_->count_unit_ = count_unit_; - Collect(context, {"nnacl/int8/concat_int8.h"}, {"concat_int8.c"}); + Collect(context, {"nnacl/int8/concat_int8.h", "wrapper/int8/concat_int8_wrapper.h"}, + {"concat_int8.c", "concat_int8_wrapper.c"}); NNaclInt8Serializer code; int in_tensor_count = input_tensors().size(); @@ -96,15 +103,12 @@ int ConcatInt8Coder::DoCode(CoderContext *const context) { } code.CodeStruct("concat_param", *concat_param_, in_tensor_count, input_tensor_->shape().size(), output_tensor_->shape().size()); - - if (thread_num_ > 1) { - code.CodeBaseStruct("ConcatInt8Args", "args", "input_data", output_tensor_, "&concat_param", axis_, - before_axis_size, count_unit_); - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "ConcatInt8Run", "&args", "thread_num"); + code.CodeBaseStruct("ConcatInt8Args", kRunArgs, "input_data", output_tensor_, "&concat_param", axis_, + before_axis_size, count_unit_); + if (support_parallel_) { + code.CodeFunction(kParallelLaunch, gThreadPool, "ConcatInt8Run", kRunArgsAddr, gThreadNum); } else { - int task_id = 0; - int64_t real_dst_count = MSMIN(before_axis_size - task_id * count_unit_, count_unit_); - code.CodeFunction("Int8Concat", "input_data", output_tensor_, "&concat_param", axis_, real_dst_count, task_id); + code.CodeFunction("ConcatInt8Run", kRunArgsAddr, kDefaultTaskId); } context->AppendCode(code.str()); return RET_OK; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h index e71a1f5aaa..bc666683e8 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/concat_int8_coder.h @@ -19,7 +19,7 @@ #include #include -#include "micro/coder/opcoders/op_coder.h" +#include "coder/opcoders/op_coder.h" #include "nnacl/int8/concat_int8.h" namespace mindspore::lite::micro::nnacl { diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc index 6b9c1c6608..2ec6685f85 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.cc @@ -14,14 +14,14 @@ * limitations under the License. 
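The ConcatInt8Coder::Prepare hunk above replaces aliased shape pointers with malloc'ed copies, most likely because Tensor::shape() returns its vector by value, so storing .data() of that temporary would leave a dangling pointer. A minimal illustration of the safe deep copy:

```cpp
#include <cstdlib>
#include <cstring>
#include <vector>

// Persist an owned copy of a shape vector instead of aliasing the
// .data() of a temporary; the caller later free()s it, as the concat
// parameter's teardown does.
int *CopyShape(const std::vector<int> &shape) {
  int *out = static_cast<int *>(std::malloc(shape.size() * sizeof(int)));
  if (out == nullptr) return nullptr;
  std::memcpy(out, shape.data(), shape.size() * sizeof(int));
  return out;
}

int main() {
  std::vector<int> shape = {1, 16, 16, 8};
  int *owned = CopyShape(shape);  // safe even after `shape` goes away
  std::free(owned);
}
```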
*/ -#include "micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h" +#include "coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h" #include #include #include "securec/include/securec.h" #include "src/runtime/kernel/arm/base/convolution_base.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/log.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" namespace mindspore::lite::micro::nnacl { @@ -43,10 +43,10 @@ int Conv2D1x1Int8Coder::Prepare(CoderContext *const context) { int Conv2D1x1Int8Coder::DoCode(CoderContext *const context) { Collect(context, - {"nnacl/int8/conv1x1_int8.h", "nnacl/common_func.h", "wrapper/int8/conv1x1_init_int8.h", - "wrapper/int8/conv1x1_run_int8.h"}, - {"common_func.c", "pack.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", "conv1x1_init_int8_wrapper.c", - "conv1x1_run_int8_wrapper.c", "thread_pool.c"}); + {"nnacl/int8/conv1x1_int8.h", "nnacl/common_func.h", "wrapper/int8/conv1x1_init_int8_wrapper.h", + "wrapper/int8/conv1x1_run_int8_wrapper.h"}, + {"common_func.c", "pack_int8.c", "conv1x1_int8.c", "matmul_int8.c", "fixed_point.c", + "conv1x1_init_int8_wrapper.c", "conv1x1_run_int8_wrapper.c", "thread_pool.c", "conv1x1_base.c"}); nnacl::NNaclInt8Serializer code; @@ -54,10 +54,10 @@ int Conv2D1x1Int8Coder::DoCode(CoderContext *const context) { code.CodeStruct("matmul_param", *matmul_param_); code.CodeBaseStruct("Conv1x1Args", "args", input_sum_, filter_zp_ptr_, left_shift_, right_shift_, multiplier_, - packed_weight_, bias_data_, packed_input_, nullptr, nullptr, 0, 0, "conv_param", "matmul_param", + packed_weight_, bias_data_, packed_input_, nullptr, nullptr, 0, 0, "&conv_param", "&matmul_param", matmul_func_, pre_trans_input_, support_optimize_, filter_peroc_); - code.CodeFunction("Conv1x1Run", input_tensor_, "args", "THREAD_POOL_DEFAULT", thread_num_s_, output_tensor_); + code.CodeFunction("Conv1x1Run", input_tensor_, "(Conv1x1Args *)&args", output_tensor_); context->AppendCode(code.str()); return RET_OK; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h index 0f1d9e144a..29185d6273 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_Conv2D_1X1_INT8_CODER_H_ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_Conv2D_1X1_INT8_CODER_H_ -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include #include #include diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc index 30ccf2f976..21a3e4f81c 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.cc @@ -14,16 +14,14 @@ * limitations under the License. 
*/ -#include "micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h" -#include +#include "coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h" #include #include "securec/include/securec.h" #include "nnacl/int8/conv3x3_int8.h" -#include "src/runtime/kernel/arm/base/convolution_base.h" -#include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/log.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "coder/opcoders/parallel.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" namespace mindspore::lite::micro::nnacl { void ProcessFilterUint8(int8_t *origin_weight, int16_t *dst_weight, ConvParameter *conv_param) { @@ -129,7 +127,7 @@ int Conv2D3x3Int8Coder::Prepare(CoderContext *const context) { } int Conv2D3x3Int8Coder::DoCode(CoderContext *const context) { - Collect(context, {"nnacl/int8/conv_int8.h"}, {"pack.c", "conv_int8.c", "fixed_point.c"}); + Collect(context, {"nnacl/int8/conv_int8.h"}, {"pack_int8.c", "conv_int8.c", "fixed_point.c"}); nnacl::NNaclInt8Serializer code; code.precision(kPrecision); // call the op function @@ -145,9 +143,9 @@ int Conv2D3x3Int8Coder::DoCode(CoderContext *const context) { code.CodeFunction("PackInputToC8Int8", input_tensor_, c8_input_, "&conv_param_"); // code operator func if (thread_num_ > 1) { - code.CodeBaseStruct("Conv3x3Int8Args", "args", c8_input_, transformed_filter_addr_, new_bias_addr_, output_tensor_, - tile_buffer_, block_unit_buffer_, tmp_dst_buffer_, tmp_out_, "&conv_param_"); - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "Conv3x3Int8Run", "&args", "thread_num"); + code.CodeBaseStruct("Conv3x3Int8Args", kRunArgs, c8_input_, transformed_filter_addr_, new_bias_addr_, + output_tensor_, tile_buffer_, block_unit_buffer_, tmp_dst_buffer_, tmp_out_, "&conv_param_"); + code.CodeFunction(kParallelLaunch, "THREAD_POOL_DEFAULT", "Conv3x3Int8Run", kRunArgsAddr, "thread_num"); } else { int task_id = 0; code.CodeFunction("Conv3x3Int8", c8_input_, transformed_filter_addr_, new_bias_addr_, output_tensor_, tile_buffer_, diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h index d32a7aa4c6..27641d7f94 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h @@ -16,7 +16,7 @@ #ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_Conv2D_3X3_INT8_CODER_H_ #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_Conv2D_3X3_INT8_CODER_H_ -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include #include #include @@ -33,7 +33,10 @@ class Conv2D3x3Int8Coder final : public Conv2DBaseCoder { int DoCode(CoderContext *const context) override; - ~Conv2D3x3Int8Coder() override = default; + ~Conv2D3x3Int8Coder() override { + transformed_filter_addr_ = nullptr; + new_bias_addr_ = nullptr; + } private: int InitWeightBias(); diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc index 0c60ecc62d..0c75fff1cb 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.cc @@ -14,21 +14,22 @@ * limitations under the License. 
*/ -#include "micro/coder/opcoders/nnacl/int8/conv2d_int8_coder.h" +#include "coder/opcoders/nnacl/int8/conv2d_int8_coder.h" #include #include #include -#include #include "securec/include/securec.h" -#include "micro/coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h" -#include "micro/coder/log.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/nnacl/int8/conv2d_1x1_int8_coder.h" +#include "coder/opcoders/nnacl/int8/conv2d_3x3_int8_coder.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" #include "src/runtime/kernel/arm/base/convolution_base.h" -#include "src/runtime/kernel/arm/int8/convolution_int8.h" #include "src/ops/populate/populate_register.h" -#include "micro/coder/opcoders/file_collector.h" +#include "src/common/version_manager.h" +#include "coder/log.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; namespace mindspore::lite::micro::nnacl { @@ -180,42 +181,32 @@ int Conv2DINT8Coder::Resize() { } int Conv2DINT8Coder::DoCode(CoderContext *const context) { - Collect(context, {"nnacl/int8/conv_int8.h", "nnacl/common_func.h", "nnacl/kernel/int8/conv_init_int8.h"}, - {"common_func.c", "pack.c", "conv_int8.c", "winograd_transform.c", "matmul_int8.c", "fixed_point.c", - "conv_init_int8_wrapper.c", "thread_pool.c"}); + std::vector asm_files; + if (target_ == kARM32A) { + asm_files = {"PreSum4x16Int8Peroc.S", "PreSum4x16Int8Pert.S", "MatmulInt8Neon32.S"}; + } else if (target_ == kARM64) { + asm_files = {"PreSum4x16Int8Peroc.S", "PreSum4x16Int8Pert.S", "MatmulInt8Neon64.S"}; + } + Collect(context, {"nnacl/int8/conv_int8.h", "nnacl/common_func.h", "wrapper/int8/convolution_int8_wrapper.h"}, + {"common_func.c", "pack_int8.c", "conv_int8.c", "winograd_transform.c", "matmul_int8.c", "fixed_point.c", + "convolution_int8_wrapper.c", "conv_init_int8_wrapper.c", "thread_pool.c"}, + asm_files); // call the op function nnacl::NNaclInt8Serializer code; code.precision(kPrecision); code.CodeFunction("memset", packed_input_, 0, packed_input_size_); code.CodeFunction("memset", input_sum_, 0, input_sum_size_); code.CodeFunction("memset", matmul_packed_input_, 0, matmul_packed_input_size_); - - conv_param_->op_parameter_.thread_num_ = thread_num_; - conv_param_->thread_num_ = thread_num_; - code.CodeStruct("conv_param_", *conv_param_); - - // code operator func - if (thread_num_ > 1) { - code.CodeFunction("memset", matmul_packed_input_, 0, matmul_packed_input_size_); - code.CodeBaseStruct("ConvOptInt8Args", "args", input_tensor_, packed_input_, matmul_packed_input_, packed_weight_, - bias_data_, output_tensor_, input_sum_, thread_num_s_, "(ConvParameter *)&conv_param_", - matmul_func_); - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "ConvInt8Run", "&args", "thread_num"); + code.CodeStruct("conv_param", *conv_param_); + + code.CodeBaseStruct("ConvolutionInt8Args", kRunArgs, input_tensor_, packed_input_, matmul_packed_input_, + packed_weight_, bias_data_, output_tensor_, filter_zp_ptr_, input_sum_, + "(ConvParameter *)&conv_param", matmul_func_, support_optimize_); + code.CodeFunction("CheckSupportOptimize", kRunArgsAddr); + if (support_parallel_) { + code.CodeFunction(kParallelLaunch, gThreadPool, "ConvolutionInt8Run", kRunArgsAddr, gThreadNum); } else { - if (target_ == kARM64) { - code << "if (GetSupportOptFlag()) {\n"; - code << "conv_param_.tile_num_ = " << 8 << 
";\n"; - code << "} else {\n"; - code << "conv_param_.tile_num_ = " << 4 << ";\n"; - code << "}\n"; - code.CodeFunction("ConvInt8", input_tensor_, packed_input_, matmul_packed_input_, packed_weight_, bias_data_, - output_tensor_, filter_zp_ptr_, input_sum_, 0, "(ConvParameter *)&conv_param_", matmul_func_, - "GetSupportOptFlag()"); - } else { - code.CodeFunction("ConvInt8", input_tensor_, packed_input_, matmul_packed_input_, packed_weight_, bias_data_, - output_tensor_, filter_zp_ptr_, input_sum_, 0, "(ConvParameter *)&conv_param_", matmul_func_, - support_optimize_); - } + code.CodeFunction("ConvolutionInt8Run", kRunArgsAddr, kDefaultTaskId); } context->AppendCode(code.str()); return RET_OK; @@ -224,31 +215,30 @@ int Conv2DINT8Coder::DoCode(CoderContext *const context) { std::unique_ptr CPUConv2DINT8CoderCreator(const std::vector &in_tensors, const std::vector &out_tensors, const Model::Node *node, size_t node_index, Target target) { - PrimitiveC *primitive_c = node->primitive_; - if (!primitive_c) { + const void *primitive = node->primitive_; + if (primitive == nullptr) { return nullptr; } - OpParameter *parameter = - PopulateRegistry::GetInstance()->GetParameterCreator((schema::PrimitiveType(primitive_c->Type())))(primitive_c); - if (parameter == nullptr) { - MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)(primitive_c->Type())); + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + ParameterGen paramGen = + PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version); + if (paramGen == nullptr) { + MS_LOG(ERROR) << "parameter generator is null"; return nullptr; } - - auto *conv_param = reinterpret_cast(parameter); + auto conv_param = reinterpret_cast(paramGen(node->primitive_)); int kernel_h = conv_param->kernel_h_; int kernel_w = conv_param->kernel_w_; int stride_h = conv_param->stride_h_; int stride_w = conv_param->stride_w_; int dilation_h = conv_param->dilation_h_; int dilation_w = conv_param->dilation_w_; - free(parameter); + free(conv_param); std::unique_ptr coder; if (kernel_h == 3 && kernel_w == 3 && stride_h == 1 && stride_w == 1 && dilation_h == 1 && dilation_w == 1) { coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); } else if (kernel_h == 1 && kernel_w == 1) { - coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); + coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); } else { coder = CPUOpCoderCreator(in_tensors, out_tensors, node, node_index, target); } @@ -259,7 +249,7 @@ std::unique_ptr CPUConv2DINT8CoderCreator(const std::vector #include #include -#include "micro/coder/opcoders/base/conv2d_base_coder.h" +#include "coder/opcoders/base/conv2d_base_coder.h" #include "nnacl/conv_parameter.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" namespace mindspore::lite::micro::nnacl { class Conv2DINT8Coder final : public Conv2DBaseCoder { @@ -34,7 +34,14 @@ class Conv2DINT8Coder final : public Conv2DBaseCoder { int DoCode(CoderContext *const context) override; - ~Conv2DINT8Coder() override = default; + ~Conv2DINT8Coder() override { + packed_weight_ = nullptr; + bias_data_ = nullptr; + filter_zp_ptr_ = nullptr; + matmul_packed_input_ = nullptr; + packed_input_ = nullptr; + input_sum_ = nullptr; + } private: int InitWeightBias(CoderContext *ctx); @@ 
-63,7 +70,7 @@ class Conv2DINT8Coder final : public Conv2DBaseCoder { int32_t *input_sum_{nullptr}; int8_t *matmul_packed_input_{nullptr}; - string matmul_func_; + std::string matmul_func_; std::function pack_weight_init_{nullptr}; }; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.cc new file mode 100644 index 0000000000..b40c68782f --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.cc @@ -0,0 +1,110 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h" +#include +#include "coder/log.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "nnacl/int8/conv_depthwise_int8.h" + +using mindspore::schema::PrimitiveType_DepthwiseConv2D; + +namespace mindspore::lite::micro { + +int ConvolutionDepthwiseINT8Coder::Prepare(CoderContext *const context) { + Conv2DBaseCoder::Init(); + // init sliding window param + MS_CHECK_RET_CODE(SetQuantParam(), "Set quant param failed."); + MS_CHECK_RET_CODE(InitWeightBias(context), "dwconvolution do init weightbais failed"); + MS_CHECK_RET_CODE(InitBuffer(context), "dwconvolution do init buffer failed"); + return RET_OK; +} + +int ConvolutionDepthwiseINT8Coder::InitBuffer(CoderContext *const context) { + // malloc pack input and output buffer + row_buffer_size_ = thread_num_ * conv_param_->output_w_ * conv_param_->output_channel_ * sizeof(int32_t); + row_buffer_ = reinterpret_cast(allocator_->Malloc(kNumberTypeInt32, row_buffer_size_, kWorkspace)); + MS_CHECK_PTR(row_buffer_); + return RET_OK; +} + +int ConvolutionDepthwiseINT8Coder::InitWeightBias(CoderContext *const context) { + // init weight, int8 -> int16 + int channel = filter_tensor_->Batch(); + int pack_weight_size = channel * filter_tensor_->Height() * filter_tensor_->Width(); + auto tmp_weight_data_size = static_cast(pack_weight_size * sizeof(int8_t)); + + nnacl::NNaclInt8Serializer code; + + int8_t *tmp_weight = reinterpret_cast(allocator_->Malloc(kNumberTypeInt8, kOnlineSize, kOnlinePackWeight)); + MS_CHECK_PTR(tmp_weight); + code.CodeMallocExpression(tmp_weight, tmp_weight_data_size); + code.CodeFunction("memset", tmp_weight, 0, tmp_weight_data_size); + code.CodeFunction("PackNCHWToNHWCInt8", filter_tensor_, tmp_weight, 1, + filter_tensor_->Height() * filter_tensor_->Width(), filter_tensor_->Batch()); + int weight_zp = conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_; + auto packed_weight_data_size = static_cast(pack_weight_size * sizeof(int16_t)); + packed_weight_ = reinterpret_cast(allocator_->Malloc(kNumberTypeInt16, kOnlineSize, kOnlinePackWeight)); + MS_CHECK_PTR(packed_weight_); + code.CodeMallocExpression(packed_weight_, packed_weight_data_size); + code << 
"for (int i = 0; i < " << filter_tensor_->ElementsNum() << "; i++) {\n"; + code << " " << allocator_->GetRuntimeAddr(packed_weight_) << "[i] = (int16_t)(" + << allocator_->GetRuntimeAddr(tmp_weight) << "[i] - " << weight_zp << ");\n"; + code << "}\n"; + + auto channel_data_size = static_cast(channel * sizeof(int32_t)); + bias_data_ = reinterpret_cast(allocator_->Malloc(kNumberTypeInt32, kOnlineSize, kOnlinePackWeight)); + MS_CHECK_PTR(bias_data_); + code.CodeMallocExpression(bias_data_, channel_data_size); + code.CodeFunction("memset", bias_data_, 0, channel_data_size); + // init bias + if (input_tensors_.size() == kInputSize2) { + code.CodeFunction("memcpy", bias_data_, bias_tensor_, bias_tensor_->ElementsNum() * sizeof(int32_t)); + } + context->AppendInitCode(code.str()); + return RET_OK; +} + +int ConvolutionDepthwiseINT8Coder::DoCode(CoderContext *const context) { + MS_CHECK_TRUE(conv_param_->input_channel_ == conv_param_->output_channel_, + "Only support input channel equals output channel."); + Collect( + context, + {"nnacl/int8/conv_depthwise_int8.h", "nnacl/int8/pack_int8.h", "wrapper/int8/convolution_depthwise_int8_wrapper.h"}, + {"conv_depthwise_int8.c", "fixed_point.c", "pack_int8.c", "conv_int8.c", "winograd_transform.c", + "convolution_depthwise_int8_wrapper.c"}, + {"ConvDwInt8Row.S", "ConvDwInt8PostAlign4.S", "ConvDwInt8PostAlign4PerChannel.S"}); + nnacl::NNaclInt8Serializer code; + code.precision(kPrecision); + // call the op function + code.CodeFunction("memset", row_buffer_, 0, row_buffer_size_); + code.CodeStruct("conv_param", *conv_param_); + code.CodeBaseStruct("ConvDepthwiseInt8Args", kRunArgs, output_tensor_, row_buffer_, input_tensor_, packed_weight_, + bias_data_, "&conv_param"); + if (support_parallel_) { + code.CodeFunction(kParallelLaunch, gThreadPool, "ConvDepthwiseInt8Run", kRunArgsAddr, "conv_param.thread_num_"); + } else { + code.CodeFunction("ConvDepthwiseInt8Run", kRunArgsAddr, kDefaultTaskId); + } + context->AppendCode(code.str()); + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_DepthwiseConv2D, + CPUOpCoderCreator) +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h new file mode 100644 index 0000000000..6b42a73a85 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/convolution_depthwise_int8_coder.h @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODER_CONVOLUTION_DEPTHWISE_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODER_CONVOLUTION_DEPTHWISE_INT8_CODER_H_ + +#include +#include "coder/opcoders/base/conv2d_base_coder.h" +#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h" + +namespace mindspore::lite::micro { +class ConvolutionDepthwiseINT8Coder : public Conv2DBaseCoder { + public: + ConvolutionDepthwiseINT8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : Conv2DBaseCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~ConvolutionDepthwiseINT8Coder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + private: + int InitBuffer(CoderContext *const context); + + int InitWeightBias(CoderContext *const context); + + int32_t *row_buffer_{nullptr}; + + size_t row_buffer_size_{0}; + + int16_t *packed_weight_{nullptr}; + + int32_t *bias_data_{nullptr}; +}; +} // namespace mindspore::lite::micro + +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODER_CONVOLUTION_DEPTHWISE_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc index ea9242fcc8..f725eb75df 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,13 +14,13 @@ * limitations under the License. 
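// Sizing note for InitBuffer above: each thread owns one output row of int32 accumulators,
//   row_buffer_size = thread_num * output_w * output_channel * sizeof(int32_t),
// so a 4-thread, 80x80x32 output needs 4 * 80 * 32 * 4 B = 40 KiB of workspace.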
*/ -#include "micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h" +#include "coder/opcoders/nnacl/int8/deconvolution_int8_coder.h" #include #include "nnacl/int8/deconv_int8.h" -#include "micro/coder/opcoders/file_collector.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::lite::micro::nnacl { @@ -123,7 +123,7 @@ int DeconvolutionInt8Coder::InitRunBuf(CoderContext *const context) { } int DeconvolutionInt8Coder::DoCode(CoderContext *const context) { - Collect(context, {"nnacl/int8/deconv.h"}, {"int8/deconv.c", "pack.c", "quantization/fixed_point.c"}); + Collect(context, {"nnacl/int8/deconv.h"}, {"int8/deconv.c", "pack_int8.c", "quantization/fixed_point.c"}); nnacl::NNaclInt8Serializer code; code.CodeFunction("memset", input_ptr_, 0, input_ptr_size_); @@ -157,5 +157,6 @@ int DeconvolutionInt8Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_DeConv2D, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Conv2dTransposeFusion, + CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h index 7b5839f3bf..f404e398ad 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/deconvolution_int8_coder.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.cc new file mode 100644 index 0000000000..237cf576c8 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h"
+
+#include "coder/opcoders/file_collector.h"
+#include "coder/log.h"
+#include "include/errorcode.h"
+
+using mindspore::schema::PrimitiveType_DetectionPostProcess;
+
+namespace mindspore::lite::micro::nnacl {
+int DetectionPostProcessInt8Coder::MallocInputsBuffer() {
+  input_boxes_ = reinterpret_cast<float *>(
+    allocator_->Malloc(kNumberTypeFloat32, input_tensors_.at(0)->ElementsNum() * sizeof(float), kWorkspace));
+  MS_CHECK_PTR(input_boxes_);
+  input_scores_ = reinterpret_cast<float *>(
+    allocator_->Malloc(kNumberTypeFloat32, input_tensors_.at(1)->ElementsNum() * sizeof(float), kWorkspace));
+  MS_CHECK_PTR(input_scores_);
+  return RET_OK;
+}
+
+int DetectionPostProcessInt8Coder::GetInputData(CoderContext *const context, Serializer *const code) {
+  Tensor *boxes = input_tensors_.at(0);
+  MS_CHECK_PTR(boxes);
+  lite::QuantArg boxes_quant_param = boxes->quant_params().front();
+  Tensor *scores = input_tensors_.at(1);
+  MS_CHECK_PTR(scores);
+  lite::QuantArg scores_quant_param = scores->quant_params().front();
+  MS_CHECK_TRUE(boxes->data_type() == kNumberTypeInt8, "Input data type error");
+  MS_CHECK_TRUE(scores->data_type() == kNumberTypeInt8, "Input data type error");
+
+  Collect(context, {"nnacl/int8/quant_dtype_cast_int8.h"}, {"quant_dtype_cast_int8.c"});
+  code->CodeFunction("DoDequantizeInt8ToFp32", boxes, input_boxes_, boxes_quant_param.scale,
+                     boxes_quant_param.zeroPoint, boxes->ElementsNum());
+  code->CodeFunction("DoDequantizeInt8ToFp32", scores, input_scores_, scores_quant_param.scale,
+                     scores_quant_param.zeroPoint, scores->ElementsNum());
+  return RET_OK;
+}
+
+REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_DetectionPostProcess,
+                   CPUOpCoderCreator<DetectionPostProcessInt8Coder>)
+} // namespace mindspore::lite::micro::nnacl
diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
new file mode 100644
index 0000000000..97f86a8ba2
--- /dev/null
+++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/detection_post_process_int8_coder.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
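// DoDequantizeInt8ToFp32, emitted by GetInputData above, applies the usual affine
// dequantization before the float post-process kernel runs. A minimal sketch of that
// rule, assuming per-tensor quantization:
#include <cstdint>
static void DequantizeInt8ToFp32Sketch(const int8_t *in, float *out, float scale,
                                       int32_t zero_point, int num) {
  for (int i = 0; i < num; ++i) {
    out[i] = scale * (in[i] - zero_point);  // real_value = scale * (q - zp)
  }
}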
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_INT8_DETECTION_POST_PROCESS_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_INT8_DETECTION_POST_PROCESS_INT8_CODER_H_ + +#include +#include +#include +#include "coder/opcoders/base/detection_post_process_base_coder.h" + +namespace mindspore::lite::micro::nnacl { +class DetectionPostProcessInt8Coder final : public DetectionPostProcessBaseCoder { + public: + DetectionPostProcessInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : DetectionPostProcessBaseCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~DetectionPostProcessInt8Coder() override = default; + + private: + int GetInputData(CoderContext *const context, Serializer *const code) override; + int MallocInputsBuffer() override; +}; +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_NNACL_INT8_DETECTION_POST_PROCESS_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.cc new file mode 100644 index 0000000000..cbcf4530ae --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "coder/opcoders/nnacl/int8/div_int8_coder.h" +#include +#include +#include "include/errorcode.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" + +namespace mindspore::lite::micro::nnacl { + +int DivInt8Coder::Prepare(CoderContext *context) { + input0 = input_tensors_.at(0); + input1 = input_tensors_.at(1); + MS_ASSERT(input0); + MS_ASSERT(input1); + + broadcast_ = input0->ElementsNum() != input1->ElementsNum(); + + param_.in0_args_.scale_ = input0->quant_params().front().scale; + param_.in0_args_.zp_ = -input0->quant_params().front().zeroPoint; + param_.in1_args_.scale_ = input1->quant_params().front().scale; + param_.in1_args_.zp_ = -input1->quant_params().front().zeroPoint; + param_.out_args_.scale_ = output_tensor_->quant_params().front().scale; + param_.out_args_.zp_ = output_tensor_->quant_params().front().zeroPoint; + + const double real_multiplier = param_.in0_args_.scale_ / (param_.in1_args_.scale_ * param_.out_args_.scale_); + + QuantizeMultiplier(real_multiplier, ¶m_.output_multiplier_, ¶m_.output_shift_); + + param_.output_activation_min_ = std::numeric_limits::min(); + param_.output_activation_max_ = std::numeric_limits::max(); + + return RET_OK; +} + +int DivInt8Coder::DoCode(CoderContext *const context) { + NNaclInt8Serializer code; + int element_num = output_tensor_->ElementsNum(); + code.CodeStruct("param", param_); + if (broadcast_) { + ArithmeticParameter tile_para; + tile_para.ndim_ = output_tensor_->shape().size(); + for (size_t i = 0; i < tile_para.ndim_; i++) { + tile_para.in_shape0_[i] = input0->DimensionSize(i); + tile_para.in_shape1_[i] = input1->DimensionSize(i); + tile_para.out_shape_[i] = output_tensor_->DimensionSize(i); + } + tile0_data_ = static_cast(allocator_->Malloc(kNumberTypeInt8, output_tensor_->Size(), kWorkspace)); + tile1_data_ = static_cast(allocator_->Malloc(kNumberTypeInt8, output_tensor_->Size(), kWorkspace)); + MS_CHECK_PTR(tile0_data_); + MS_CHECK_PTR(tile1_data_); + code.CodeStruct("tile_para", tile_para); + code.CodeFunction("TileDimensionsInt8", input0, input1, tile0_data_, tile1_data_, "&tile_para"); + code.CodeFunction("DivInt8", tile0_data_, tile1_data_, output_tensor_, element_num, "¶m"); + } else { + code.CodeFunction("DivInt8", input0, input1, output_tensor_, element_num, "¶m"); + } + + return RET_OK; +} + +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h new file mode 100644 index 0000000000..d648f040a7 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/div_int8_coder.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
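// QuantizeMultiplier, called in Prepare above, decomposes the real multiplier
// in0.scale / (in1.scale * out.scale) into an int32 fixed-point mantissa plus a
// power-of-two shift so the generated kernel needs no floating point at run time.
// A frexp-based sketch of that decomposition (the standard technique, not the
// literal nnacl source):
#include <cmath>
#include <cstdint>
static void QuantizeMultiplierSketch(double real_multiplier, int32_t *quantized, int *shift) {
  if (real_multiplier == 0.0) {
    *quantized = 0;
    *shift = 0;
    return;
  }
  const double q = std::frexp(real_multiplier, shift);  // real_multiplier == q * 2^shift, q in [0.5, 1)
  int64_t q_fixed = static_cast<int64_t>(std::round(q * (1LL << 31)));
  if (q_fixed == (1LL << 31)) {  // rounding pushed q up to 1.0; renormalize
    q_fixed /= 2;
    ++(*shift);
  }
  *quantized = static_cast<int32_t>(q_fixed);
}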
+ */ +#ifndef MINDSPORE_LITE_MICRO_CODER_DIV_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_DIV_INT8_CODER_H_ + +#include +#include +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/int8/quantize.h" + +namespace mindspore::lite::micro::nnacl { + +class DivInt8Coder final : public OperatorCoder { + public: + DivInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~DivInt8Coder() override = default; + + int Prepare(CoderContext *context) override; + + int DoCode(CoderContext *const context) override; + + private: + DivQuantArg param_; + Tensor *input0{nullptr}; + Tensor *input1{nullptr}; + int8_t *tile0_data_{nullptr}; + int8_t *tile1_data_{nullptr}; + bool broadcast_{false}; +}; + +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_DIV_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc index ca6a03cf12..e1059b886e 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/fullconnection_int8_coder.cc @@ -24,7 +24,16 @@ using mindspore::schema::PrimitiveType_FullConnection; namespace mindspore::lite::micro::nnacl { -FullConnectionInt8Coder ::~FullConnectionInt8Coder() { FreeQuantParam(); } +FullConnectionInt8Coder ::~FullConnectionInt8Coder() { + FreeQuantParam(); + filter_tensor_ = nullptr; + bias_tensor_ = nullptr; + pack_a_ptr_ = nullptr; + pack_b_ptr_ = nullptr; + input_sums_ = nullptr; + weight_bias_sums_ = nullptr; + bias_ptr_ = nullptr; +} int FullConnectionInt8Coder::MallocQuantParam() { filter_tensor_ = input_tensors_.at(kWeightIndex); diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc index 5b3a9cd1fe..ea788f58bd 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/pooling_int8_coder.cc @@ -15,16 +15,15 @@ */ #include "coder/opcoders/nnacl/int8/pooling_int8_coder.h" #include -#include #include #include "nnacl/int8/pooling_int8.h" #include "coder/log.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" #include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" -using std::string; - -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::lite::micro::nnacl { int PoolingInt8Coder::DoCode(CoderContext *const context) { @@ -58,25 +57,17 @@ int PoolingInt8Coder::DoCode(CoderContext *const context) { pooling_parameter->quant_args_ = quant_args; code.CodeStruct("pooling_parameter", *pooling_parameter); - if (thread_num_ > 1) { - code.CodeBaseStruct("PoolingInt8Args", "args", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter"); - if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) { - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "MaxPoolingInt8Run", "&args", "thread_num"); - } else { - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "AvgPoolingInt8Run", "&args", "thread_num"); - } + if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) { + code.CodeFunction("MaxPoolingInt8", in_tensor, 
out_tensor, "(PoolingParameter *)&pooling_parameter", + kDefaultTaskId); } else { - int task_id = 0; - if (pooling_parameter->pool_mode_ == PoolMode_MaxPool) { - code.CodeFunction("MaxPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter", task_id); - } else { - code.CodeFunction("AvgPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter", task_id); - } + code.CodeFunction("AvgPoolingInt8", in_tensor, out_tensor, "(PoolingParameter *)&pooling_parameter", + kDefaultTaskId); } - MS_LOG(INFO) << "PoolingInt8Code has been called"; context->AppendCode(code.str()); return lite::RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Pooling, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_AvgPoolFusion, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_MaxPoolFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc index da7f81687e..c9550bb98e 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.cc @@ -20,7 +20,7 @@ #include "coder/log.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; namespace mindspore::lite::micro::nnacl { int ReduceInt8Coder::CalculateQuantArgs() { QuantArg input_quant = input_tensor_->quant_params().at(0); @@ -230,6 +230,6 @@ int ReduceInt8Coder::DoCode(CoderContext *const context) { return RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Reduce, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_ReduceFusion, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h index 3ab5919f2e..24fc456416 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reduce_int8_coder.h @@ -30,7 +30,8 @@ class ReduceInt8Coder final : public ReduceBaseCoder { const Model::Node *node, size_t node_index, Target target) : ReduceBaseCoder(in_tensors, out_tensors, node, node_index, target) {} - ~ReduceInt8Coder() override = default; + ~ReduceInt8Coder() override { begin_src_data_ = nullptr; } + int Prepare(CoderContext *const context) override; int DoCode(CoderContext *const context) override; diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.cc new file mode 100644 index 0000000000..7c72500d13 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/int8/relux_int8_coder.h" +#include "nnacl/fp32/activation_fp32.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "coder/log.h" +#include "include/errorcode.h" + +namespace mindspore::lite::micro::nnacl { + +int ReluxInt8Coder::Prepare(CoderContext *const context) { + MS_CHECK_PTR(parameter_); + type_ = (reinterpret_cast(parameter_))->type_; + + quant_arg_.input_arg.scale_ = input_tensor_->quant_params().front().scale; + quant_arg_.input_arg.zp_ = input_tensor_->quant_params().front().zeroPoint; + quant_arg_.output_arg.scale_ = output_tensor_->quant_params().front().scale; + quant_arg_.output_arg.zp_ = output_tensor_->quant_params().front().zeroPoint; + + const double multiplier = quant_arg_.input_arg.scale_ / quant_arg_.output_arg.scale_; + QuantizeRoundParameterWithDoublePrecision(multiplier, &quant_arg_.input_multiplier_, &quant_arg_.left_shift_, + &quant_arg_.right_shift_); + + return RET_OK; +} + +int ReluxInt8Coder::DoCode(CoderContext *const context) { + Collect(context, {"nnacl/int8/relux_int8.h"}, {"relux_int8.c"}); + + NNaclInt8Serializer code; + + int length = input_tensor_->ElementsNum(); + + code.CodeStruct("quant_arg", quant_arg_); + code.CodeFunction("ReluXInt8", input_tensor_, length, output_tensor_, "&quant_arg"); + + context->AppendCode(code.str()); + + return RET_OK; +} + +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h new file mode 100644 index 0000000000..ff55311dfb --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/relux_int8_coder.h @@ -0,0 +1,81 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
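// ReluxInt8Coder::Prepare above precomputes input_scale / output_scale as a fixed-point
// multiplier (via QuantizeRoundParameterWithDoublePrecision), so the generated ReluXInt8
// call only rescales and clamps; the subclass Prepare overrides in the header below do
// nothing more than pick the clamp bounds.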
+ */ +#ifndef MINDSPORE_LITE_MICRO_CODER_RELUX_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_RELUX_INT8_CODER_H_ + +#include +#include +#include +#include "coder/opcoders/op_coder.h" +#include "nnacl/int8/relux_int8.h" +#include "coder/log.h" +#include "include/errorcode.h" + +namespace mindspore::lite::micro::nnacl { + +class ReluxInt8Coder : public OperatorCoder { + public: + ReluxInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~ReluxInt8Coder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + protected: + ReluXQuantArg quant_arg_; + + private: + int type_; +}; + +class ReluInt8Coder final : public ReluxInt8Coder { + public: + ReluInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : ReluxInt8Coder(in_tensors, out_tensors, node, node_index, target) {} + + ~ReluInt8Coder() override = default; + + int Prepare(CoderContext *const context) override { + MS_CHECK_RET_CODE(ReluxInt8Coder::Prepare(context), "ReluxInt8Coder::Prepare failed"); + quant_arg_.quantized_output_min = quant_arg_.output_arg.zp_; + quant_arg_.quantized_output_max = CHAR_MAX; + return RET_OK; + }; +}; + +class Relu6Int8Coder final : public ReluxInt8Coder { + public: + Relu6Int8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : ReluxInt8Coder(in_tensors, out_tensors, node, node_index, target) {} + + ~Relu6Int8Coder() override = default; + + int Prepare(CoderContext *const context) override { + MS_CHECK_RET_CODE(ReluxInt8Coder::Prepare(context), "ReluxInt8Coder::Prepare failed"); + quant_arg_.quantized_output_min = QuantizeToInt8(0, quant_arg_.output_arg.scale_, quant_arg_.output_arg.zp_); + quant_arg_.quantized_output_max = QuantizeToInt8(6, quant_arg_.output_arg.scale_, quant_arg_.output_arg.zp_); + return RET_OK; + }; +}; + +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_RELUX_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc index 0df7a07986..eb83acf6be 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/reshape_int8_coder.cc @@ -43,13 +43,8 @@ int ReshapeInt8Coder::DoCode(CoderContext *const context) { INT8_MIN, INT8_MAX}; code.CodeStruct("reshape_quant_arg", reshape_quant_arg); + code.CodeFunction("Int8Reshape", input, output, elements_num, "reshape_quant_arg"); - if (thread_num_ > 1) { - code.CodeBaseStruct("ReshapeInt8Args", "args", input, output, elements_num, thread_num_s_, "reshape_quant_arg"); - CODE_PARALLEL_FUNC("ReshapeInt8Run"); - } else { - code.CodeFunction("Int8Reshape", input, output, elements_num, "reshape_quant_arg"); - } context->AppendCode(code.str()); return RET_OK; } diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.cc new file mode 100644 index 0000000000..066bcc8c1c --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache 
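// Relu6Int8Coder::Prepare above derives its clamp bounds with QuantizeToInt8, which maps
// a real-valued bound into the output tensor's quantized domain. A sketch consistent
// with its call sites (the exact rounding mode of the nnacl helper may differ):
#include <cmath>
#include <cstdint>
static int8_t QuantizeToInt8Sketch(float real_value, float scale, int32_t zp) {
  int32_t q = static_cast<int32_t>(std::lround(real_value / scale)) + zp;
  if (q > 127) q = 127;  // saturate to the int8 range
  if (q < -128) q = -128;
  return static_cast<int8_t>(q);
}
// e.g. with scale = 0.1 and zp = -128, Relu6 clamps to [QuantizeToInt8(0) = -128,
// QuantizeToInt8(6) = -68].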
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/int8/resize_int8_coder.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" +#include "securec/include/securec.h" +#include "nnacl/int8/quantize.h" +#include "coder/opcoders/parallel.h" + +using mindspore::schema::PrimitiveType_Resize; + +namespace mindspore::lite::micro::nnacl { +ResizeInt8Coder::~ResizeInt8Coder() { + delete quant_out_; + quant_out_ = nullptr; + delete quant_in_; + quant_in_ = nullptr; + delete multiplier_; + multiplier_ = nullptr; +} + +int ResizeInt8Coder::Prepare(CoderContext *const context) { + MS_CHECK_RET_CODE(ResizeBaseCoder::Init(), "init resize base failed"); + quant_in_ = new (std::nothrow)::QuantArg; + quant_out_ = new (std::nothrow)::QuantArg; + multiplier_ = new (std::nothrow) QuantMulArg; + MS_CHECK_PTR(quant_in_); + MS_CHECK_PTR(quant_out_); + MS_CHECK_PTR(multiplier_); + quant_in_->zp_ = input_tensor_->quant_params().at(0).zeroPoint; + quant_in_->scale_ = input_tensor_->quant_params().at(0).scale; + quant_out_->zp_ = output_tensor_->quant_params().at(0).zeroPoint; + quant_out_->scale_ = output_tensor_->quant_params().at(0).scale; + + QuantizeRoundParameterWithDoublePrecision(quant_in_->scale_ / quant_out_->scale_, &multiplier_->multiplier_, + &multiplier_->left_shift_, &multiplier_->right_shift_); + return ReSize(); +} + +int ResizeInt8Coder::ReSize() { + if (method_ == schema::ResizeMethod_LINEAR) { + MS_LOG(ERROR) << "unsupported resize linear currently"; + return RET_ERROR; + } + return RET_OK; +} + +int ResizeInt8Coder::DoCode(CoderContext *const context) { + std::vector headers = {"nnacl/int8/resize_int8.h", "wrapper/int8/resize_int8_wrapper.h"}; + std::vector cFiles = {"resize_int8.c", "common_func.c", "resize_int8_wrapper.c"}; + Collect(context, headers, cFiles); + + nnacl::NNaclInt8Serializer code; + code.CodeArray("input_shape", input_tensor_->shape().data(), input_tensor_->shape().size(), false); + code.CodeArray("output_shape", output_tensor_->shape().data(), output_tensor_->shape().size(), false); + switch (method_) { + case static_cast(schema::ResizeMethod_LINEAR): { + MS_LOG(ERROR) << "unsupported: " << schema::EnumNameResizeMethod(static_cast(method_)); + break; + } + case static_cast(schema::ResizeMethod_NEAREST): { + bool same_zp = quant_in_->zp_ == quant_out_->zp_; + bool same_scale = abs(quant_out_->scale_ - quant_in_->scale_) < 1e-6; + bool align_corners = coordinate_transform_mode_ == schema::CoordinateTransformMode_ALIGN_CORNERS; + if (same_zp && same_scale) { + code.CodeBaseStruct("ResizeInt8Args", kRunArgs, input_tensor_, output_tensor_, "&input_shape", "&output_shape", + align_corners, thread_num_); + if (support_parallel_) { + code.CodeFunction(kParallelLaunch, gThreadPool, "ResizeInt8Run", kRunArgsAddr, gThreadNum); + } else { + code.CodeFunction("ResizeInt8Run", kRunArgsAddr, kDefaultTaskId); + } + } else { + MS_LOG(WARNING) << "unsupported parallel launch currently"; + 
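// When input and output quant params differ, the branch below cannot reuse bytes
// verbatim: ResizeNearestNeighborInt8 requantizes every gathered pixel through the
// precomputed multiplier instead of copying, and the patch emits it as a single call
// with task id 0 — hence the warning above about the missing parallel wrapper.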
code.CodeStruct("quant_in", *quant_in_); + code.CodeStruct("quant_out", *quant_out_); + code.CodeStruct("multiplier", *multiplier_); + code.CodeFunction("ResizeNearestNeighborInt8", input_tensor_, output_tensor_, "&input_shape", "&output_shape", + align_corners, "multiplier", "quant_in", "quant_out", 0, thread_num_); + } + break; + } + case schema::ResizeMethod_UNKNOWN: + default: { + MS_LOG(ERROR) << "Resize unknown method " << method_; + return RET_ERROR; + } + } + context->AppendCode(code.str()); + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Resize, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h new file mode 100644 index 0000000000..ca849dfc58 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/resize_int8_coder.h @@ -0,0 +1,60 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_MICRO_CODER_SLICE_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_SLICE_INT8_CODER_H_ + +#include +#include +#include +#include "coder/opcoders/base/resize_base_coder.h" +#include "nnacl/op_base.h" + +namespace mindspore::lite::micro::nnacl { +class ResizeInt8Coder final : public ResizeBaseCoder { + public: + ResizeInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : ResizeBaseCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~ResizeInt8Coder(); + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + private: + int ReSize(); + int InitResizeBiLinear(); + int InitFloatResizeBiLinear(); + int InitResizeQuantArg(); + int CalRatio(); + int CalInterpolationRange(); + void FreeResizeBiLinear(); + int InitResizeFloatQuantArg(); + int CalFloatRatio(); + int CalFloatInterpolationRange(); + void FreeFloatResizeBiLinear(); + + ResizeParameter *param_{nullptr}; + ::QuantArg *quant_in_{nullptr}; + ::QuantArg *quant_out_{nullptr}; + QuantMulArg *multiplier_{nullptr}; + ResizeQuantArg resize_quant_arg_; + ResizeFloatScaleQuantArg resize_float_quant_arg_; +}; + +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_SLICE_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.cc new file mode 100644 index 0000000000..2593ea7154 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.cc @@ -0,0 +1,74 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/int8/sigmoid_int8_coder.h" +#include +#include +#include "coder/log.h" +#include "include/errorcode.h" +#include "coder/opcoders/file_collector.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" + +namespace mindspore::lite::micro::nnacl { + +void CalculateTableList(int8_t *table, const float input_scale, const int32_t input_zp) { + int32_t min_value = std::numeric_limits::min(); + int32_t max_value = std::numeric_limits::max(); + const float output_scale = 1.0f / 256; + const int32_t output_zp = -128; + + for (int i = min_value; i < max_value; ++i) { + const float real_input_value = input_scale * (i - input_zp); + const float sigmoid_value = 1.0f / (1.0f + std::exp(-real_input_value)); + const int32_t quantized = std::round(sigmoid_value / output_scale) + output_zp; + auto out_value = static_cast(std::max(std::min(quantized, max_value), min_value)); + auto index = static_cast(i); + table[index] = out_value; + } +} + +int SigmodInt8Coder::Prepare(CoderContext *const context) { + size_t int8_range = 256; + table_list_ = static_cast(allocator_->Malloc(kNumberTypeInt8, int8_range, kOfflinePackWeight)); + MS_CHECK_PTR(table_list_); + + const float input_scale = input_tensor_->quant_params().at(0).scale; + const int32_t input_zp = input_tensor_->quant_params().at(0).zeroPoint; + const float output_scale = output_tensor_->quant_params().at(0).scale; + const int32_t output_zp = output_tensor_->quant_params().at(0).zeroPoint; + if (output_scale != (1.0f / 256) || output_zp != -128) { + MS_LOG(ERROR) << "Output scale is : " << output_scale << ", should be 1/256. Output zp is : " << output_zp + << ", should be -128."; + return RET_ERROR; + } + CalculateTableList(table_list_, input_scale, input_zp); + return RET_OK; +} + +int SigmodInt8Coder::DoCode(CoderContext *const context) { + Collect(context, {"nnacl/int8/sigmoid_int8.h"}, {"sigmoid_int8.c"}); + + NNaclInt8Serializer code; + + int length = input_tensor_->ElementsNum(); + code.CodeFunction("SigmoidInt8", input_tensor_, length, output_tensor_, table_list_); + + context->AppendCode(code.str()); + + return RET_OK; +} + +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h new file mode 100644 index 0000000000..f6d015a197 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sigmoid_int8_coder.h @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
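// With the 256-entry table built above, the generated SigmoidInt8 call reduces to a
// per-element lookup; sketched against the table layout used by CalculateTableList
// (entries stored at the uint8 reinterpretation of each int8 input):
//
//   for (int i = 0; i < length; i++) {
//     out[i] = table[(uint8_t)in[i]];
//   }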
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_MICRO_CODER_SIGMOID_INT8_CODER_H_ +#define MINDSPORE_LITE_MICRO_CODER_SIGMOID_INT8_CODER_H_ + +#include +#include +#include +#include "coder/opcoders/op_coder.h" + +namespace mindspore::lite::micro::nnacl { + +class SigmodInt8Coder final : public OperatorCoder { + public: + SigmodInt8Coder(const std::vector &in_tensors, const std::vector &out_tensors, + const Model::Node *node, size_t node_index, Target target) + : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {} + + ~SigmodInt8Coder() override = default; + + int Prepare(CoderContext *const context) override; + + int DoCode(CoderContext *const context) override; + + private: + int8_t *table_list_{nullptr}; +}; + +} // namespace mindspore::lite::micro::nnacl +#endif // MINDSPORE_LITE_MICRO_CODER_SIGMOID_INT8_CODER_H_ diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.cc index 40a1bfc528..a75cb60a82 100644 --- a/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.cc +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/softmax_int8_coder.cc @@ -24,8 +24,9 @@ #include "coder/log.h" #include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" #include "coder/opcoders/file_collector.h" +#include "coder/opcoders/parallel.h" -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::lite::micro::nnacl { int SoftMaxInt8Coder::Prepare(CoderContext *const context) { @@ -64,15 +65,10 @@ int SoftMaxInt8Coder::Prepare(CoderContext *const context) { int SoftMaxInt8Coder::DoCode(CoderContext *const context) { int outter_size = 1; - int inner_size = 1; for (int i = 0; i < softmax_param_->axis_; i++) { outter_size *= softmax_param_->input_shape_[i]; } MS_CHECK_TRUE(softmax_param_->n_dim_ < 5, "n_dim should be less than the length of maximum value of input_shape"); - for (int i = softmax_param_->axis_; i < softmax_param_->n_dim_; i++) { - inner_size *= softmax_param_->input_shape_[i]; - } - Collect(context, {"nnacl/int8/softmax_int8.h"}, {"softmax_int8.c", "fixed_point.c"}); NNaclInt8Serializer code; @@ -84,22 +80,14 @@ int SoftMaxInt8Coder::DoCode(CoderContext *const context) { code.CodeFunction("memset", exp_data_, 0, exp_data_size_); code.CodeFunction("memset", sum_data_, 0, sum_data_size_); - if (thread_num_ > 1) { - code.CodeBaseStruct("SoftmaxInt8Args", "args", input_tensor_, output_tensor_, outter_size, inner_size, exp_data_, - sum_data_, thread_num_s_, "quant_args", "(SoftmaxParameter *)&softmax_param"); - code.CodeFunction("ParallelLaunch", "THREAD_POOL_DEFAULT", "SoftmaxInt8Run", "&args", "thread_num"); - } else { - int task_id = 0; - MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0"); - int stride = UP_DIV(outter_size, thread_num_); - int count = MSMIN(stride, outter_size - stride * task_id); - code.CodeFunction("SoftmaxInt8", input_tensor_, output_tensor_, count, exp_data_, sum_data_, "quant_args", - "(SoftmaxParameter *)&softmax_parameter"); - } + MS_CHECK_TRUE(thread_num_ > 0, "thread_num_ <= 0"); + int stride = UP_DIV(outter_size, thread_num_); + int count = MSMIN(stride, outter_size - stride * kDefaultTaskId); + code.CodeFunction("SoftmaxInt8", input_tensor_, output_tensor_, count, exp_data_, sum_data_, "quant_args", + "(SoftmaxParameter *)&softmax_parameter"); context->AppendCode(code.str()); - return 
RET_OK; } -REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_SoftMax, CPUOpCoderCreator) +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_Softmax, CPUOpCoderCreator) } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.cc b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.cc new file mode 100644 index 0000000000..fb9b7f9c14 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.cc @@ -0,0 +1,105 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "coder/opcoders/nnacl/int8/sub_int8_coder.h" +#include +#include +#include "include/errorcode.h" +#include "coder/log.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/file_collector.h" + +using mindspore::schema::PrimitiveType_SubFusion; + +namespace mindspore::lite::micro::nnacl { + +int SubInt8Coder::Prepare(CoderContext *const context) { + input0 = input_tensors_.at(0); + input1 = input_tensors_.at(1); + MS_ASSERT(input0); + MS_ASSERT(input1); + + broadcast_ = input0->ElementsNum() != input1->ElementsNum(); + + param_.in0_args_.scale_ = input0->quant_params().front().scale; + param_.in0_args_.zp_ = -input0->quant_params().front().zeroPoint; + param_.in1_args_.scale_ = input1->quant_params().front().scale; + param_.in1_args_.zp_ = -input1->quant_params().front().zeroPoint; + param_.out_args_.scale_ = output_tensor_->quant_params().front().scale; + param_.out_args_.zp_ = output_tensor_->quant_params().front().zeroPoint; + + const int left_shift = 20; + const double twice_max_input_scale = 2 * std::max(param_.in0_args_.scale_, param_.in1_args_.scale_); + const double real_input0_multiplier = param_.in0_args_.scale_ / twice_max_input_scale; + const double real_input1_multiplier = param_.in1_args_.scale_ / twice_max_input_scale; + const double real_output_multiplier = twice_max_input_scale / ((1 << left_shift) * param_.out_args_.scale_); + + QuantizeMultiplierSmallerThanOne(real_input0_multiplier, ¶m_.input0_multiplier_, ¶m_.input0_shift_); + QuantizeMultiplierSmallerThanOne(real_input1_multiplier, ¶m_.input1_multiplier_, ¶m_.input1_shift_); + QuantizeMultiplierSmallerThanOne(real_output_multiplier, ¶m_.output_multiplier_, ¶m_.output_shift_); + + param_.output_activation_min_ = std::numeric_limits::min(); + param_.output_activation_max_ = std::numeric_limits::max(); + + int left_shift0 = -param_.input0_shift_ > 0 ? -param_.input0_shift_ : 0; + param_.right_shift0_ = -param_.input0_shift_ > 0 ? 0 : param_.input0_shift_; + + int left_shift1 = -param_.input1_shift_ > 0 ? -param_.input1_shift_ : 0; + param_.right_shift1_ = -param_.input1_shift_ > 0 ? 0 : param_.input1_shift_; + + param_.left_shift_out_ = -param_.output_shift_ > 0 ? -param_.output_shift_ : 0; + param_.right_shift_out_ = -param_.output_shift_ > 0 ? 
0 : param_.output_shift_; + + param_.left_shift_result0_ = (1 << left_shift) * ((1 << left_shift0)); + param_.left_shift_result1_ = (1 << left_shift) * ((1 << left_shift1)); + + MS_CHECK_TRUE(left_shift + left_shift0 == left_shift, "shift not match"); + MS_CHECK_TRUE(left_shift + left_shift1 == left_shift, "shift not match"); + + return RET_OK; +} + +int SubInt8Coder::DoCode(CoderContext *const context) { + NNaclInt8Serializer code; + // Todo: Parallel run wrapper + auto element_num = output_tensor_->ElementsNum(); + code.CodeStruct("param", param_); + if (broadcast_) { + ArithmeticParameter tile_para; + tile_para.ndim_ = output_tensor_->shape().size(); + for (size_t i = 0; i < tile_para.ndim_; i++) { + tile_para.in_shape0_[i] = input0->DimensionSize(i); + tile_para.in_shape1_[i] = input1->DimensionSize(i); + tile_para.out_shape_[i] = output_tensor_->DimensionSize(i); + } + tile0_data_ = static_cast(allocator_->Malloc(kNumberTypeInt8, output_tensor_->Size(), kWorkspace)); + MS_CHECK_PTR(tile0_data_); + tile1_data_ = static_cast(allocator_->Malloc(kNumberTypeInt8, output_tensor_->Size(), kWorkspace)); + MS_CHECK_PTR(tile1_data_); + + code.CodeStruct("tile_para", tile_para); + + code.CodeFunction("TileDimensionsInt8", input0, input1, tile0_data_, tile1_data_, "&tile_para"); + code.CodeFunction("SubInt8", tile0_data_, tile1_data_, output_tensor_, element_num, "¶m"); + } else { + code.CodeFunction("SubInt8", input0, input1, output_tensor_, element_num, "¶m"); + } + + return RET_OK; +} + +REG_OPERATOR_CODER(kAllTargets, kNumberTypeInt8, PrimitiveType_SubFusion, CPUOpCoderCreator) +} // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h new file mode 100644 index 0000000000..aab1adf59f --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/nnacl/int8/sub_int8_coder.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
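// The left_shift = 20 staging in Prepare above follows the standard quantized
// subtraction recipe: both inputs are rescaled into a shared high-precision domain,
// subtracted, then rescaled to the output. Symbolically, with per-tensor scales
// s0, s1, s_out and m = 2 * max(s0, s1):
//
//   real_input0_multiplier = s0 / m
//   real_input1_multiplier = s1 / m
//   real_output_multiplier = m / (2^20 * s_out)
//
// so that, for zero-point-adjusted values x0 and x1,
//   output_multiplier * 2^20 * (input0_multiplier * x0 - input1_multiplier * x1)
// recovers (s0*x0 - s1*x1) / s_out up to rounding.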
+ */
+#ifndef MINDSPORE_LITE_MICRO_CODER_SUB_INT8_CODER_H_
+#define MINDSPORE_LITE_MICRO_CODER_SUB_INT8_CODER_H_
+
+#include
+#include
+#include
+#include "coder/opcoders/op_coder.h"
+#include "nnacl/int8/quantize.h"
+
+namespace mindspore::lite::micro::nnacl {
+
+class SubInt8Coder final : public OperatorCoder {
+ public:
+  SubInt8Coder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
+               const Model::Node *node, size_t node_index, Target target)
+      : OperatorCoder(in_tensors, out_tensors, node, node_index, target) {}
+
+  ~SubInt8Coder() override = default;
+
+  int Prepare(CoderContext *const context) override;
+
+  int DoCode(CoderContext *const context) override;
+
+ private:
+  SubQuantArg param_;
+  Tensor *input0{nullptr};
+  Tensor *input1{nullptr};
+  int8_t *tile0_data_{nullptr};
+  int8_t *tile1_data_{nullptr};
+  bool broadcast_{false};
+};
+
+}  // namespace mindspore::lite::micro::nnacl
+#endif  // MINDSPORE_LITE_MICRO_CODER_SUB_INT8_CODER_H_
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder.cc b/mindspore/lite/micro/coder/opcoders/op_coder.cc
index bc4820e400..1c956788e7 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder.cc
+++ b/mindspore/lite/micro/coder/opcoders/op_coder.cc
@@ -15,7 +15,9 @@
  */
 #include
-#include "micro/coder/opcoders/op_coder.h"
+#include "coder/opcoders/op_coder.h"
+#include "coder/opcoders/parallel.h"
+
 namespace mindspore::lite::micro {
 
 OperatorCoder::~OperatorCoder() {
@@ -46,12 +48,7 @@ void OperatorCoder::set_parameter(OpParameter *parameter) { this->parameter_ = p
 size_t OperatorCoder::node_index() const { return node_index_; }
 
 void OperatorCoder::set_thread_num(int thread_num) {
-  if (thread_num == 4) {
-    this->thread_num_ = thread_num;
-    this->thread_num_s_ = "thread_num";
-    return;
-  } else {
-    return;
-  }
+  thread_num_ = thread_num;
+  support_parallel_ = thread_num_ == kMaxThreadNumSupported;
 }
 }  // namespace mindspore::lite::micro
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder.h b/mindspore/lite/micro/coder/opcoders/op_coder.h
index 1560b79ddc..3c53e4a74c 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder.h
@@ -32,23 +32,21 @@ namespace mindspore::lite::micro {
 
 constexpr int kPrecision = 19;
 
-#define CODE_PARALLEL_FUNC(func) code << "ParallelLaunch(THREAD_POOL_DEFAULT, " << func << ", &args, thread_num);\n"
-
 class OperatorCoder {
  public:
   OperatorCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                 const Model::Node *node, size_t node_index, Target target)
       : input_tensors_(in_tensors),
         output_tensors_(out_tensors),
-        node_(node),
         target_(target),
+        node_(node),
         node_index_(node_index) {
     allocator_ = MemoryAllocator::GetInstance();
     // vectors checked not empty in OpCoderBuilder::build
     input_tensor_ = input_tensors_.at(kInputIndex);
     output_tensor_ = output_tensors_.at(kOutputIndex);
   }
 
-  std::string ID() const { return node_->name_; }
+  std::string name() const { return node_->name_; }
 
   void set_input_tensor_indices(const std::vector<uint32_t> &input_indices);
   void set_output_tensor_indices(const std::vector<uint32_t> &output_indices);
@@ -67,7 +65,6 @@ class OperatorCoder {
   size_t node_index() const;
 
   void set_parameter(OpParameter *parameter);
-  const PrimitiveC *primitive() const { return node_->primitive_; }
 
   const Model::Node *node() const { return this->node_; }
 
@@ -87,8 +84,8 @@ class OperatorCoder {
  protected:
   std::vector<Tensor *> input_tensors_;
   std::vector<Tensor *> output_tensors_;
-  const Model::Node *node_{nullptr};
   Target target_{kTargetUnknown};
+  const Model::Node *node_{nullptr};
 
   Tensor *input_tensor_{nullptr};
   Tensor *output_tensor_{nullptr};
@@ -96,7 +93,7 @@ class OperatorCoder {
 
   MemoryAllocator *allocator_{nullptr};
 
-  std::string thread_num_s_{"1"};
+  bool support_parallel_{false};
   int thread_num_{1};
 
  private:
@@ -114,6 +111,10 @@ template <typename T>
 std::unique_ptr<OperatorCoder> CPUOpCoderCreator(const std::vector<Tensor *> &in_tensors,
                                                  const std::vector<Tensor *> &out_tensors, const Model::Node *node,
                                                  size_t node_index, Target target) {
+  if (node == nullptr) {
+    MS_LOG(ERROR) << "node is null";
+    return nullptr;
+  }
   std::unique_ptr<T> coder = std::make_unique<T>(in_tensors, out_tensors, node, node_index, target);
   return coder;
 }
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc b/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
index 106d6ddfaf..b5704ea940 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_builder.cc
@@ -13,20 +13,20 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#include "micro/coder/opcoders/op_coder_builder.h"
+#include "coder/opcoders/op_coder_builder.h"
 #include
 #include
 #include "micro/coder/allocator/allocator.h"
+#include "src/common/prim_util.h"
+#include "src/common/version_manager.h"
 #include "src/ops/populate/populate_register.h"
+#include "coder/opcoders/parallel.h"
 
 namespace mindspore::lite::micro {
-constexpr int kMAX_THREAD_NUM_SUPPORT = 4;
 
 std::unique_ptr<OperatorCoder> OpCoderBuilder::build() {
-  if (node_->primitive_ == nullptr) {
-    return nullptr;
-  }
-  auto primitive_type = static_cast<schema::PrimitiveType>(node_->primitive_->Type());
+  MS_CHECK_PTR_RET_NULL(node_->primitive_);
+  int primitive_type = GetPrimitiveType(node_->primitive_);
   CoderKey coder_key(target_, data_type_, primitive_type);
   CoderCreatorFunc creator_func = OpCoderFactory::GetInstance()->FindOpCoder(coder_key);
   if (creator_func == nullptr) {
@@ -52,16 +52,22 @@ std::unique_ptr<OperatorCoder> OpCoderBuilder::build() {
                   << " code_target: " << target_ << " data_type: " << EnumNameDataType(data_type_);
     return op_coder;
   }
-  OpParameter *parameter =
-    PopulateRegistry::GetInstance()->GetParameterCreator((schema::PrimitiveType(primitive_type)))(node_->primitive_);
+  int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
+  ParameterGen paramGen =
+    PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node_->primitive_), schema_version);
+  if (paramGen == nullptr) {
+    MS_LOG(ERROR) << "parameter generator is null";
+    return nullptr;
+  }
+  OpParameter *parameter = paramGen(node_->primitive_);
   if (parameter == nullptr) {
     MS_LOG(ERROR) << "PopulateParameter return nullptr, type: "
-                  << schema::EnumNamePrimitiveType((schema::PrimitiveType)(primitive_type));
+                  << PrimitiveTypeName(GetPrimitiveType(node_->primitive_));
     return nullptr;
   }
   op_coder->set_input_tensor_indices(input_indices_);
   op_coder->set_output_tensor_indices(output_indices_);
-  int thread_num = this->mode_ == CodeMode::Code_Inference ? kMAX_THREAD_NUM_SUPPORT : 1;
+  int thread_num = support_parallel_ ? kMaxThreadNumSupported : 1;
   op_coder->set_thread_num(thread_num);
   parameter->thread_num_ = thread_num;
   op_coder->set_parameter(parameter);
@@ -108,6 +114,11 @@ OpCoderBuilder &OpCoderBuilder::target(Target target) {
   return *this;
 }
 
+OpCoderBuilder &OpCoderBuilder::support_parallel(bool parallel) {
+  support_parallel_ = parallel;
+  return *this;
+}
+
 void OpCoderBuilder::Reset() {}
 
 }  // namespace mindspore::lite::micro
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_builder.h b/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
index c4596f8fa2..bab3e96fb2 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_builder.h
@@ -18,7 +18,7 @@
 #include
 #include
-#include "micro/coder/opcoders/op_coder.h"
+#include "coder/opcoders/op_coder.h"
 #include "micro/coder/allocator/allocator.h"
 
 namespace mindspore::lite::micro {
@@ -43,6 +43,8 @@ class OpCoderBuilder {
 
   OpCoderBuilder &target(Target target);
 
+  OpCoderBuilder &support_parallel(bool parallel);
+
   void Reset();
 
  private:
@@ -56,13 +58,15 @@ class OpCoderBuilder {
 
   Target target_{kTargetUnknown};
 
-  TypeId data_type_ = kTypeUnknown;
+  TypeId data_type_{kTypeUnknown};
 
-  CodeMode mode_ = Code_Normal;
+  CodeMode mode_{Code_Unknown};
 
   std::vector<uint32_t> input_indices_;
 
   std::vector<uint32_t> output_indices_;
+
+  bool support_parallel_{false};
 };
 
 }  // namespace mindspore::lite::micro
diff --git a/mindspore/lite/micro/coder/opcoders/op_coder_register.h b/mindspore/lite/micro/coder/opcoders/op_coder_register.h
index 08942c2786..4e22ffdb31 100644
--- a/mindspore/lite/micro/coder/opcoders/op_coder_register.h
+++ b/mindspore/lite/micro/coder/opcoders/op_coder_register.h
@@ -33,8 +33,7 @@ class CoderKey {
  public:
   CoderKey() = delete;
 
-  CoderKey(Target target, TypeId data_type, schema::PrimitiveType op_type)
-    : target_(target), data_type_(data_type), op_type_(op_type) {}
+  CoderKey(Target target, TypeId data_type, int op_type) : target_(target), data_type_(data_type), op_type_(op_type) {}
 
   CoderKey AllKey() const {
     CoderKey key(kAllTargets, data_type_, op_type_);
@@ -48,7 +47,7 @@ class CoderKey {
  private:
   Target target_ = kTargetUnknown;
   TypeId data_type_ = kTypeUnknown;
-  schema::PrimitiveType op_type_ = schema::PrimitiveType_NONE;
+  int op_type_ = schema::PrimitiveType_NONE;
 };
 
 class OpCoderFactory {
diff --git a/mindspore/lite/micro/coder/opcoders/parallel.cc b/mindspore/lite/micro/coder/opcoders/parallel.cc
new file mode 100644
index 0000000000..fbd6bb8619
--- /dev/null
+++ b/mindspore/lite/micro/coder/opcoders/parallel.cc
@@ -0,0 +1,33 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
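For orientation, OpCoderBuilder is a fluent builder; only target(), support_parallel(), build(), and Reset() are visible in this diff, so the sketch below sticks to those (a real caller would also chain the tensor and index setters, which this excerpt does not show):

  #include "coder/opcoders/op_coder_builder.h"

  std::unique_ptr<OperatorCoder> MakeCoder() {
    OpCoderBuilder builder;
    return builder.target(kAllTargets)  // choose the code-generation target
      .support_parallel(true)           // thread_num becomes kMaxThreadNumSupported (4) instead of 1
      .build();                         // nullptr if the primitive, creator, or parameter generator is missing
  }
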
+ */
+#include "coder/opcoders/parallel.h"
+
+namespace mindspore::lite::micro {
+
+// ParallelLaunch is defined in thread_pool
+const char *kParallelLaunch = "ParallelLaunch";
+
+// g_thread_pool and g_thread_num are global variables;
+// g_thread_pool is assigned by CreateThreadPool,
+// and g_thread_num holds the value returned by GetCurrentThreadNum
+const char *gThreadNum = "g_thread_num";
+const char *gThreadPool = "g_thread_pool";
+
+// args represents the parameters required for an operator to run
+const char *kRunArgs = "args";
+const char *kRunArgsAddr = "&args";
+
+}  // namespace mindspore::lite::micro
diff --git a/mindspore/lite/micro/coder/opcoders/parallel.h b/mindspore/lite/micro/coder/opcoders/parallel.h
new file mode 100644
index 0000000000..0813607848
--- /dev/null
+++ b/mindspore/lite/micro/coder/opcoders/parallel.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODERS_PARALLEL_H_
+#define MINDSPORE_LITE_MICRO_CODER_OPCODERS_PARALLEL_H_
+
+namespace mindspore::lite::micro {
+
+constexpr int kDefaultTaskId = 0;
+
+constexpr int kMaxThreadNumSupported = 4;
+
+// ParallelLaunch is defined in thread_pool
+extern const char *kParallelLaunch;
+
+// g_thread_pool and g_thread_num are global variables;
+// g_thread_pool is assigned by CreateThreadPool,
+// and g_thread_num holds the value returned by GetCurrentThreadNum
+extern const char *gThreadNum;
+extern const char *gThreadPool;
+
+// args represents the parameters required for an operator to run
+extern const char *kRunArgs;
+extern const char *kRunArgsAddr;
+
+}  // namespace mindspore::lite::micro
+
+#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODERS_PARALLEL_H_
diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.cc b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.cc
index e03c7e355a..35378625e5 100644
--- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.cc
+++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.cc
@@ -14,9 +14,9 @@
  * limitations under the License.
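These constants are the exact tokens the coder splices into the generated inference source. A hedged sketch of the C code an operator coder might emit with them, reusing the AddInt8Args/AddInt8Run wrapper that appears later in this patch (the args initialization is illustrative):

  // ---- emitted code (illustrative) ----
  AddInt8Args args = {&para, &arith_para, in_size, out_size, g_thread_num,
                      elements_num, support_opt_add, input0, input1, output};
  ParallelLaunch(g_thread_pool, AddInt8Run, &args, g_thread_num);
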
 */
-#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
+#include "coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h"
 #include "src/common/log_adapter.h"
-#include "micro/coder/log.h"
+#include "coder/log.h"
 #include "nnacl/pooling_parameter.h"
 
 namespace mindspore::lite::micro::nnacl {
@@ -56,7 +56,7 @@ void NNaclFp32Serializer::CodeStruct(const std::string &name, const SoftmaxParam
 }
 
 void NNaclFp32Serializer::CodeStruct(const std::string &name, const ConvParameter &conv_parameter) {
-  CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, "{NULL}", conv_parameter.kernel_h_,
+  CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, "{}", conv_parameter.kernel_h_,
                  conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_,
                  conv_parameter.dilation_h_, conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_,
                  conv_parameter.pad_l_, conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_,
@@ -101,4 +101,14 @@ void NNaclFp32Serializer::CodeStruct(const std::string &name, const TransposePar
                  transpose_parameter.data_size_);
 }
 
+void NNaclFp32Serializer::CodeStruct(const std::string &name, const DeQuantArg &de_quant_arg) {
+  // the clusters field is not serialized yet; cluster dequantization will be supported in the future
+  CodeBaseStruct("DeQuantArg", name, de_quant_arg.scale, de_quant_arg.zeroPoint, de_quant_arg.var_corr,
+                 de_quant_arg.mean_corr, "NULL", de_quant_arg.clusters_nums, de_quant_arg.bitNum);
+}
+void NNaclFp32Serializer::CodeStruct(const std::string &name, const SpliceParameter &splice_parameter) {
+  CodeArray("splice_context", splice_parameter.context_, splice_parameter.context_dim_, false);
+  CodeBaseStruct("SpliceParameter", name, splice_parameter.op_parameter_, splice_parameter.context_dim_,
+                 splice_parameter.forward_indexes_dim_, "splice_context", nullptr, splice_parameter.output_dim_);
+}
 }  // namespace mindspore::lite::micro::nnacl
diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h
index 7128853050..edc6f28ff2 100644
--- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h
+++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_fp32_serializer.h
@@ -18,7 +18,7 @@
 #define MINDSPORE_LITE_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_FP32_SERIALIZER_H_
 #include
 #include
-#include "micro/coder/opcoders/serializers/serializer.h"
+#include "coder/opcoders/serializers/serializer.h"
 #include "nnacl/batchnorm_parameter.h"
 #include "nnacl/fp32/arithmetic_fp32.h"
 #include "nnacl/conv_parameter.h"
@@ -29,12 +29,14 @@
 #include "nnacl/fp32/transpose_fp32.h"
 #include "nnacl/pooling_parameter.h"
 #include "nnacl/softmax_parameter.h"
+#include "nnacl/splice_parameter.h"
+#include "wrapper/fp32/dequant_int8_to_fp32_wrapper.h"
 
 namespace mindspore::lite::micro::nnacl {
 class NNaclFp32Serializer : public Serializer {
  public:
   NNaclFp32Serializer() = default;
-  ~NNaclFp32Serializer() = default;
+  ~NNaclFp32Serializer() override = default;
   void CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter);
   void CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter);
   void CodeStruct(const std::string &name, const BatchNormParameter &batch_norm_parameter);
@@ -45,6 +47,8 @@ class NNaclFp32Serializer : public Serializer {
   void CodeStruct(const std::string &name, const SliceParameter
&slice_parameter); void CodeStruct(const std::string &name, const TileParameter &tile_parameter); void CodeStruct(const std::string &name, const TransposeParameter &transpose_parameter); + void CodeStruct(const std::string &name, const DeQuantArg &de_quant_arg); + void CodeStruct(const std::string &name, const SpliceParameter &splice_parameter); }; } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc index fc0e3093fc..3836781337 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.cc @@ -13,10 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h" #include #include "src/common/log_adapter.h" -#include "micro/coder/log.h" +#include "coder/opcoders/parallel.h" +#include "coder/log.h" namespace mindspore::lite::micro::nnacl { @@ -49,15 +50,15 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConvParamete quant_arg_w, quant_arg_out, real_multiplier, left_shift, right_shift, quant_multiplier, out_act_min, out_act_max, quant_arg.input_arg_num_, quant_arg.filter_arg_num_, quant_arg.output_arg_num_, quant_arg.per_channel_); - - CodeBaseStruct( - "ConvParameter", name, conv_parameter.op_parameter_, conv_quant_arg, conv_parameter.kernel_h_, - conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_, conv_parameter.dilation_h_, - conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_, conv_parameter.pad_l_, - conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_, conv_parameter.input_batch_, - conv_parameter.input_h_, conv_parameter.input_w_, conv_parameter.input_channel_, conv_parameter.output_batch_, - conv_parameter.output_h_, conv_parameter.output_w_, conv_parameter.output_channel_, conv_parameter.thread_num_, - conv_parameter.input_unit_, conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_); + code << "int thread_num = MSMIN(" << gThreadNum << ", " << conv_parameter.output_h_ << ");\n"; + CodeBaseStruct("ConvParameter", name, conv_parameter.op_parameter_, conv_quant_arg, conv_parameter.kernel_h_, + conv_parameter.kernel_w_, conv_parameter.stride_h_, conv_parameter.stride_w_, + conv_parameter.dilation_h_, conv_parameter.dilation_w_, conv_parameter.pad_u_, conv_parameter.pad_d_, + conv_parameter.pad_l_, conv_parameter.pad_r_, conv_parameter.group_, conv_parameter.tile_num_, + conv_parameter.input_batch_, conv_parameter.input_h_, conv_parameter.input_w_, + conv_parameter.input_channel_, conv_parameter.output_batch_, conv_parameter.output_h_, + conv_parameter.output_w_, conv_parameter.output_channel_, "thread_num", conv_parameter.input_unit_, + conv_parameter.output_unit_, conv_parameter.pad_mode_, conv_parameter.act_type_); } void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatMulParameter &matmul_parameter) { @@ -107,14 +108,14 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const PoolingParam << " &" << out_quant_name << "};\n"; CodeBaseStruct("PoolingParameter", name, pooling_parameter.op_parameter_, 
pooling_parameter.pool_mode_, - pooling_parameter.round_mode_, pooling_parameter.act_type_, pooling_parameter.avg_mode_, - pooling_parameter.global_, pooling_parameter.window_w_, pooling_parameter.window_h_, - pooling_parameter.stride_w_, pooling_parameter.stride_h_, pooling_parameter.input_w_, - pooling_parameter.input_h_, pooling_parameter.input_batch_, pooling_parameter.input_channel_, - pooling_parameter.output_w_, pooling_parameter.output_h_, pooling_parameter.output_batch_, - pooling_parameter.output_channel_, pooling_parameter.pad_u_, pooling_parameter.pad_d_, - pooling_parameter.pad_l_, pooling_parameter.pad_r_, pooling_parameter.op_parameter_.thread_num_, - quant_name, pooling_parameter.quantize_); + pooling_parameter.round_mode_, pooling_parameter.pad_mode_, pooling_parameter.act_type_, + pooling_parameter.avg_mode_, pooling_parameter.global_, pooling_parameter.window_w_, + pooling_parameter.window_h_, pooling_parameter.stride_w_, pooling_parameter.stride_h_, + pooling_parameter.input_w_, pooling_parameter.input_h_, pooling_parameter.input_batch_, + pooling_parameter.input_channel_, pooling_parameter.output_w_, pooling_parameter.output_h_, + pooling_parameter.output_batch_, pooling_parameter.output_channel_, pooling_parameter.pad_u_, + pooling_parameter.pad_d_, pooling_parameter.pad_l_, pooling_parameter.pad_r_, + pooling_parameter.op_parameter_.thread_num_, quant_name, pooling_parameter.quantize_); } void NNaclInt8Serializer::CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter) { @@ -122,6 +123,18 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const SoftmaxParam ToString(softmax_parameter.input_shape_), softmax_parameter.element_size_, softmax_parameter.n_dim_); } +void NNaclInt8Serializer::CodeStruct(const std::string &name, const SliceParameter &slice_parameter) { + CodeBaseStruct("SliceParameter", name, slice_parameter.op_parameter_, ToString(slice_parameter.shape_), + ToString(slice_parameter.begin_), ToString(slice_parameter.end_), ToString(slice_parameter.size_), + slice_parameter.quant_arg_, slice_parameter.param_length_); +} + +void NNaclInt8Serializer::CodeStruct(const std::string &name, const BatchNormParameter &batchnorm_parameter) { + CodeBaseStruct("BatchNormParameter", name, batchnorm_parameter.op_parameter_, batchnorm_parameter.epsilon_, + batchnorm_parameter.momentum_, batchnorm_parameter.unit_, batchnorm_parameter.units_, + batchnorm_parameter.channel_, batchnorm_parameter.fused_); +} + void NNaclInt8Serializer::CodeStruct(const std::string &name, const SoftmaxQuantArg &softmax_quant_parameter) { CodeBaseStruct("SoftmaxQuantArg", name, softmax_quant_parameter.in_quant_args_, softmax_quant_parameter.out_quant_arg_, softmax_quant_parameter.output_activation_min_, @@ -160,6 +173,14 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const ConcatParame concat_parameter.after_axis_size, concat_parameter.count_unit_); } +void NNaclInt8Serializer::CodeStruct(const std::string &name, const ::QuantArg &quant_arg) { + CodeBaseStruct("QuantArg", name, quant_arg.scale_, quant_arg.zp_); +} + +void NNaclInt8Serializer::CodeStruct(const std::string &name, const ::QuantMulArg &quant_mul_arg) { + CodeBaseStruct("QuantMulArg", name, quant_mul_arg.multiplier_, quant_mul_arg.left_shift_, quant_mul_arg.right_shift_); +} + void NNaclInt8Serializer::CodeStruct(const std::string &name, const ReduceQuantArg &reduce_quant_arg) { CodeBaseStruct( "ReduceQuantArg", name, reduce_quant_arg.in_scale_, reduce_quant_arg.in_zp_, 
reduce_quant_arg.out_scale_, @@ -180,4 +201,25 @@ void NNaclInt8Serializer::CodeStruct(const std::string &name, const MatmulQuantA matmul_quant_arg.right_shift, matmul_quant_arg.quant_multiplier); } +void NNaclInt8Serializer::CodeStruct(const std::string &name, const SubQuantArg &sub_quant_arg) { + CodeBaseStruct("SubQuantArg", name, sub_quant_arg.in0_args_, sub_quant_arg.in1_args_, sub_quant_arg.out_args_, + sub_quant_arg.output_activation_min_, sub_quant_arg.output_activation_max_, + sub_quant_arg.input0_multiplier_, sub_quant_arg.input1_multiplier_, sub_quant_arg.output_multiplier_, + sub_quant_arg.input0_shift_, sub_quant_arg.input1_shift_, sub_quant_arg.output_shift_, + sub_quant_arg.left_shift_result0_, sub_quant_arg.left_shift_result1_, sub_quant_arg.right_shift0_, + sub_quant_arg.right_shift1_, sub_quant_arg.left_shift_out_, sub_quant_arg.right_shift_out_); +} + +void NNaclInt8Serializer::CodeStruct(const std::string &name, const DivQuantArg &div_quant_arg) { + CodeBaseStruct("DivQuantArg", name, div_quant_arg.in0_args_, div_quant_arg.in1_args_, div_quant_arg.out_args_, + div_quant_arg.output_activation_min_, div_quant_arg.output_activation_max_, + div_quant_arg.output_multiplier_, div_quant_arg.output_shift_); +} + +void NNaclInt8Serializer::CodeStruct(const std::string &name, const ReluXQuantArg &relu_quant_arg) { + CodeBaseStruct("ReluXQuantArg", name, relu_quant_arg.input_arg, relu_quant_arg.output_arg, + relu_quant_arg.input_multiplier_, relu_quant_arg.left_shift_, relu_quant_arg.right_shift_, + relu_quant_arg.quantized_output_min, relu_quant_arg.quantized_output_max); +} + } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h index a3c0550edb..daffc55380 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_int8_serializer.h @@ -19,7 +19,8 @@ #include #include "nnacl/pooling_parameter.h" #include "nnacl/softmax_parameter.h" -#include "micro/coder/opcoders/serializers/serializer.h" +#include "coder/opcoders/serializers/serializer.h" +#include "nnacl/op_base.h" #include "nnacl/int8/add_int8.h" #include "nnacl/int8/arithmetic_int8.h" #include "nnacl/conv_parameter.h" @@ -27,6 +28,9 @@ #include "nnacl/int8/concat_int8.h" #include "nnacl/int8/quantize.h" #include "nnacl/reshape_parameter.h" +#include "nnacl/slice_parameter.h" +#include "nnacl/batchnorm_parameter.h" +#include "nnacl/int8/relux_int8.h" namespace mindspore::lite::micro::nnacl { @@ -40,12 +44,19 @@ class NNaclInt8Serializer : public Serializer { void CodeStruct(const std::string &name, const ArithmeticParameter &arithmetic_parameter); void CodeStruct(const std::string &name, const PoolingParameter &pooling_parameter); void CodeStruct(const std::string &name, const SoftmaxParameter &softmax_parameter); + void CodeStruct(const std::string &name, const SliceParameter &slice_parameter); + void CodeStruct(const std::string &name, const BatchNormParameter &batchnorm_parameter); void CodeStruct(const std::string &name, const SoftmaxQuantArg &softmax_quant_parameter); void CodeStruct(const std::string &name, const ConcatParameter &concat_parameter, int input_tensors, int in_shape, int out_shape); + void CodeStruct(const std::string &name, const ::QuantArg &quant_arg); + void CodeStruct(const std::string &name, const 
::QuantMulArg &quant_mul_arg); void CodeStruct(const std::string &name, const ReduceQuantArg &reduce_quant_arg); void CodeStruct(const std::string &name, const ReshapeQuantArg &reshape_quant_arg); void CodeStruct(const std::string &name, const MatmulQuantArg &matmul_quant_arg); + void CodeStruct(const std::string &name, const SubQuantArg &sub_quant_arg); + void CodeStruct(const std::string &name, const DivQuantArg &div_quant_arg); + void CodeStruct(const std::string &name, const ReluXQuantArg &relu_quant_arg); }; } // namespace mindspore::lite::micro::nnacl diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.cc b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.cc new file mode 100644 index 0000000000..9e80249759 --- /dev/null +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.cc @@ -0,0 +1,90 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include "nnacl/pooling_parameter.h" +#include "nnacl/slice_parameter.h" +#include "nnacl/softmax_parameter.h" +#include "nnacl/int8/add_int8.h" +#include "nnacl/int8/quantize.h" +#include "coder/opcoders/parallel.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h" + +namespace mindspore::lite::micro { + +std::ostream &operator<<(std::ostream &code, const ::QuantArg &quant_arg) { + code << "{" << static_cast(quant_arg.scale_) << ", " << quant_arg.zp_ << "}"; + return code; +} + +std::ostream &operator<<(std::ostream &code, const OpParameter &tile) { + code << "{ \"\"" + << ", " << tile.type_ << ", " << gThreadNum << "}"; + return code; +} + +std::ostream &operator<<(std::ostream &code, const AddQuantQrgs &args) { + code << "{" << args.zp_ << ", " << args.left_shift_ << ", " << args.right_shift_ << ", " << args.multiplier_ << "}"; + return code; +} + +std::ostream &operator<<(std::ostream &code, const SliceQuantArg &arg) { + code << "{" << arg.in_args_ << ", " << arg.out_args_ << ", " << arg.output_activation_min_ << ", " + << arg.output_activation_max_ << "}"; + return code; +} + +std::ostream &operator<<(std::ostream &code, PoolMode pool_mode) { + code << "(PoolMode)" + << "(" << static_cast(pool_mode) << ")"; + return code; +} + +std::ostream &operator<<(std::ostream &code, RoundMode round_mode) { + code << "(RoundMode)" + << "(" << static_cast(round_mode) << ")"; + return code; +} + +std::ostream &operator<<(std::ostream &code, RoundingMode rounding_mode) { + code << "(RoundingMode)" + << "(" << static_cast(rounding_mode) << ")"; + return code; +} + +std::ostream &operator<<(std::ostream &code, PadMode pad_mode) { + code << "(PadMode)" + << "(" << static_cast(pad_mode) << ")"; + return code; +} + +std::ostream &operator<<(std::ostream &code, ActType act_type) { + code << "(ActType)" + << "(" << static_cast(act_type) << ")"; + return code; +} + +std::ostream &operator<<(std::ostream &code, DataOrder data_order) { + 
if (data_order == RowMajor) { + code << "RowMajor"; + } else { + code << "ColMajor"; + } + return code; +} + +} // namespace mindspore::lite::micro diff --git a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h index 3d59511609..6f6f1559dd 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h +++ b/mindspore/lite/micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h @@ -19,65 +19,31 @@ #include #include #include "nnacl/pooling_parameter.h" +#include "nnacl/slice_parameter.h" #include "nnacl/softmax_parameter.h" #include "nnacl/int8/add_int8.h" #include "nnacl/int8/quantize.h" namespace mindspore::lite::micro { -inline std::ostream &operator<<(std::ostream &code, const ::QuantArg &quant_arg) { - code << "{" << static_cast(quant_arg.scale_) << ", " << quant_arg.zp_ << "}"; - return code; -} +std::ostream &operator<<(std::ostream &code, const ::QuantArg &quant_arg); -inline std::ostream &operator<<(std::ostream &code, const OpParameter &tile) { - code << "{ \"\"" - << ", " << tile.type_ << ", " << tile.thread_num_ << "}"; - return code; -} +std::ostream &operator<<(std::ostream &code, const OpParameter &tile); -inline std::ostream &operator<<(std::ostream &code, const AddQuantQrgs &args) { - code << "{" << args.zp_ << ", " << args.left_shift_ << ", " << args.right_shift_ << ", " << args.multiplier_ << "}"; - return code; -} +std::ostream &operator<<(std::ostream &code, const AddQuantQrgs &args); -inline std::ostream &operator<<(std::ostream &code, PoolMode pool_mode) { - code << "(PoolMode)" - << "(" << static_cast(pool_mode) << ")"; - return code; -} +std::ostream &operator<<(std::ostream &code, const SliceQuantArg &arg); -inline std::ostream &operator<<(std::ostream &code, RoundMode round_mode) { - code << "(RoundMode)" - << "(" << static_cast(round_mode) << ")"; - return code; -} +std::ostream &operator<<(std::ostream &code, PoolMode pool_mode); -inline std::ostream &operator<<(std::ostream &code, RoundingMode rounding_mode) { - code << "(RoundingMode)" - << "(" << static_cast(rounding_mode) << ")"; - return code; -} +std::ostream &operator<<(std::ostream &code, RoundMode round_mode); -inline std::ostream &operator<<(std::ostream &code, PadMode pad_mode) { - code << "(PadMode)" - << "(" << static_cast(pad_mode) << ")"; - return code; -} +std::ostream &operator<<(std::ostream &code, RoundingMode rounding_mode); -inline std::ostream &operator<<(std::ostream &code, ActType act_type) { - code << "(ActType)" - << "(" << static_cast(act_type) << ")"; - return code; -} +std::ostream &operator<<(std::ostream &code, PadMode pad_mode); -inline std::ostream &operator<<(std::ostream &code, DataOrder data_order) { - if (data_order == RowMajor) { - code << "RowMajor"; - } else { - code << "ColMajor"; - } - return code; -} +std::ostream &operator<<(std::ostream &code, ActType act_type); + +std::ostream &operator<<(std::ostream &code, DataOrder data_order); } // namespace mindspore::lite::micro #endif // MINDSPORE_LITE_MICRO_CODER_OPCODERS_SERIALIZERS_NNACL_STREAM_UTILS_H_ diff --git a/mindspore/lite/micro/coder/opcoders/serializers/serializer.h b/mindspore/lite/micro/coder/opcoders/serializers/serializer.h index dd844da5f3..6a5401c1d2 100644 --- a/mindspore/lite/micro/coder/opcoders/serializers/serializer.h +++ b/mindspore/lite/micro/coder/opcoders/serializers/serializer.h @@ -20,8 +20,8 @@ #include #include #include 
-#include "micro/coder/allocator/allocator.h" -#include "micro/coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h" +#include "coder/allocator/allocator.h" +#include "coder/opcoders/serializers/nnacl_serializer/nnacl_stream_utils.h" namespace mindspore::lite::micro { @@ -121,6 +121,10 @@ class Serializer { template void CodeMallocExpression(T t, size_t size) { + if (size == 0) { + MS_LOG(ERROR) << "CodeMallocExpression size is zero"; + exit(1); + } GenCode(t); code << " = malloc(" << size << ");\n"; code << "if ("; diff --git a/mindspore/lite/micro/coder/operator_library/CMakeLists.txt b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt new file mode 100644 index 0000000000..c86d5f7fd3 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/CMakeLists.txt @@ -0,0 +1,49 @@ +option(MICRO_CMSIS_X86 "build for CMSIS x86" ON) +option(ENABLE_ASAN "enable asan" OFF) +option(CMAKE_BUILD_TYPE "debug or release" Debug) + +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + set(DEBUG_MODE "Debug") +endif() + +set(CMAKE_C_FLAGS "${CMAKE_ENABLE_C99} ${CMAKE_C_FLAGS}") +if("${CMAKE_BUILD_TYPE}" STREQUAL "Debug") + set(CMAKE_C_FLAGS "-DDebug -g -fvisibility=default ${CMAKE_C_FLAGS}") +else() + set(CMAKE_C_FLAGS "-fPIC -fPIE -Werror -O3 -fstack-protector-strong -fomit-frame-pointer ${CMAKE_C_FLAGS}") + set(CMAKE_C_FLAGS_Release "${CMAKE_C_FLAGS_Release} -O3 -ffunction-sections -fdata-sections") + string(REPLACE "-g" "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") +endif() + +if(ENABLE_ASAN) + set(CMAKE_C_FLAGS "${CMAKE_CXX_FLAGS} ${OPTION_CXX_FLAGS} -lasan") +endif() + +set(MICRO_CMAKE_PATH ${MICRO_DIR}/cmake) +set(OPERATOR_LIBRARY_PATH ${CMAKE_BINARY_DIR}/operator_library) +set(HEADER_PATH "${OPERATOR_LIBRARY_PATH}/include") +set(LIB_PATH "${OPERATOR_LIBRARY_PATH}/lib/x86") + +message("===========>start to pack operators' head file") +file(REMOVE_RECURSE ${OPERATOR_LIBRARY_PATH}) +file(MAKE_DIRECTORY ${OPERATOR_LIBRARY_PATH}) +file(INSTALL ${LITE_DIR}/nnacl DESTINATION ${HEADER_PATH} FILES_MATCHING PATTERN "*.h") +file(INSTALL ${MICRO_DIR}/coder/operator_library/wrapper DESTINATION ${HEADER_PATH} FILES_MATCHING PATTERN "*.h") +file(INSTALL ${CMAKE_BINARY_DIR}/cmsis/CMSIS/Core/Include DESTINATION ${HEADER_PATH}/CMSIS/Core) +file(INSTALL ${CMAKE_BINARY_DIR}/cmsis/CMSIS/DSP/Include DESTINATION ${HEADER_PATH}/CMSIS/DSP) +file(INSTALL ${CMAKE_BINARY_DIR}/cmsis/CMSIS/NN/Include DESTINATION ${HEADER_PATH}/CMSIS/NN) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/assembly) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/fp16) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/fp16_grad) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/fp32_grad) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/intrinsics) +file(REMOVE_RECURSE ${HEADER_PATH}/nnacl/optimize) + +include(${MICRO_CMAKE_PATH}/package_android.cmake) +include(${MICRO_CMAKE_PATH}/package_nnacl.cmake) +include(${MICRO_CMAKE_PATH}/package_cmsis.cmake) +include(${MICRO_CMAKE_PATH}/package_wrapper.cmake) + +# generate static library +add_library(ops STATIC ${NNACL_OPS} ${CMSIS_OPS} ${WRAPPER_SRC} ${RUNTIME_SRC}) +install(TARGETS ops ARCHIVE DESTINATION ${LIB_PATH}) diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.c new file mode 100644 index 0000000000..1b77b1c8df --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.c @@ -0,0 +1,71 @@ +/** + * Copyright 2021 Huawei 
Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "wrapper/base/detection_post_process_base_wrapper.h" +#include + +static inline void swap_index(int *arr, int lhs, int rhs) { + int temp = arr[lhs]; + arr[lhs] = arr[rhs]; + arr[rhs] = temp; +} + +static inline bool compare(int i, int j, const float *scores) { + if (scores[i] == scores[j]) { + return i < j; + } + return scores[i] > scores[j]; +} + +static void heapify(const float *scores, int *indexes, int n, int i) { + while (i < n) { + int cur = i; + int l = 2 * i + 1; + int r = 2 * i + 2; + if (r < n && compare(indexes[cur], indexes[r], scores)) { + cur = r; + } + if (l < n && compare(indexes[cur], indexes[l], scores)) { + cur = l; + } + if (cur != i) { + swap_index(indexes, i, cur); + i = cur; + } else { + break; + } + } +} + +void PartialArgSort(const float *scores, int *indexes, int num_to_sort, int num_values) { + // make heap + int start_index = num_to_sort / 2 - 1; + for (int i = start_index; i >= 0; i--) { + heapify(scores, indexes, num_to_sort, i); + } + // compare the rest elements with heap top + for (int i = num_to_sort; i < num_values; ++i) { + if (!compare(indexes[0], indexes[i], scores)) { + swap_index(indexes, i, 0); + heapify(scores, indexes, num_to_sort, 0); + } + } + // heap sort + for (int cur_length = num_to_sort - 1; cur_length > 0; cur_length--) { + swap_index(indexes, 0, cur_length); + heapify(scores, indexes, cur_length, 0); + } +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.h new file mode 100644 index 0000000000..badec6c043 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/base/detection_post_process_base_wrapper.h @@ -0,0 +1,24 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
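PartialArgSort keeps a min-heap of the best num_to_sort candidates while scanning the rest, then heap-sorts that heap, so indexes[0..num_to_sort-1] ends up ordered by descending score. A small worked example against the implementation above:

  const float scores[] = {0.1f, 0.9f, 0.5f, 0.7f};
  int indexes[] = {0, 1, 2, 3};
  PartialArgSort(scores, indexes, 2, 4);
  // indexes[0] == 1 (score 0.9), indexes[1] == 3 (score 0.7);
  // the remaining entries are unspecified leftovers of the scan.
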
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ + +#include + +void PartialArgSort(const float *scores, int *indexes, int num_to_sort, int num_values); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_BASE_DETECTION_POST_PROCESS_BASE_WRAPPER_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.c new file mode 100644 index 0000000000..e9c6a01f54 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.c @@ -0,0 +1,69 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "wrapper/fp32/dequant_int8_to_fp32_wrapper.h" +#include +#include +void DequantDataPerChannel(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + size_t per_batch_size, float *de_quant_dst) { + size_t matrix_size = de_quant_nums / per_batch_size; + for (int i = 0; i < per_batch_size; i++) { + const DeQuantArg *de_quant_arg = de_quant_args[i]; + float scale = de_quant_arg->scale; + int32_t zero_point = de_quant_arg->zeroPoint; + for (int j = 0; j < matrix_size; j++) { + de_quant_dst[i * matrix_size + j] = (quant_src[i * matrix_size + j] - zero_point) * scale; + } + } +} + +void DequantData(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, size_t channels, + float *de_quant_dst) { + size_t per_channel_size = de_quant_nums / channels; + for (size_t i = 0; i < channels; i++) { + const DeQuantArg *de_quant_arg = de_quant_args[i]; + float scale = de_quant_arg->scale; + int32_t zero_point = de_quant_arg->zeroPoint; + float var_corr = de_quant_arg->var_corr; + float mean_corr = de_quant_arg->mean_corr; + if (var_corr < 0 || var_corr > 10) { + var_corr = 1; + } + for (size_t j = 0; j < per_channel_size; j++) { + float dequant_data = (quant_src[per_channel_size * i + j] - zero_point) * scale; + de_quant_dst[per_channel_size * i + j] = dequant_data * var_corr + mean_corr; + } + } +} + +void DequantDataPerTensor(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + float *de_quant_dst) { + const DeQuantArg *de_quant_arg = de_quant_args[0]; + float *quant_clusters = de_quant_arg->clusters; + float scale = de_quant_arg->scale; + int32_t zero_point = de_quant_arg->zeroPoint; + for (int j = 0; j < de_quant_nums; j++) { + int8_t quant_data = quant_src[j]; + if (quant_clusters) { + if (quant_data > INT8_MAX || quant_data < INT8_MIN) { + return; + } + de_quant_dst[j] = quant_clusters[quant_data - INT8_MIN]; + } else { + de_quant_dst[j] = (quant_data - zero_point) * scale; + } + } +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.h 
b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.h new file mode 100644 index 0000000000..ad581e0537 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/dequant_int8_to_fp32_wrapper.h @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ +#include +#include +typedef struct DeQuantArg { + float scale; + int32_t zeroPoint; + float var_corr; + float mean_corr; + float *clusters; + int clusters_nums; + int bitNum; +} DeQuantArg; + +#ifdef __cplusplus +extern "C" { +#endif + +void DequantDataPerChannel(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + size_t per_batch_size, float *de_quant_dst); + +void DequantData(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, size_t channels, + float *de_quant_dst); + +void DequantDataPerTensor(const int8_t *quant_src, const DeQuantArg **de_quant_args, size_t de_quant_nums, + float *de_quant_dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_DEQUANT_TO_INT8_FP32_WRAPPER_H_ diff --git a/mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.c similarity index 100% rename from mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.c rename to mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.c diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.h new file mode 100644 index 0000000000..89fba5e3a4 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/fp32/matmul_fp32_wrapper.h @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
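Numerically, DequantData applies de_quant_dst = (quant - zeroPoint) * scale * var_corr + mean_corr per channel, with var_corr reset to 1 whenever it falls outside [0, 10]. For example, with scale = 0.5, zeroPoint = 1, var_corr = 1.0, and mean_corr = 0.1, a quantized value of 5 dequantizes to (5 - 1) * 0.5 * 1.0 + 0.1 = 2.1. DequantDataPerTensor takes the cluster path instead whenever clusters is non-NULL, using the quantized value as an index into the cluster table.
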
+ */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ +#include +#include "nnacl/fp32/matmul_fp32.h" +#ifdef __cplusplus +extern "C" { +#endif + +void InitMatrixA(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); + +void InitMatrixB(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_FP32_MATMUL_FP32_WRAPPER_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.c new file mode 100644 index 0000000000..c5653e5a39 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.c @@ -0,0 +1,69 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "wrapper/int8/add_int8_wrapper.h" +#include "nnacl/errorcode.h" + +int AddBroadcastInt8Run(void *cdata, int task_id) { + AddInt8Args *args = (AddInt8Args *)(cdata); + int stride = UP_DIV(args->out_size_, args->thread_count_); + int real_out_count = MSMIN(stride, args->out_size_ - stride * task_id); + if (real_out_count <= 0) { + return NNACL_OK; + } + int8_t *cur_in0 = NULL; + int8_t *cur_in1 = NULL; + int8_t *cur_out = NULL; + for (int i = 0; i < real_out_count; i++) { + if (args->arith_para_->in_elements_num0_ == args->arith_para_->out_elements_num_) { + cur_in0 = args->input0_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + cur_in1 = args->input1_data_; + cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + } else { + cur_in0 = args->input0_data_; + cur_in1 = args->input1_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; + } + AddInt8(cur_in0, cur_in1, cur_out, args->in_size_, args->para_); + } + return NNACL_OK; +} + +int AddInt8Run(void *cdata, int task_id) { + AddInt8Args *args = (AddInt8Args *)(cdata); + /* no need broadcast */ + int stride = UP_DIV(args->elements_num_, args->thread_count_); + int rest_count = args->elements_num_ - task_id * stride; + int real_count = MSMIN(stride, rest_count); + if (real_count <= 0) { + return NNACL_OK; + } + int8_t *cur_in0 = args->input0_data_ + stride * task_id; + int8_t *cur_in1 = args->input1_data_ + stride * task_id; + int8_t *cur_out = args->output_data_ + stride * task_id; + if (args->support_opt_add_) { + int8_t *ptr_in = args->arith_para_->in_elements_num0_ == 1 ? cur_in1 : cur_in0; + int8_t element_in = args->arith_para_->in_elements_num0_ == 1 ? args->input0_data_[0] : args->input1_data_[0]; + AddQuantQrgs *ptr_args = + args->arith_para_->in_elements_num0_ == 1 ? 
&args->para_->in1_args_ : &args->para_->in0_args_; + AddQuantQrgs *ele_args = + args->arith_para_->in_elements_num0_ == 1 ? &args->para_->in0_args_ : &args->para_->in1_args_; + AddOptInt8(ptr_in, element_in, cur_out, rest_count, args->para_, ptr_args, ele_args); + } else { + AddInt8(cur_in0, cur_in1, cur_out, rest_count, args->para_); + } + return NNACL_OK; +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.h new file mode 100644 index 0000000000..791ee2cf57 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/add_int8_wrapper.h @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ +#include +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/int8/add_int8.h" +#include "nnacl/arithmetic.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + AddQuantParameter *para_; + ArithmeticParameter *arith_para_; + int in_size_; + int out_size_; + int thread_count_; + int elements_num_; + bool support_opt_add_; + int8_t *input0_data_; + int8_t *input1_data_; + int8_t *output_data_; +} AddInt8Args; + +int AddBroadcastInt8Run(void *cdata, int task_id); + +int AddInt8Run(void *cdata, int task_id); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.c new file mode 100644 index 0000000000..93048138b6 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.c @@ -0,0 +1,25 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "wrapper/int8/batchnorm_int8_wrapper.h" +#include "nnacl/int8/batchnorm_int8.h" +#include "nnacl/errorcode.h" + +int BatchNormInt8Run(void *cdata, int task_id) { + BatchNormArgs *args = (BatchNormArgs *)(cdata); + BatchNormInt8(args->out_addr_, args->in_addr_, args->alpha_addr_, args->beta_addr_, task_id, args->batchnorm_param_); + return NNACL_OK; +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.h new file mode 100644 index 0000000000..a3b3c87637 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/batchnorm_int8_wrapper.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ + +#include +#include "nnacl/batchnorm_parameter.h" +typedef struct BatchNormArgs { + int8_t *in_addr_; + int8_t *out_addr_; + float *alpha_addr_; + float *beta_addr_; + BatchNormParameter *batchnorm_param_; +} BatchNormArgs; + +int BatchNormInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_BATCHNORM_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.c new file mode 100644 index 0000000000..b81643113e --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.c @@ -0,0 +1,27 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
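BatchNormInt8Run follows the same ParallelLaunch callback contract as the other wrappers: cdata is the operator's args struct passed as a void pointer, and task_id selects the slice inside the kernel. A minimal caller-side sketch (the buffer variables are illustrative):

  BatchNormArgs args = {in_addr, out_addr, alpha_addr, beta_addr, &batchnorm_param};
  ParallelLaunch(g_thread_pool, BatchNormInt8Run, &args, g_thread_num);
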
+ */ + +#include "wrapper/int8/concat_int8_wrapper.h" + +int ConcatInt8Run(void *cdata, int task_id) { + ConcatInt8Args *args = (ConcatInt8Args *)cdata; + int64_t real_dst_count = MSMIN(args->before_axis_size_ - task_id * args->count_unit_, args->count_unit_); + if (real_dst_count <= 0) { + return NNACL_OK; + } + Int8Concat(args->inputs_, args->output_, args->para_, args->axis_, real_dst_count, task_id); + return NNACL_OK; +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.h new file mode 100644 index 0000000000..f7c8fc4e20 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/concat_int8_wrapper.h @@ -0,0 +1,35 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ +#define MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ + +#include "nnacl/errorcode.h" +#include "nnacl/concat_parameter.h" +#include "nnacl/int8/concat_int8.h" + +typedef struct { + int8_t **inputs_; + int8_t *output_; + ConcatParameter *para_; + int axis_; + int64_t before_axis_size_; + int64_t count_unit_; +} ConcatInt8Args; + +int ConcatInt8Run(void *cdata, int task_id); + +#endif // MINDSPORE_LITE_MICRO_INT8_CONCAT_WRAPPER_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.c new file mode 100644 index 0000000000..5f30d4bd10 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.c @@ -0,0 +1,90 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
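The multiplier/left-shift/right-shift triples consumed by the int8 conv paths below (and serialized as QuantMulArg earlier in this patch) come from the standard fixed-point decomposition of a float rescale factor. A self-contained sketch of that decomposition, in the generic gemmlowp style rather than the exact NNACL helper:

  #include <cmath>
  #include <cstdint>

  // real_multiplier ~= quantized_multiplier * 2^(shift - 31)
  void QuantizeMultiplierSketch(double real_multiplier, int32_t *quantized_multiplier, int *shift) {
    if (real_multiplier == 0.0) {
      *quantized_multiplier = 0;
      *shift = 0;
      return;
    }
    int exponent = 0;
    double q = std::frexp(real_multiplier, &exponent);  // q in [0.5, 1)
    int64_t q_fixed = static_cast<int64_t>(std::llround(q * (1LL << 31)));
    if (q_fixed == (1LL << 31)) {  // rounding overflowed into the next power of two
      q_fixed /= 2;
      ++exponent;
    }
    *quantized_multiplier = static_cast<int32_t>(q_fixed);
    *shift = exponent;
  }
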
+ */ + +#include "wrapper/int8/conv1x1_init_int8_wrapper.h" +#include +#include "nnacl/int8/matmul_int8.h" +#include "nnacl/errorcode.h" + +int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel, + int32_t output_channel, int32_t input_zp, bool support_optimize, bool filter_peroc, + int8_t **packed_weight, int32_t **bias_data) { + if (packed_weight == NULL || bias_data == NULL) { + return NNACL_ERR; + } +#ifdef ENABLE_ARM32 + /* InitWeightBiasArm32 */ + /* weight */ + size_t size = UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C2NUM) * sizeof(int8_t); + int8_t *packed_weight_ = (int8_t *)(malloc(size)); + if (packed_weight_ == NULL) { + return NNACL_ERR; + } + memset(packed_weight_, 0, size); + RowMajor2Row2x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel); + /* bias */ + size = UP_ROUND(output_channel, C2NUM); + int32_t *bias_data_ = (int32_t *)malloc(size * sizeof(int32_t)); + if (bias_data_ == NULL) { + free(packed_weight_); + return NNACL_ERR; + } + memset(bias_data_, 0, size * sizeof(int32_t)); + if (src_bias != NULL) { + memcpy(bias_data_, src_bias, output_channel * sizeof(int32_t)); + } +#else + /* InitWeightBias */ + /* weight */ + size_t size = support_optimize ? UP_ROUND(input_channel, C4NUM) * UP_ROUND(output_channel, C16NUM) * sizeof(int8_t) + : UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C4NUM) * sizeof(int8_t); + int8_t *packed_weight_ = (int8_t *)(malloc(size)); + if (packed_weight_ == NULL) { + return NNACL_ERR; + } + memset(packed_weight_, 0, size); + if (support_optimize) { + RowMajor2Row4x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel); + } else { + RowMajor2Row16x4MajorInt8(src_weight, packed_weight_, output_channel, input_channel); + } + /* bias */ + size = support_optimize ? UP_ROUND(output_channel, C16NUM) : UP_ROUND(output_channel, C4NUM); + int32_t *bias_data_ = (int32_t *)malloc(size * sizeof(int32_t)); + if (bias_data_ == NULL) { + free(packed_weight_); + return NNACL_ERR; + } + memset(bias_data_, 0, size * sizeof(int32_t)); + if (src_bias != NULL) { + memcpy(bias_data_, src_bias, output_channel * sizeof(int32_t)); + } +#endif + /* InitBiasByzp */ + /* bias = bias - v2 x zp1 + zp1 x zp2 */ + for (int oc = 0; oc < output_channel; oc++) { + int32_t weight_sum_value = 0; + int32_t filter_zp = (filter_peroc) ? filter_zps[oc] : filter_zps[0]; + for (int ic = 0; ic < input_channel; ic++) { + weight_sum_value += src_weight[oc * input_channel + ic]; + } + bias_data_[oc] += filter_zp * input_zp * input_channel - weight_sum_value * input_zp; + } + + *packed_weight = packed_weight_; + *bias_data = bias_data_; + return NNACL_OK; +} diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.h new file mode 100644 index 0000000000..72380ba892 --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_init_int8_wrapper.h @@ -0,0 +1,28 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ +#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ + +#include +#include +#include "nnacl/conv_parameter.h" + +int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel, + int32_t output_channel, int32_t input_zp, bool support_optimize, bool filter_peroc, + int8_t **packed_weight, int32_t **bias_data); + +#endif // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_INIT_INT8_H_ diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.c new file mode 100644 index 0000000000..4560c4d49f --- /dev/null +++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.c @@ -0,0 +1,225 @@ +/* + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
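The "bias = bias - v2 x zp1 + zp1 x zp2" step in Conv1x1Init folds both zero points into the bias once at init time, so the inner matmul can ignore them: for each output channel, bias[oc] += filter_zp * input_zp * input_channel - weight_sum * input_zp. For example, with input_channel = 3, input_zp = 2, filter_zp = 1, and a channel whose weights sum to 10, the correction is 1 * 2 * 3 - 10 * 2 = -14.
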
+ */
+
+#include "wrapper/int8/conv1x1_run_int8_wrapper.h"
+#include "nnacl/base/conv1x1_base.h"
+#include "nnacl/int8/matmul_int8.h"
+#include "nnacl/int8/pack_int8.h"
+#include "nnacl/int8/conv1x1_int8.h"
+#include "nnacl/errorcode.h"
+
+void Pre1x1Trans(Conv1x1Args *args, int8_t *src_input, int8_t *src_output) {
+  args->output_ptr_ = src_output;
+  if (args->pre_trans_input_) {
+    Conv1x1InputPack(src_input, args->input_ptr_, args->conv_param_, sizeof(int8_t));
+  } else {
+    args->input_ptr_ = src_input;
+  }
+}
+
+int OcOptPre(void *cdata, int task_id) {
+  Conv1x1Args *args = (Conv1x1Args *)(cdata);
+  int cur_stride = args->thread_stride_hw_ * C4NUM;
+  int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM;
+  int cur_hw = MSMIN(cur_stride, res_stride);
+  if (cur_hw <= 0) {
+    return NNACL_OK;
+  }
+  int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_;
+  int8_t *hw_packed_in = args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_4_;
+  int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM;
+
+  if (args->filter_peroc_) {
+    PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, 1);
+  } else {
+    PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw,
+                                args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_);
+  }
+  return NNACL_OK;
+}
+
+int RunArm64OptOc(void *cdata, int task_id) {
+  Conv1x1Args *args = (Conv1x1Args *)(cdata);
+  int stride = args->thread_stride_oc_ * C16NUM;
+  int cur_stride = task_id * stride;
+  int res_stride = args->matmul_param_->col_ - cur_stride;
+  int cur_oc = MSMIN(stride, res_stride);
+  if (cur_oc <= 0) {
+    return NNACL_OK;
+  }
+
+  bool filter_peroc = args->filter_peroc_;
+  int32_t *cur_left_shift =
+    filter_peroc ? args->left_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.left_shift_;
+  int32_t *cur_right_shift =
+    filter_peroc ? args->right_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.right_shift_;
+  int32_t *cur_multiplier =
+    filter_peroc ? args->multiplier_ + cur_stride : args->conv_param_->conv_quant_arg_.quant_multiplier_;
+  int32_t *cur_zp = filter_peroc ? args->filter_zp_ptr_ + cur_stride : args->filter_zp_ptr_;
+
+  Conv1x1Int8Opt(args->packed_input_, args->packed_weight_ + cur_stride * args->matmul_param_->deep_4_,
+                 args->output_ptr_ + cur_stride, args->input_sum_, args->bias_data_ + cur_stride,
+                 args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_4_, cur_left_shift, cur_right_shift,
+                 cur_multiplier, args->conv_param_, args->matmul_func_, cur_zp);
+  return NNACL_OK;
+}
+
+int RunArmOc(void *cdata, int task_id) {
+  Conv1x1Args *args = (Conv1x1Args *)(cdata);
+#ifdef ENABLE_ARM32
+  int col_tile = C2NUM;
+#else
+  int col_tile = C4NUM;
+#endif
+  int stride = args->thread_stride_oc_ * col_tile;
+  int cur_stride = task_id * stride;
+  int res_stride = args->matmul_param_->col_ - cur_stride;
+  int cur_oc = MSMIN(stride, res_stride);
+  if (cur_oc <= 0) {
+    return NNACL_OK;
+  }
+
+  bool filter_peroc = args->filter_peroc_;
+  int32_t *cur_left_shift =
+    filter_peroc ? args->left_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.left_shift_;
+  int32_t *cur_right_shift =
+    filter_peroc ? args->right_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.right_shift_;
+  int32_t *cur_multiplier =
+    filter_peroc ? args->multiplier_ + cur_stride : args->conv_param_->conv_quant_arg_.quant_multiplier_;
+  int32_t *cur_zp = filter_peroc ? args->filter_zp_ptr_ + cur_stride : args->filter_zp_ptr_;
+
+  Conv1x1Int8(args->packed_input_, args->packed_weight_ + cur_stride * args->matmul_param_->deep_16_,
+              args->output_ptr_ + cur_stride, args->input_sum_, args->bias_data_ + cur_stride,
+              args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_16_, cur_left_shift, cur_right_shift,
+              cur_multiplier, args->conv_param_, cur_zp);
+  return NNACL_OK;
+}
+
+int RunArm64OptHw(void *cdata, int task_id) {
+  Conv1x1Args *args = (Conv1x1Args *)(cdata);
+  int cur_stride = args->thread_stride_hw_ * C4NUM;
+  int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM;
+  int cur_hw = MSMIN(cur_stride, res_stride);
+  if (cur_hw <= 0) {
+    return NNACL_OK;
+  }
+  int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_;
+  int8_t *hw_out = args->output_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->output_channel_;
+  int8_t *hw_packed_in = args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_4_;
+  int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM;
+
+  if (args->filter_peroc_) {
+    PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, 1);
+  } else {
+    PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw,
+                                args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_);
+  }
+
+  Conv1x1Int8Opt(hw_packed_in, args->packed_weight_, hw_out, hw_input_sum, args->bias_data_, cur_hw,
+                 args->matmul_param_->col_, args->matmul_param_->deep_4_, args->left_shift_, args->right_shift_,
+                 args->multiplier_, args->conv_param_, args->matmul_func_, args->filter_zp_ptr_);
+  return NNACL_OK;
+}
+
+int RunArmHw(void *cdata, int task_id) {
+  Conv1x1Args *args = (Conv1x1Args *)(cdata);
+  int cur_stride = args->thread_stride_hw_ * C4NUM;
+  int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM;
+  int cur_hw = MSMIN(cur_stride, res_stride);
+  if (cur_hw <= 0) {
+    return NNACL_OK;
+  }
+
+  int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_;
+  int8_t *hw_out = args->output_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->output_channel_;
+  int8_t *hw_packed_in =
+    args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_16_;
+  int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM;
+
+  RowMajor2Row16x4MajorInt8(hw_in, hw_packed_in, cur_hw, args->matmul_param_->deep_);
+
+  if (args->filter_peroc_) {
+    PackInputSum16x4PerLayer(hw_packed_in, hw_input_sum, 1, UP_ROUND(cur_hw, C4NUM), args->matmul_param_->deep_16_);
+  } else {
+    PackInputSum16x4PerLayer(hw_packed_in, hw_input_sum, args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_,
+                             UP_ROUND(cur_hw, C4NUM), args->matmul_param_->deep_16_);
+  }
+
+  Conv1x1Int8(hw_packed_in, args->packed_weight_, hw_out, hw_input_sum, args->bias_data_, cur_hw,
+              args->matmul_param_->col_, args->matmul_param_->deep_16_, args->left_shift_, args->right_shift_,
+              args->multiplier_, args->conv_param_, args->filter_zp_ptr_);
+  return NNACL_OK;
+}
+
+void Conv1x1Run(int8_t *src_in, Conv1x1Args *args, int8_t *src_out) {
+  int row_pack_count = C4NUM;
+  int col_pack_count;
+
+#ifdef ENABLE_ARM32
+  col_pack_count = C2NUM;
+#else
+  if (args->support_optimize_) {
+    col_pack_count = C16NUM;
+  } else {
+    col_pack_count = C4NUM;
+  }
+#endif
+  int thread_num = 1;
+  int hw_thread_count = UP_DIV(args->matmul_param_->row_, row_pack_count);
+  int oc_thread_count = UP_DIV(args->matmul_param_->col_, col_pack_count);
+  size_t thread_count_hw = MSMIN(thread_num, hw_thread_count);
+  args->thread_stride_hw_ = UP_DIV(hw_thread_count, thread_count_hw);
+  size_t thread_count_oc = MSMIN(thread_num, oc_thread_count);
+  args->thread_stride_oc_ = UP_DIV(oc_thread_count, thread_count_oc);
+  bool parallel_by_oc = oc_thread_count > thread_num;
+
+  for (int batch_index = 0; batch_index < args->conv_param_->input_batch_; batch_index++) {
+    Pre1x1Trans(args,
+                src_in + batch_index * args->conv_param_->input_h_ * args->conv_param_->input_w_ *
+                           args->conv_param_->input_channel_,
+                src_out + batch_index * args->matmul_param_->row_ * args->matmul_param_->col_);
+    if (parallel_by_oc) {
+      /* input transpose and input sum */
+      if (args->support_optimize_) {
+        OcOptPre(args, 0);
+      } else {
+        RowMajor2Row16x4MajorInt8(args->input_ptr_, args->packed_input_, args->matmul_param_->row_,
+                                  args->matmul_param_->deep_);
+        if (args->filter_peroc_) {
+          PackInputSum16x4PerLayer(args->packed_input_, args->input_sum_, 1, args->matmul_param_->row_4_,
+                                   args->matmul_param_->deep_16_);
+        } else {
+          PackInputSum16x4PerLayer(args->packed_input_, args->input_sum_,
+                                   args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_,
+                                   args->matmul_param_->row_4_, args->matmul_param_->deep_16_);
+        }
+      }
+      /* matmul parallel by oc */
+      if (args->support_optimize_) {
+        RunArm64OptOc(args, 0);
+      } else {
+        RunArmOc(args, 0);
+      }
+    } else {
+      /* matmul parallel by hw */
+      if (args->support_optimize_) {
+        RunArm64OptHw(args, 0);
+      } else {
+        RunArmHw(args, 0);
+      }
+    }
+  }
+}
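The task split in Conv1x1Run is plain integer arithmetic: UP_DIV computes a per-task stride in pack units, and each task clamps its slice with MSMIN and bails out when the slice is empty, so over-partitioning is harmless. A small standalone illustration with made-up numbers (the macros are redeclared here so the sketch compiles on its own):

#include <stdio.h>

#define UP_DIV(x, y) (((x) + (y) - 1) / (y))
#define MSMIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
  /* hypothetical split: 37 row tiles shared by 3 tasks */
  int total_units = 37, task_num = 3;
  int stride = UP_DIV(total_units, MSMIN(task_num, total_units));
  for (int task_id = 0; task_id < task_num; ++task_id) {
    int cur = MSMIN(stride, total_units - task_id * stride);
    if (cur <= 0) {
      continue; /* trailing tasks can get no work; the kernels return NNACL_OK early */
    }
    printf("task %d covers units [%d, %d)\n", task_id, task_id * stride, task_id * stride + cur);
  }
  return 0;
}

With 37 units and 3 tasks the stride is 13, so the tasks cover 13, 13, and 11 units, matching the cur_hw / cur_oc clamping in the Run functions above.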
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.h
new file mode 100644
index 0000000000..310bec30dd
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv1x1_run_int8_wrapper.h
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_
+#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "nnacl/conv_parameter.h"
+#include "nnacl/matmul_parameter.h"
+
+typedef struct {
+  int32_t *input_sum_;     /* per-oc */
+  int32_t *filter_zp_ptr_; /* per-oc up round */
+  int32_t *left_shift_;    /* per-oc up round */
+  int32_t *right_shift_;   /* per-oc up round */
+  int32_t *multiplier_;    /* per-oc up round */
+  int8_t *packed_weight_;
+  int32_t *bias_data_;
+  int8_t *packed_input_;
+  int8_t *input_ptr_;
+  int8_t *output_ptr_;
+  size_t thread_stride_hw_;
+  size_t thread_stride_oc_;
+  ConvParameter *conv_param_;
+  MatMulParameter *matmul_param_;
+  MATMUL_OPT_DP_FUNC matmul_func_;
+  bool pre_trans_input_;
+  bool support_optimize_;
+  bool filter_peroc_;
+} Conv1x1Args;
+
+void Conv1x1Run(int8_t *src_in, Conv1x1Args *args, int8_t *src_out);
+
+#endif  // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV1X1_RUN_H_
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.c
new file mode 100644
index 0000000000..9947c56140
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wrapper/int8/conv_init_int8_wrapper.h"
+#include <string.h>
+#include "nnacl/op_base.h"
+#include "nnacl/int8/matmul_int8.h"
+#include "nnacl/errorcode.h"
+
+int ConvInit(int8_t *origin_weight, const int32_t *ori_bias, const int32_t *filter_quant_zps, int kernel_h,
+             int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc,
+             bool support_optimize, int8_t **packed_weight, int32_t **bias_data) {
+  int8_t *packed_weight_ = NULL;
+  int32_t *bias_data_ = NULL;
+  int kernel_plane = kernel_h * kernel_w;
+  int up_round_deep;
+  int up_round_oc;
+#ifdef ENABLE_ARM32
+  up_round_oc = UP_ROUND(output_channel, C2NUM);
+  up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);
+#else
+  if (support_optimize) {
+    up_round_oc = UP_ROUND(output_channel, C8NUM);
+    up_round_deep = UP_ROUND(kernel_plane * input_channel, C4NUM);
+  } else {
+    up_round_oc = UP_ROUND(output_channel, C4NUM);
+    up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM);
+  }
+#endif
+  int pack_weight_size = up_round_oc * up_round_deep;
+  size_t bias_size = up_round_oc * sizeof(int32_t);
+
+  // init weight
+  packed_weight_ = (int8_t *)(malloc(pack_weight_size));
+  if (packed_weight_ == NULL) {
+    return NNACL_ERR;
+  }
+  memset(packed_weight_, 0, pack_weight_size);
+#ifdef ENABLE_ARM32
+  RowMajor2Row2x16MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane);
+#else
+  if (support_optimize) {
+    RowMajor2Row8x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane);
+  } else {
+    RowMajor2Row16x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane);
+  }
+#endif
+
+  // init bias
+  bias_data_ = (int32_t *)(malloc(bias_size));
+  if (bias_data_ == NULL) {
+    free(packed_weight_);
+    return NNACL_ERR;
+  }
+  memset(bias_data_, 0, bias_size);
+  if (ori_bias != NULL) {
+    memcpy(bias_data_, ori_bias, output_channel * sizeof(int32_t));
+  }
+
+  // fold zero points into bias; weight_sum is seeded with up_round_deep * filter_zp so
+  // the zero-packed padding lanes behave like (weight - filter_zp) == 0 and the
+  // up_round terms cancel out of the final bias
+  for (int oc = 0; oc < output_channel; oc++) {
+    int32_t filter_zp = filter_quant_zps[0];
+    if (filter_peroc) {
+      filter_zp = filter_quant_zps[oc];
+    }
+    int32_t weight_sum_value = up_round_deep * filter_zp;
+    for (int i = 0; i < kernel_plane * input_channel; i++) {
+      weight_sum_value += origin_weight[oc * kernel_plane * input_channel + i] - filter_zp;
+    }
+    bias_data_[oc] += filter_zp * input_zp * up_round_deep - weight_sum_value * input_zp;
+  }
+
+  *packed_weight = packed_weight_;
+  *bias_data = bias_data_;
+  return NNACL_OK;
+}
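The packed-buffer sizes above are all driven by UP_ROUND, which pads each dimension up to the tile width of the target kernel. A worked example with hypothetical shapes (a 3x3 convolution, 17 input channels, 5 output channels), again with the macro redeclared so the sketch stands alone:

#include <stdio.h>

#define UP_ROUND(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void) {
  /* hypothetical 3x3 conv with 17 input and 5 output channels */
  int kernel_plane = 3 * 3;
  int input_channel = 17, output_channel = 5;
  int deep = kernel_plane * input_channel; /* 153 */

  /* optimized path rounds deep to C4NUM = 4 and oc to C8NUM = 8 */
  int opt_size = UP_ROUND(deep, 4) * UP_ROUND(output_channel, 8); /* 156 * 8 = 1248 */
  /* fallback path rounds deep to C16NUM = 16 and oc to C4NUM = 4 */
  int base_size = UP_ROUND(deep, 16) * UP_ROUND(output_channel, 4); /* 160 * 8 = 1280 */

  printf("packed weight bytes: optimized=%d fallback=%d\n", opt_size, base_size);
  return 0;
}

The padding bytes are zeroed by the memset in ConvInit, which is what makes the bias-folding cancellation described in the comment above hold.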
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.h
new file mode 100644
index 0000000000..819cad60f1
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/conv_init_int8_wrapper.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_
+#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+int ConvInit(int8_t *origin_weight, const int32_t *ori_bias, const int32_t *filter_quant_zps, int kernel_h,
+             int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc,
+             bool support_optimize, int8_t **packed_weight, int32_t **bias_data);
+
+#endif  // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_CONV_INIT_H_
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.c
new file mode 100644
index 0000000000..3e5cf2a410
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wrapper/int8/convolution_depthwise_int8_wrapper.h"
+
+int ConvDepthwiseInt8Run(void *cdata, int task_id) {
+  ConvDepthwiseInt8Args *args = (ConvDepthwiseInt8Args *)cdata;
+  int32_t *buffer = args->row_buffer_ + args->conv_param_->output_w_ * args->conv_param_->output_channel_ * task_id;
+  ConvDwInt8(args->output_data_, buffer, args->input_data_, args->weight_data_, args->bias_data_, args->conv_param_,
+             task_id);
+  return NNACL_OK;
+}
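Every wrapper entry point in this patch shares the signature int Fn(void *cdata, int task_id), which is what lets the generated code hand the same function either to a thread pool or call it inline with task_id 0, as Conv1x1Run does. A minimal serial driver sketch of that protocol (DummyKernel and RunAllTasks are illustrative stand-ins, not part of this patch):

#include <stdio.h>

/* the shared kernel signature used by the *_wrapper Run functions */
typedef int (*KernelFunc)(void *cdata, int task_id);

static int DummyKernel(void *cdata, int task_id) {
  int *hits = (int *)cdata;
  hits[task_id] += 1; /* stand-in for processing slice task_id */
  return 0;           /* NNACL_OK */
}

/* serial stand-in for a parallel launcher: call the kernel once per task */
static int RunAllTasks(KernelFunc fn, void *cdata, int task_num) {
  for (int id = 0; id < task_num; ++id) {
    int ret = fn(cdata, id);
    if (ret != 0) {
      return ret;
    }
  }
  return 0;
}

int main(void) {
  int hits[4] = {0, 0, 0, 0};
  RunAllTasks(DummyKernel, hits, 4);
  printf("%d %d %d %d\n", hits[0], hits[1], hits[2], hits[3]); /* 1 1 1 1 */
  return 0;
}

Keeping the args in a plain struct behind a void pointer is what allows the same C wrapper to be reused by both the single-threaded and the parallel code paths of the generated model.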
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.h
new file mode 100644
index 0000000000..6df27d027f
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_depthwise_int8_wrapper.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_
+#define MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_
+
+#include "nnacl/errorcode.h"
+#include "nnacl/conv_parameter.h"
+#include "nnacl/int8/conv_depthwise_int8.h"
+
+typedef struct {
+  int8_t *output_data_;
+  int32_t *row_buffer_;
+  const int8_t *input_data_;
+  const int16_t *weight_data_;
+  const int32_t *bias_data_;
+  const ConvParameter *conv_param_;
+} ConvDepthwiseInt8Args;
+
+int ConvDepthwiseInt8Run(void *cdata, int task_id);
+
+#endif  // MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_DEPTHWISE_WRAPPER_INT8_WRAPPER_H_
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.c
new file mode 100644
index 0000000000..3f916829bf
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wrapper/int8/convolution_int8_wrapper.h"
+
+void CheckSupportOptimize(ConvolutionInt8Args *args) {
+  int tile_num = 8;
+#ifdef ENABLE_ARM32
+  tile_num = 4;
+  args->is_optimize_ = false;
+#endif
+#ifdef ENABLE_ARM64
+  if (mindspore::lite::IsSupportSDot()) {
+    args->matmul_func_ = MatMulRInt8_optimize_handler;
+    args->is_optimize_ = true;
+  } else {
+    tile_num = 4;
+    args->is_optimize_ = false;
+  }
+#endif
+  args->conv_param_->tile_num_ = tile_num;
+}
+
+int ConvolutionInt8Run(void *cdata, int task_id) {
+  ConvolutionInt8Args *args = (ConvolutionInt8Args *)cdata;
+  ConvInt8(args->input_data_, args->packed_input_, args->matmul_input_, args->packed_weight_, args->bias_data_,
+           args->output_data_, args->filter_zp_, args->input_sum_, task_id, args->conv_param_, args->matmul_func_,
+           args->is_optimize_);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.h
new file mode 100644
index 0000000000..ec19d41aa1
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/convolution_int8_wrapper.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_
+#define MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_
+
+#include "nnacl/errorcode.h"
+#include "nnacl/conv_parameter.h"
+#include "nnacl/matmul_parameter.h"
+#include "nnacl/int8/conv_int8.h"
+
+typedef struct {
+  int8_t *input_data_;
+  int8_t *packed_input_;
+  int8_t *matmul_input_;
+  int8_t *packed_weight_;
+  const int32_t *bias_data_;
+  int8_t *output_data_;
+  int32_t *filter_zp_;
+  int32_t *input_sum_;
+  ConvParameter *conv_param_;
+  MATMUL_OPT_R_FUNC matmul_func_;
+  bool is_optimize_;
+} ConvolutionInt8Args;
+
+void CheckSupportOptimize(ConvolutionInt8Args *args);
+
+int ConvolutionInt8Run(void *cdata, int task_id);
+
+#endif  // MINDSPORE_LITE_MICRO_INT8_CONVOLUTION_WRAPPER_INT8_WRAPPER_H_
diff --git a/mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.c
similarity index 100%
rename from mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.c
rename to mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.c
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.h
new file mode 100644
index 0000000000..0c9f48cb48
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/matmul_int8_wrapper.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_
+#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_
+#include <stdint.h>
+#include "nnacl/int8/matmul_int8.h"
+#ifdef __cplusplus
extern "C" {
+#endif
+
+void InitInt8MatrixA(int8_t *src_ptr, int32_t *input_sums, int8_t *dst_ptr, int batch, int row, int deep, int input_zp,
+                     const int *weight_zp, bool a_transpose);
+
+void InitInt8MatrixB(int8_t *src_ptr, int32_t *weight_bias_sums_batch_, int8_t *dst_ptr, int batch, int deep, int col,
+                     int col_4, int deep_16, int input_zp, int *weight_zp, const int *bias_ptr, bool b_transpose);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.c
new file mode 100644
index 0000000000..58dcbbf28d
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wrapper/int8/resize_int8_wrapper.h"
+#include "nnacl/errorcode.h"
+
+int ResizeInt8Run(void *cdata, int task_id) {
+  ResizeInt8Args *args = (ResizeInt8Args *)cdata;
+  ResizeNearestNeighborInt8Simple(args->input_data_, args->output_data_, args->input_shape_, args->output_shape_,
+                                  args->align_corners_, task_id, args->thread_num_);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.h
new file mode 100644
index 0000000000..6721109e17
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/resize_int8_wrapper.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_
+#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_
+
+#include "nnacl/int8/resize_int8.h"
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+typedef struct {
+  const int8_t *input_data_;
+  int8_t *output_data_;
+  const int *input_shape_;
+  const int *output_shape_;
+  const bool align_corners_;
+  int thread_num_;
+} ResizeInt8Args;
+
+int ResizeInt8Run(void *cdata, int task_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_MICRO_WRAPPER_INT8_RESIZE_INT8_WRAPPER_H_
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.c b/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.c
new file mode 100644
index 0000000000..191908d29c
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.c
@@ -0,0 +1,24 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wrapper/int8/slice_int8_wrapper.h"
+#include "nnacl/int8/slice_int8.h"
+
+int SliceInt8Run(void *cdata, int task_id) {
+  SliceArgs *args = (SliceArgs *)(cdata);
+  int ret = SliceInt8(args->input_data_, args->output_data_, args->param_, task_id);
+  return ret;
+}
diff --git a/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.h b/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.h
new file mode 100644
index 0000000000..0ecbc875cf
--- /dev/null
+++ b/mindspore/lite/micro/coder/operator_library/wrapper/int8/slice_int8_wrapper.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_
+#define MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_
+
+#include <stdint.h>
+#include "nnacl/slice_parameter.h"
+
+typedef struct SliceArgs {
+  int8_t *input_data_;
+  int8_t *output_data_;
+  SliceParameter *param_;
+} SliceArgs;
+
+int SliceInt8Run(void *cdata, int task_id);
+
+#endif  // MINDSPORE_LITE_MICRO_CODER_OPERATOR_LIBRARY_WRAPPER_INT8_SLICE_INT8_WRAPPER_H_
diff --git a/mindspore/lite/micro/coder/session.cc b/mindspore/lite/micro/coder/session.cc
index 093cc11759..f4fdc5084e 100644
--- a/mindspore/lite/micro/coder/session.cc
+++ b/mindspore/lite/micro/coder/session.cc
@@ -27,8 +27,13 @@
 #include "coder/opcoders/op_coder_builder.h"
 #include "coder/utils/coder_utils.h"
 #include "coder/log.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/common/version_manager.h"
+#include "src/runtime/infer_manager.h"
+#include "src/scheduler.h"
 #include "include/errorcode.h"
 #include "src/common/file_utils.h"
+#include "coder/opcoders/nnacl/dequant/de_quant.h"
 
 namespace mindspore::lite::micro {
 
@@ -57,21 +62,28 @@ int CoderSession::InferShape() {
       outputs.push_back(all_tensors.at(curr_node->output_indices_.at(j)));
     }
-    PrimitiveC *primitive = curr_node->primitive_;
-    if (primitive == nullptr) {
-      MS_LOG(ERROR) << "Op " << curr_node->name_ << " should exist in model!";
+    auto primitive = curr_node->primitive_;
+    MS_CHECK_PTR(primitive);
+    int schema_version = VersionManager::GetInstance()->GetSchemaVersion();
+    auto parame_gen = PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(primitive), schema_version);
+    if (parame_gen == nullptr) {
+      MS_LOG(ERROR) << "parameter generator is nullptr.";
+      return RET_NULL_PTR;
+    }
+    auto parameter = parame_gen(primitive);
+    if (parameter == nullptr) {
+      MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << PrimitiveTypeName(GetPrimitiveType(primitive));
       return RET_ERROR;
     }
-    primitive->set_infer_flag(true);
-    int ret = primitive->InferShape(inputs, outputs);
+    parameter->infer_flag_ = true;
+    auto ret = KernelInferShape(inputs, &outputs, parameter);
     if (ret == RET_INFER_INVALID) {
       MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << curr_node->name_
-                   << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()))
-                   << "flag set to false.";
-      primitive->set_infer_flag(false);
+                   << ", type: " << PrimitiveTypeName(GetPrimitiveType(primitive)) << "flag set to false.";
+      parameter->infer_flag_ = false;
     } else if (ret != RET_OK) {
-      MS_LOG(ERROR) << "InferShape failed, name: " << curr_node->name_ << ", type: "
-                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
+      MS_LOG(ERROR) << "InferShape failed, name: " << curr_node->name_
+                    << ", type: " << PrimitiveTypeName(GetPrimitiveType(primitive));
       return RET_ERROR;
     }
   }
@@ -81,7 +93,11 @@ void CoderSession::EndCode() {
   context_->set_tensor_map(allocator_->tensors_map());
   context_->set_saved_weights(allocator_->saved_weights());
-  context_->set_total_buffer_size(allocator_->total_buffer_size());
+  size_t de_quant_max_workspace_size = nnacl::Dequant::GetInstance()->de_quant_max_workspace();
+  size_t final_total_size = allocator_->total_buffer_size() > de_quant_max_workspace_size
+                              ? allocator_->total_buffer_size()
+                              : de_quant_max_workspace_size;
+  context_->set_total_buffer_size(final_total_size);
   context_->set_graph_inputs(coder_graph_->input_tensors());
   context_->set_graph_outputs(coder_graph_->output_tensors());
   Configurator *config = Configurator::GetInstance();
@@ -90,7 +106,7 @@
     blocks = AddDumpDataInfo(context_->code_blocks(), op_coders_);
     context_->set_code_blocks(blocks);
   }
-  if (config->code_mode() == Code_Train) {
+  if (config->code_mode() == Train) {
     Train::TransformGraphForTrain(context_.get(), op_coders_);
   }
 }
@@ -104,15 +120,17 @@
   // 2. prepare, init model parameters
   for (const auto &op_coder : op_coders_) {
     MS_CHECK_PTR(op_coder);
+    MS_LOG(DEBUG) << "prepare: " << op_coder->name();
     ret = op_coder->Prepare(context_.get());
-    MS_CHECK_RET_CODE(ret, "prepare coder " << op_coder->ID() << " failed");
+    MS_CHECK_RET_CODE(ret, "prepare coder " << op_coder->name() << " failed");
     allocator_->enable_is_next();
   }
   // 3. DoCode, write operator code
   for (const auto &op_coder : op_coders_) {
     MS_CHECK_PTR(op_coder);
+    MS_LOG(DEBUG) << "code: " << op_coder->name();
     ret = op_coder->DoCode(this->context_.get());
-    MS_CHECK_RET_CODE(ret, "do coder " << op_coder->ID() << " failed");
+    MS_CHECK_RET_CODE(ret, "do coder " << op_coder->name() << " failed");
   }
   this->EndCode();
@@ -126,12 +144,11 @@ int CoderSession::GenerateCode() {
   Configurator *config = Configurator::GetInstance();
   CodeMode code_mode = config->code_mode();
   switch (code_mode) {
-    case Code_Normal:
-    case Code_Inference:
+    case Inference:
       MS_LOG(INFO) << "generate code for Inference";
       generator = std::make_shared<InferenceGenerator>(std::move(context_));
       break;
-    case Code_Train:
+    case Train:
       MS_LOG(INFO) << "generate code for Train";
       generator = std::make_shared<TrainGenerator>(std::move(context_));
       break;
@@ -240,6 +257,7 @@ int CoderSession::CreateOpCoders() {
   Configurator *config = Configurator::GetInstance();
   Target code_target = config->target();
   CodeMode code_mode = config->code_mode();
+  bool support_parallel = config->support_parallel();
   uint32_t nodes_size = model->all_nodes_.size();
   OpCoderBuilder builder;
   for (uint32_t i = 0; i < nodes_size; ++i) {
@@ -293,6 +311,7 @@
           .outputs(outputs)
           .node(node)
           .target(code_target)
+          .support_parallel(support_parallel)
           .data_type(tensor_data_type)
           .mode(code_mode)
           .input_indices(input_indices)
diff --git a/mindspore/lite/micro/coder/train.cc b/mindspore/lite/micro/coder/train.cc
index b2bfb07993..f566e2c4a4 100644
--- a/mindspore/lite/micro/coder/train.cc
+++ b/mindspore/lite/micro/coder/train.cc
@@ -17,10 +17,13 @@
 #include "coder/train.h"
 #include
 #include
-#include <set>
+#include <array>
 #include
 #include
 #include
+#include <algorithm>
+#include "schema/ops_generated.h"
+#include "src/common/prim_util.h"
 
 namespace mindspore::lite::micro {
 
@@ -53,17 +56,17 @@ std::set<OperatorCoder *> FindInferenceOpcoders(OperatorCoder *edge) {
 }
 
 int Train::TransformGraphForTrain(CoderContext *context, const std::vector<std::unique_ptr<OperatorCoder>> &op_coders) {
-  const std::set<schema::PrimitiveType> loss_types = {schema::PrimitiveType_SoftmaxCrossEntropy,
-                                                      schema::PrimitiveType_SparseSoftmaxCrossEntropy,
-                                                      schema::PrimitiveType_BinaryCrossEntropy,
-                                                      schema::PrimitiveType_SmoothL1Loss,
-                                                      schema::PrimitiveType_SmoothL1LossGrad,
-                                                      schema::PrimitiveType_SigmoidCrossEntropyWithLogits,
-                                                      schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad};
+  const std::array<int, 6> loss_types = {schema::PrimitiveType_SparseSoftmaxCrossEntropy,
+                                         schema::PrimitiveType_BinaryCrossEntropy,
+                                         schema::PrimitiveType_SmoothL1Loss,
+                                         schema::PrimitiveType_SmoothL1LossGrad,
+                                         schema::PrimitiveType_SigmoidCrossEntropyWithLogits,
+                                         schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad};
   OperatorCoder *loss_op = nullptr;
   for (const auto &opcoder : op_coders) {
-    auto primitive_type = static_cast<schema::PrimitiveType>(opcoder->primitive()->Type());
-    auto item = loss_types.find(primitive_type);
+    const Model::Node *node = opcoder->node();
+    int primitive_type = GetPrimitiveType(node->primitive_);
+    auto item = std::find(loss_types.begin(), loss_types.end(), primitive_type);
     if (item != loss_types.end()) {
       loss_op = opcoder.get();
       break;
diff --git a/mindspore/lite/micro/coder/utils/coder_utils.cc b/mindspore/lite/micro/coder/utils/coder_utils.cc
index 55a92e49c0..08d31b6a26 100644
--- a/mindspore/lite/micro/coder/utils/coder_utils.cc
+++ b/mindspore/lite/micro/coder/utils/coder_utils.cc
@@ -38,7 +38,7 @@ void TensorDataToFile(const lite::Tensor *tensor, std::ofstream &ofs) {
   }
   int len = tensor->ElementsNum();
   for (int i = 0; i < len; ++i) {
-    ofs << data[i] << ", ";
", "; + ofs << std::to_string(data[i]) << ", "; if (i % NUM == NUM - 1) { ofs << "\n"; } @@ -59,6 +59,7 @@ void PrintTensorData(const lite::Tensor *tensor, std::ofstream &ofs) { case kNumberTypeInt: case kNumberTypeInt32: TensorDataToFile(tensor, ofs); + break; case kNumberTypeInt64: TensorDataToFile(tensor, ofs); break; @@ -113,7 +114,7 @@ std::vector AddDumpDataInfo(const std::vector &blocks, for (size_t i = 0; i < num; ++i) { auto &opcoder = opcoders.at(i); std::string code = blocks.at(i); - std::string name = opcoder->ID(); + std::string name = opcoder->name(); code += " {\n"; code += " FILE *output_file = fopen(\"./" + name + ".ir\", \"w\");\n"; code += " fprintf(output_file, \"Node:" + name + "\\n\");\n"; diff --git a/mindspore/lite/micro/coder/utils/type_cast.cc b/mindspore/lite/micro/coder/utils/type_cast.cc index 7be96be52a..54dd193a44 100644 --- a/mindspore/lite/micro/coder/utils/type_cast.cc +++ b/mindspore/lite/micro/coder/utils/type_cast.cc @@ -28,6 +28,16 @@ std::string EnumNameDataType(TypeId type) { return "kNumberTypeInt16"; case kNumberTypeInt32: return "kNumberTypeInt32"; + case kNumberTypeInt64: + return "kNumberTypeInt64"; + case kNumberTypeUInt: + return "kNumberTypeUInt"; + case kNumberTypeUInt8: + return "kNumberTypeUInt8"; + case kNumberTypeUInt16: + return "kNumberTypeUInt16"; + case kNumberTypeUInt32: + return "kNumberTypeUInt32"; case kNumberTypeFloat: case kNumberTypeFloat32: return "kNumberTypeFloat32"; @@ -48,16 +58,18 @@ std::string GetTensorDataType(TypeId type) { case kNumberTypeFloat32: return "float "; case kNumberTypeInt8: - return "int8"; + return "int8_t "; + case kNumberTypeInt16: + return "int16_t "; case kNumberTypeInt: case kNumberTypeInt32: - return "int32_t"; + return "int32_t "; case kNumberTypeUInt8: - return "uint8_t"; + return "uint8_t "; case kNumberTypeUInt32: - return "uint32_t"; + return "uint32_t "; case kNumberTypeInt64: - return "int64_t"; + return "int64_t "; default: MS_LOG(ERROR) << "unsupported data type: " << EnumNameDataType(type); return ""; diff --git a/mindspore/lite/micro/coder/utils/type_cast.h b/mindspore/lite/micro/coder/utils/type_cast.h index 44b599fcb0..ab805d9f08 100644 --- a/mindspore/lite/micro/coder/utils/type_cast.h +++ b/mindspore/lite/micro/coder/utils/type_cast.h @@ -47,13 +47,16 @@ std::string GetVariableTypeName() { {std::type_index(typeid(int32_t)), "int32_t"}, {std::type_index(typeid(int16_t)), "int16_t"}, {std::type_index(typeid(int8_t)), "int8_t"}, + {std::type_index(typeid(uint8_t)), "uint8_t"}, {std::type_index(typeid(float)), "float"}, {std::type_index(typeid(double)), "double"}, {std::type_index(typeid(::QuantArg)), "QuantArg"}, + {std::type_index(typeid(void *)), "void *"}, {std::type_index(typeid(int *)), "int *"}, {std::type_index(typeid(int32_t *)), "int32_t *"}, {std::type_index(typeid(int16_t *)), "int16_t *"}, {std::type_index(typeid(int8_t *)), "int8_t *"}, + {std::type_index(typeid(uint8_t *)), "uint8_t *"}, {std::type_index(typeid(float *)), "float *"}}; auto item = types_name.find(std::type_index(typeid(T))); if (item != types_name.end()) { diff --git a/mindspore/lite/micro/example/micro_speech/Softmax-3.out b/mindspore/lite/micro/example/micro_speech/Softmax-3.out index 740c985cf6..f8a74a8ca6 100644 --- a/mindspore/lite/micro/example/micro_speech/Softmax-3.out +++ b/mindspore/lite/micro/example/micro_speech/Softmax-3.out @@ -1,11 +1,11 @@ Node:Softmax-3 -input Tensor:micro_speech_B+0 +input Tensor: (int8_t *)(micro_speech_B+0) input 1, 4, input type:DT_INT8, format:NHWC, elementSize: 4 
 input Data:
- -50, -5, 121, -4
-output Tensor:micro_speech_B+8
+-50, -5, 121, -4,
+output Tensor: (int8_t *)(micro_speech_B+8)
 output 1, 4, output type:DT_INT8, format:NHWC, elementSize: 4
 output Data:
- -128, -128, 127, -128
+-128, -128, 127, -128,
diff --git a/mindspore/lite/micro/example/mobilenetv2_quant/1_224_224_3.bin b/mindspore/lite/micro/example/mobilenetv2_quant/1_224_224_3.bin
deleted file mode 100644
index 1c035cd0e04dad49fce7ddf98813bcabda5f66ce..0000000000000000000000000000000000000000
Binary files a/mindspore/lite/micro/example/mobilenetv2_quant/1_224_224_3.bin and /dev/null differ
zteqizJ^w99NL$YD7lf%B9*FO|$)iojX{a9|L*tiT?41RiWY&WMx5dy%2U|XzzXGB% z(^2l=I=Jq4f<`5FqkUC_`JKXcxZrICG3On*#|B6I(I-ay?V65bqQ>#oO}ojn*-`2y zaKtwuAK?2UX+QI7Do#q3?!&L=f_na8I9mM}Y8Q6pF=rd##>ugGO)drpwMkvDb5qgM z+MU`K#!^aStw=BL2sis#Q%ce@N?UP|8YO;&<-ZQf{BVJOJ>3k4O>a;~YY=YnFQ=2Y zocX=f5j9}PINn|VQe3?}7v(R^736EJNhxDAlxQUL?@PrPsWj33Qp|b!aytsyB^jqo z7{LX*W_bb`+mGcr8wN;N}dP&?mEo~0HX9-=RJW#kXlWea!u+xq@>T-9! z*tmB!lukGaA5KikUeI_VXtUg}-n<#sugTL++FY#{3YR)tdqx_qc>6p!f>hapep z&~Mx#id|?3WuuS7xcXFB0$q@&Z>7bR`M50Gjm)?ooNOG586z~ME|k&etgxSEl)2)~ zu|tKlj*oCEArp900Q6vQ*z`@2Qya_Ra-av7OTP6fabakGCYDVqZ1KR)Cg{^BF^%Hn z(eZf(_F7>C8Vxmc*|!jiw#M=K*GpykQcr^VOD&=7QY6pb>WwjRR=oLh9uK?r3taw5 z9hav(aM80K{O&<6J~0?7YD!ES=L-d#y~T;2`&w%dqP*y*v#bH2R3BKUdlrzwoNBDs^So4^CY6 z;sk_xICAgcd>*u>3a+djj!C-KxJf#zLkb5=*=KJ^-Z_bDEAsGt z;YrxD>LrP%e$b_0IdDrm2g+0H;HBYPikX@YEnAPk?Q(T|UweoSyj%&TkL-E%+Qszh z>lF$b@Rs)WnSv`DBhg`tC4aY2#xK{cVQHa(I7I4M{~l0FmeGaS>S2RrWlq>hZUqHA zkXXzE%4vL`9++m_F51hVB#)N_bGA*ztHVNYN8di|<2;d1=adP);|c}Udf>fNGax_h z7`!~$D&E>3fn!h4q->%VM-3%?tNjJlhyK2NPwmabbaXWS?>4V-SzQPw3I|^SfecyMZ zS)sElFMSw<;=UE4`jK%E6xsuidA}83q(soKFk2knCJ!&G!|;}27-_nj;JHp!G@xoX zT`LI0zKMv7=62?aVYw7h+fM7B&4%~+PwEHyTVv%rKh#^$ivt%g6$|&J;g27-c=d>j z;C|HxLnN>LhPSTxDNzj#EH;2&zsd%O)q<>*6>A@k1M{qM2 zl8oJX@Tc|E{NOS5=u*VtwlAn-y~KUnV?{%vrc!8d0L!HO_=Va!QdJ$tXB-v5vhW2Z zZybn9FE5JwFDJs*zqYto7LEP9rCw%}3hFiO08Iq z9{0wN*^9_(%^z`E#|h#7sb2ibESvKjhcoq&ycs)$Qm&o{SH-$Vq`cLsI8-h zk$+OpQq|Qt^tld479Jf27vX>((W~$?Z@haTjw3tGtIYM(z z9X*ipPabsz=y&g=keca-l`C68F5eBm#+JZT3q>9klfeqx`ofo5Eo}KRpH`juCt3~b zjw?I4(D0Et9QNQ31uT_^&F;?F>4^##4RK{`llqyqh4H89$r$;418DW{BK3l%@rURB zoW5c_?R56w+w;!Cs6~Ip`E#}T7kO|>`vWo3wm)}1Z;b9O4&g~qM|9b%k=k2FE)GRt2)S@Z+@w7w!pKzzwNHOHx7@jjcoPr!& zaiV4t|GpU`WolfZ?O6aW*PIVQ4GnNjJ(60Bbg=NMFDY9}XZcf4JZ!XB>=^%$I@MU_c5Mh*vTO)yP8k=D7q0eL= z7>j>19+272$oi*b+PR9^K@9v=>nIOuT_I8|plKP{YuTl!4EVNbNUpZg8+x!@{3X|9-guZ)7 zvS1_pb|?=FGW=0Lpb>U=J^?XEEMl!N8BFTL3 z_rLzAOG8$$Ef$3SzY4+YQV;tV7{Fg&26R~Q zBpXp!%Dio3h*t}ydfDwP81qSGOEh@88=~6&Vo19Fj(ePW55vYp(SW5NSZ$=Ws3&+D ztuv6O3u@!>VMQc+;&GV2`(!`U+jSfF98#v&qenrKu&`-!QHQRHLU(J{5H5D_Fecyc zLq6k^nRH4m8>&==>$*L`*dtz4dbgJS-WCF|D~A+CTEg^tHKsf`t~ip446TpVOc_b;2V;Itr;z44I=!wK44 zs|@iGVr&CTcvRWUtge-Fiy!*(o}Ix0he^o03f`UG)P?N3f+Gw_&w-6ulgKb-3}goz z(8r@U#7Ud=z;EX)nlr$F*klF#l0BRc7AKNYk&1Aa(GbaRXlADij)WxKX^Y(NI~lR42U=`>kraK{at6P=pUVDw zAI?^p)N$1Z2GgQVLZGKf1{!`k(h*%fNbr(^v^HfpxQ#*W-EnZ?d=kZbuEVjj#?upx z>rB63C!4)Y#QXX<(Yx_i*v%ki8o%%{s|YKCXa8MgTGr`sRbcwAbdTc(8r&0jeNzQb z;9-_qzZ}o{CE=1U_b~gF4g|00czciAU~LfyZe+Z>v8I8)$u*^~K6S#r5=P6=hRu{PM4%7uMKkD?ryZj6WF$0rm2BA1pXm1C}& z1njVIVWG*>Nbl2snDA4Ms;67h=WR{w;9F~`vHXYizRTFuB5A6~$)hv>fF1n1TKw|J zT;!*ig4tlfi6CzQC$qvRab5|qFD2wt=T7VY@OWAJG9L090x5aJUxfQv96h}tKdaSs;L2md5GvgtM+ zs(gph;~eO$OshCx;zYVOpjI?ax)^ppOQe~O+xR<6_n`lk%j}?P9MSq{`flw>2EBD? 
z5^RAwU2;xb3OUj&sGEBWrSDX-dy%iO_|Qshes_XJZHQq9+a=*)R6cc%uVA&O9&>wb zCa^dmTUYc^lWukR3M~G6?A+{n(MFlk%)@LjEa&%QlzKM1U-yS=JD33req7+gI%d*< zV0E&K9!ZYhtC?te32ptMN3)|+VB_f(&oh?oC{lrRp~tj7eKI$2tSM)<-GY)Y2(FKS zIe7JDAj~SgfIdB6*oO@QUqRrxOF7Jdy#3=r%47`|h!W^c>KY+WJq-rfn9+=rv*Fps zJJ{nR!Y8$@_~u#_bA6i)0Z(6Z5fkL8O5_EukAx1yzA13%xGP*&|BN@coaP#sQT4@1)cC## zzdYT^8U0*=y^2#n%tjiJ2Rj(`@U;nI$7RFAullLmhh{C8Qba_vNc z8UBqW8E@hjcRfH;!5MjLRXd(*ZNrl$sr=s^QIxK8igR3^OY3tMa^tlXsO^Rt9RHOH zM)sY^_G@r6f|jxM8zz#$vKjE9xr4j=(H3HpU7+NXGVPgl3@^q0Vkr2Psy?`a)Lg;k z=JS)^^)->2qw4Xk#|d7O<&x;C6y4i82^LF7iQN3#7{7S{dPo{DlRi1R)+|SpkC@<7 z_gDB+=`f1Qn%GQ<&A2Tnm~~hj!07SYSf|Gz(6{}eY1J- zu9RQ6|9U8MDY%L+`d_nP|2l3b|DAniWjJAw8qs8)mA=)3hZS=0RLCf_pR|FxeQ7NZ zf`9Rs4t)`~ywjr#;Sc$=PHD)#Ew~lbA}IFPeZ2TM9O{3VQK?ET|8JZb+qp9sGjd#+ zm%x>1YE{7cX>VCd@*6bkKFEAaHJLIe%?9X1L2z~-v#3Z0J0YKb(0e8O)B1!-w^)*P z^9huSl&8eGi~0PhNZ8u>jEzoQiwpIS;Evinh?X*;w+S-PFFyf9#yxCJ<9Y7Lt!~_Y zx1YbHFq#xTd(e>C*_3Wk$Ld#P!0DUQD7d_by^}V@jqy^nGp<}b<8(i(-BF2Wb_p(p zgnx2ij&mGfyco@>dw?4nd-q%vF9lJczY64*eS3E1LVkM+60(0 zWIA1LNQS*CxwLj$q-dnMEu67ah1=I(v3*VtS>d-(vTQiUmAQ?kO!{cCY*RR5;8wJFLN^uMt!u`h^B& zs`RMmJdRVTWmXFUq1a5Fd)s1(^MvO+F=r9(m%c2Vt&(8a+-Uf{a~Ql1P@w%yLO1`L zImLV^rZ%5eJoKRrRVtMxt=vp1a z-lmq2Tzmm5(;omM)y#ks&IcVf#E2Xy zy*L~c|5UmcdhWo3&4ZbNbsY2z%m=OW*U-l09Xn{7P64<3aH8KoEGekK#bwrbcTb*-?%+TXRzQeIdD_x-KX6V7(qF?q@&lz?3HE- z8OLE5+fas{cSpk75xxBV4J%pUQU*g8Kj9LCXQ)i-kRUpheb;CPc9e^X3nE_={-#8b0kQda3}lH zPzu|V%zn>PfNzt9445!;J^N3CZQCaTY!g_wX3}_lsyil)%cpZz_xK@h!zgWnqR`Fm zL(h%wBre;+ju~yo#(fLe`nYv?tJ9Vu!Ygr|bRb!NvLL0Xxs)vo^6ew3V6P&D3^W9_pMo1_%XI|3Uh&ict8lKf0uvwT z<|R_)$SPBTI)qHeIjK@y@ZbO{d@un`r#Q%sJBiEedwJ7W@x06rbBMn_7F@+ExrECp z%sR)Rg_~0dOEv#+7LUV8qo@eJ3YjrWU43#sdml@k-Dn&MeVdz+Kqknh%-qhtNg9*> zcnd1j)r8Ll+7RXQge?LgyO@*#57HdSQux2(xnmTqoo6NN$ak@r<1bM1h6Z^Q=#$Qi z`+OCSr(4y+o^I|?+F$ydrQFaE*o}8s!`yHse%vW86Uj#f z&{369G&R7T6x{c5+GkU_vvs~CK`lZ)ZW=XAbfxo4hm&v6KnjyAgqOD4*}|ATtR~l( z7VnXUyF&MA>ElfFI;27Cv%1lEo6sB4i-dCuUD)6IitXKROPfsv4EZf> z!ji!Dpfbst%!O0jP*yECGe^hG2QJuju~wrHShDpiw9Zh3s+8p6V0|HZ`R1~e^ko5&W?ate)iDU9sx1G zhmqIQFiNf&M4HP4c2>bak=M>NSh#UC)w;-W)_+21+9C~#3v#BkLDF>nloD}PapeB& z9e(edK&}1}ptAHZw|YPb=r$Ix!HvV{x_KgGjQ6F#tE6D@3mdNTo)NCtFbnuKITW3* z2@#3+m{!j+c1f*-zgV;rXCG4_rBmZbYFZ&&G#2K1JCrzyv6s0gd*sM}V=bro$^h(Q zr?H(;t5MgZ18Yv%Lr%{Ue*6z7k;UyuDm);@8`HE|GOpzY-UtW%osZb=yg7_U&&F?S zjNx}i0yz)RfT1I1QE!nJZQMSO#!ktgEvF<%Q{6`RUu8+d%p%~$yhHo}HDk)?Z~)6c zF=(jQhjNp4v9JM=t09yPwThhrxF z?8UAZcx{s@J~nN|<&iP)0_xqDZ=A#*cAE`xHQ_K^stZ5=ZseclPo<+vhmzerf5;pg z2J?h_z2@;LqWFC?ME(jR;P2aCEsuJ#ap~Gf#JTHGdz3O3zTM6Cl#XY6#+&2u{6L89 zvZsWw67btqhTn5FF}mjw_BU?g*GG4=@)#wM95Ng?=NE}>*WY!YxA`-M@7>Ha_yD%Z zyo93GH*-${yF|f%qe049o1Q;bgZNELF?h2ry+4!(XD^M1vnGGL%#Fw2mv9=ldly*S{Ud)~fQRK{Se!7Hpznz3RU@;`@@C2p0b1eOi0}OET zg?1HZO3!$KSD%ds^%*E?C3!Y?cwxzeF}*m|6^6#l<1u9VEDOf3G1rb z&pHLZ*z{*dvFEh}{kwUU4>C=JGhgRY)8J%q?$3oEj(IRIRSBvHv=U#fd; z2i7Z(h?B;=U`rKvJkxuYy?9c?JWuatcXt0~$L}|?(v1@Ex89%34i9DKFa|7cEXI`& zCezoUr&;dSAo%d9k@e}0hG*NoVe#cK82`D7?KzmwoG7$nJGZ1Qo{F~20L`oNBoeqPF3uCV?+u|e<^Fkgx>gO&2kFzQtS7ABF&E-8 zm0^m`3+%2P#tyGgfZjVYAayZ^vc`>snswn2^|TXrgv!Hy7YT?JHDat^2K-i@LW%ve z$wSDlPWI5`O7d&4!&MevAGnC|bxGi+VcIfuVIlSYF@qKZ5w`Qi;J@h;|GvnSPF@~C zF#!v(Z`Kp{mWElRFz}zad2JP*G0Ns2gxK&+@4sQKZw#F6n~b03DjAKM1{d3gQE{^b z(FK1PmG=x=9n#o(r7WRCn@F}J`k29M6*{99$2odg!P>|(oJIU$)Tqei@;tp^Z82k~ z4ceGQ&;)d{98ZO%t|Zy+1v7R0Xh-8G@nJ0+{`$iqwBIOOa0m(cJC78YJ>Ep%&GfTa zi9nk8rs3>T<`SW5SPILSV@qtv zBEpnJIrO9?_d+CfM$MCg3aya zm@?Fe!esyB41SAf?7^3uVU9Xm;q*kMzRYC^?%yF{jV#W_ zSjc?q(i$0ebn{PzVrgCA{Q*M%)@ChWAUY2 zIBeog;J47<@KsBIl2&uxUd{x1G79#zzmDQNjU 
zwxeKei>2Npe$^)rx~5o2f#u%RsmWOXj5Yk3jww)l_J@el&vT}dZt(HG6C_6-!Ox3B zDP+?KzAZ`2Y&UE{T?HX)W$_nR$eDrRF-4MFYlnjzwQ==@pUn7(JbYNW2!p$3;bCJ* zwtkvC81@B0-)}#hdB+*{zA*-^(A_8-KY~AK)6cxh-kh9YypDI&u;gdiU*MXz|6#As z2)h}TrOe{gb{u(3okR(1v3hhUEh|jtcdW0&yQ8;b^UQB-kL(*9_I?uzr6_iBoC|HW z^kA5U`1$lS*!8spPCZ7Je#{6a2kziU^=Y$~e-e<;FpkWk-r}y>U>ZAVIxXtEix5;m zE^?u4e%*%WU zD!Q#@O8>WSQ)!|f03xPHx&GrX;XfR0S4NQprPN)g!%Uh zZmg`}{P+_~J(u*^&X10i{q!iWSaOLKTTKURiHW2iZjWjTb@(FI82t~}QdV^!EV~m= z8>-Z)B)Xk5ITHvOUzMP4#T-g-b0zJYDpWLYENzgQL%ZtJVAHC6=Gu@%b&{J!Z?i^F zQ`iz#EZomSv*KvU>O0t%=1ezd#*-`*@IjyCvFVjMD969xLv^q7rCD2eOM#80p5Mcl zHvSQ9PV=PcF0t%Rj2x--D`DUxDYE{f3tINynaL(wl$Y^*%Eh9+7kdh z^Zl60l*!O_wvXw(>0~2qqv5B?a0tse#ZA0Z1W$&IqLo8JAWZly>h4G*o3ANo7S)9f z;!K)%HWl3Bv~a>GH^B+-4`2U<&>XGNaJPV9^ydILq;msPJ?rFL^$N1|LeND@TnH+daWhF&jRwDr3v+``N}$7g%>n znA^e>i0j|MQWd6d??i!_Juw5|N%Rl(5o*#_PmRD&Un;C?#kcbXw}CRv)F@^{E>36ny`<>U*2P@Zo>9ytT^-!_PKWg; zj`M#XY4KSO<-A(63T-?Y&B|3=pl|73v~Sm-L+9@BVzb$Bgo|P~6L&M!03-O*UBY=U z@@J+xO59sLdHilyEz$_rgsCs?;kJqOY~$2)iZ$DZ=5gPIv*}P&iLzkY-%qpdzD?}Z zB7ymv>;RqL{UAb0hd1Aq1v90MSgn67Y0irfc?fx|SzkKEQR-=p^)h-cJ&kh!1!!VgSMS zI%ZfE0F!Se8|E;p{jRj-z$QLbu5DF$BQyNGYZAL&HA0a$jv#BP^1(rVKuwUrZHtdP!(pFqgd?Zp;?Ha-uq0 zOPbNu#d|mhQ4qewGOwB7)R+e=`Bgl$3?!RD5jb?q7=gz;9w6IK;LvNsm*e)-pZuG1 z+F%RPgGRw6nNswA5=ODv0^fR&C%nI-0PhCQ1Ak+0NQn0bn-#hg^VtDbXI*CXR%&n} zA)E^l`m3WJ%A(suBM8pFi-{{Hv7xeGQ2pmCyrNdd1sL_<=N}unGGP|BDWeM|QI$_D zp3kkX`GiSUlQ_G~dED$*GvMp(DX37P1J)f{RQu8b=ANwM|9+oIrwj#W=;cum|3{lk ziwUD>84GZ1*dRJvU53GX_Tk()gV@j^syH5{Nw=aDyW`bKf0iAXY_+Dr5uQ~3WtM34 z;|w<7O&FX#SIIRF|BI*Zi0I2|D{7s!ooT!jW*i$Taip-n*8O3Jzkpb7xhtb3>14h~ z!Hu2Wj~1Wuq1Ea-f5Z?dFr<`s+CQH?`e4q=oI;?~K7t+%XKZ_UKRZ}p45iBs^TCdp zu+(fUR<86Ut8*vZ2inwdRWCi^h;6QrJ#>Yd!il8#V+xy-E=8{$X2QTL?jXBTiI%e} z(I@3007b=c@@@{4bR>exDr>5^Rz!bx{Ds&b67nK5sO=bF7);~WOKQ-RrmA~uJ zw73Rt{Z&Ug8+?YbWx;G;VmgIcD?`2SCH|HC2zZ$6#KN2Q;FW~QEM~tq*eAL|$r33_ zwVX^9g1ag)q??x-v62Nn@nWM=-ms)GsZ?;PhAlS#i*K7;*yRKTbQ$Bzof9R}gU$oY z^1UeqKT4)cvj#wKqa`SvmZIP0GBi4THuT<-A~3teXGW>A^$}aKPzhiS#e5g|PD1Ht^E;DmDt*R)YpN4V*Z*3V{Dm9xqJ|97Y24&!<4RW9xr$Uoo znz1SiU&@NLhc91EVaYcmFdWp&uAJ8)vroFL%_j|4f&?ASxGf&`Qkzo-d1duSAlv>8cH)lV=J)dc!4nxk)q%2Z!iNEwrJDc7uu zz5l3H@#VqYINHpa=DTR)zAO4*(7oKHxw?|oJ<8$}9yT-e7c=2+ zQz09^IGaZ2HR8{DVJ7@^9(Z~e((u$DeEOF=Om>I~EXrSsdtBVeGop`$9o2`^_`kOkU}O@{0xy`tf8GwTO3R7{-;yCc z@c_2uSs`Wru&03+yxGPN8dRequoI@-yhYf5ht7- z9cbK_Bxt=h9qc}p;QE1{oL24=K1*^Wi4&%}=jfyhUN2+88Sn)k3ttcWY$|lgQ=#v( zB^$I+IEyN6U^9;9In$JL8UofA*WvkvlgYM2j-Hz5!1OPp%alm=J`X6KK0w;m>s#)Z< zLIlcQ!K8NP8~OkAVLHgBhR#F+mFIALc zX8U9LU|~1Z@o5#)>WgO`^GxVg!a+XENRJ|ZO(C7uLXul4obQDF%}HfR@^s^|cCRsw zddf5TUNh#V^pBmpQ%vvOrc+a{gh))^acVaa>?ae2=Hf zyEaf}=0SJ-OnH;MLo9WpkV|yg$j&cHgMh=6=+$T$7Q0D~j=sx)_PB5wc{B~e+9xp_ z*2Awktws|bwDRfQGwI;XRPwLWg}v2L)M|B=&+zu6S+mU{yZ8xfb{+)yOklr_7c%-Q zg!%kFF^*re1GT&iIWvO->h=D?YI;&BV)}ZPV7m$B3eA|)oDFzGp26Yd0&>| zvNF|haJHF7rv%2o%kW#QT2c*kJ%6ywgha?1TEN~77WPMzXEQNl_ z9?a05$el6HqW9M{`MZO3AiZV)CJOumH`!@4pmrEMZg`GkEsl$zZ4N(Ytqkp->PUh+7ECcx47$LQeL#?<>Rpr^$V zCZ%7;eBY$ehyF{f?TgT%9mffyiey+?=?kCEZDeV;HMqkjlQ3CTg-&L9!*fp$dj8ZC zl1d^d@?{;jZc-Y1_Ip0IS+3?*1)IP%mJHKVWKj3`Sv=LQ431N)@sQvc+_L2bJD!nC zipp6~wbBvJlqi7hcug3yGYjbaT$1ObA?vCfJ3Mm+JiD9=a)$CSH@1`YPxPUh36o*L zLt`?}%LL5=Ym|F!fTyyr;C_p7bgpLsUcH(LE;IZnzp@)mO@pz!U*Hm0bc;PdIx|}~ zmiq6^rN$R^d`-Y$_`Nm-t{l?lx@TGAte#M68xsIe^F{-IdJGNJjDX*bx-@d?78KVA zoYZSE;ME}TL(dh$k{UZ)*DP^-LO)`-t2)mEv{|PzqiTT15xo#=AsP`AvsR_LE%Myb3$`Q_n8d0P4 zZo!c}kF1)e!9Vj*Cd-!Lkr*3rI~fo2X$;y&7(nr`A#{DnWvmO?$PVt3p_#ct7o{r( z�Nek>ufcGFBEsdsT#O`fv7S+By8zEy@4*EJxudv~k9ib~GQyaYw3iXu@w@fq}b} z_XsMa*~0noa#j=mp5ZQ(EQ4V{j0e0Ku0fSe_t~(VesrrH1aIvW!A;=1-}e@nd;>O$ 
z9D}TBsW^~sIIU#Vt1DEOPBQsXvNT4t4fnb?d{ugv6@B)j zD}VYha>WF^5Nk4I54C?Y^-N4R4rPVFD|~p%4;T zNX3%|(U*7|Shr#@sOhifHc5I>p}jTLEX*VyMd7@(@+WS{oT6>yz_=a;GPv$vT4n>HS&e zw%P!0MJmx#-*xQ1lNMzTJISgY4B(!?YDn4f6(u8AVUdvs%xz1h^8PS#v6f-S?MxtO z^Kxb>eh%oK$3D$8Y5~feJg0BxJ)3_pMZrguJ6sjRj(MR9#iX)!m%LUI- z{k1ZC9a2IazluSzvjV>h{PWMPo^X1CDJ=(6D(jsLQa)S6_kHJK#a{*^CCj-Dwxi(B zPb0R@WD;yHS|)l>pF!h3j3qzEz3lv5RXD~^;FC|9a~5G0OykNOcDngJx~8YGt*3;H znS36-d+rI(&F#3;y(wtrGn#a(CxgJQ#&_QeuyjNujQb-ZyC^lVOpu@h5yHD$UXtqO zd(jh5!POIPh!OS2*nV-d=;)Dw&_6F5ucg62P7W#wg&Hq(ZnifPpSpqAa=7|qu2lZ2M$?`(GR zHtz1UJo<3Bk(tRkL9dA!b97mXTlB_rU7GQoxWp*9)1%poM=Fm9-MGT(jj?e!A<|KLF{zB z2H#h|LaQY^`B%qVS+k-QcP3Ebl_>X@F3=Txtv)E8k!xFZ%8jgz7SSDj(F&q-0{k;5Wy?;xtbKLqw$ z#?zmaVPxgy3Hg7r=w6%+H^xYk&x_QM%MrXgj+fT*WS4Pv-*`y3);s=3@x~?QY1r@bFg=n269mR$TRUG6I)DXTBVP1g4a#o{ zy~ztfV7t!+c3c%WslC1I-t4KQQ8t3s-W6Pt=tjm$UE;BXEqtb@&_?6d4`CV4qwL7Lb9l6Tr3S% z2nL0{E>y5v@Nj(0qmqpgbi&;R&|w{0EY=lvIrmwRR6f}F~`m|#e zKW6DWtT^N&uu#A7``db0i0oQ?)gH$>>>|KXwt<^?O<J(A!hVYPJC1(|Oz=Cs!EcIFL#Yr_+QDOPKP@_W~#SJDVWzk$&Fd zSn5nMEAa2-OMOqH{<4?MRg?(t3alYt;Hz8?nF8gFf&yo59F z&?Vu*-HzA@eJ>buV+A|I-(Vk$XVBYJp_~0+H@Xy=kxYXkNxMmce4s5(c(R7SChU36 zw`PEe?Jgm&8OSsYEh)zJ9uC`70!OlUvcd_HG)KV@hMYI2Uq__iac?GjV7wQjHRq#h zaUUKwd&&+y6nucEW-!}R@lL99~G%#L%C?v@Liko3T@#Cf}{VIo`6u=XEO%Iqio! z=_@ert1CMya0(u;PC)s=ySYcnCKTvi0(nBO#AE6xQJt+dYddIhe?&C=KEy!To$IPqehVk#*U=M31_gTwqr-fBHfVo9-P;m%CKpc_atN0`@WS z{6R2erw>S-=whatBUn_LI%(vsV!mBc^wC!jLhNPm<yVG3q ztsRe=^|8E^TC^Lk%3S4hsN;xEi|zM*Hr#nS9Ix5Mc0QeqS;9;^ZreOe$&} z6P*Z}0)tdH;lCd?5GdU3mF?f-i^@dQ!W0%X{RNKOI+5pItYJ^5$S_O0=a_dmfO;hh z;HdpvSkXR+#%;u!--{HRJz`r+{StpPn= z_L1GXCgxSgoMf+=9POW~Nq@|O*@q8<1ujb*J+mD`SGH)A^n?l=`En*!E_s5A7w0qW zmS!v`e@O0n!dJ31N;_u{)2%;aPGlB2biJ$DwZk~$b%jfDsHFl$=?`T+Ya2%*w+dx39F15D_?4FWFW~|b{ z0xN!ThPNw3L;q#dkpTmRUH&BaXfJfax{qMg>QHX-7eg4@<_w`1Ex@_SnNB6k2_)G9 zOgausp(vcrx(fWbIX)D2F&6)%PNtcc)#=<$4_fWLA4Q{AbHV%)e$jRjYqX1}RVFEP z?dD5XwrCetWu4?fbqjF)e&FR}M7vMfyL-L?p`ZT7L5B9X9q4!ImaM4UkS$P`Y?tQ~z)(9?v)yr9N-egKC z2%(6nQKZ_NN+&&qMxcD})`%4Ga!Q4GO1Mv>e?;aw#10SOeC z2w#KPkC=M4rX`L#`_t*<)Hz_9Cggvd?Pz9|4us~Iv0tMNP+I6r?5&&#FT;ibw}J5C z6m5ua{KNN3%%tks2JGtOi4gs?n(22>q0Ynw+>kNKEcQz-c{t_6v;sp~aG@F#3)hL^ z!Uz1j#u8F>M{}awTGk&K26CH)na)H%cy4Efah2(0y7ej^8nB&xc_v8?MFU7R*i*=1 zZ(*|GN)%r+iKbsR#q5bDaHGVGhCPzvrgq7|lJ5X&MO7G{`i%|#YCv(mKAgvQbLK6t z%sNsg@#_oUvIpz@X<&y1l|wEa{Vqik`cq(_!WgQr)}%v5@%Zv>CM;Gj#;B_G;>3@M zq`P=82F@uEEfTN7^<5rNi9$|5cqhBge#gW!i)g~`@!;^y5WiK;pgD!Qe0_xh9ZCzM z!|B5~!^?3vzFd;3cD16u;N*??riTfQ(J=E~Hho)_1xa~8TklS0joK5yca)!x#2$RlT;f}e!_jBpI6w@h49Rf*rGkhxz50PEp+!((ZA%BHmofKI)ogS9 z1mS)%9cT1Jl9AI;^eQ`pT#y{k?{UF>O{OqPZ9H@<6+%XOI_xjrh?{T!Wqo7Q1UA}L z(elA9ysqj_k$l*2_AarFJ9oLA>)daO9$)p?UdO3mJ?lH;>Y_QX2fkptT%9&)h)^>= zm`a!V(4ViP*}bt%oK>JX1phAMxKcNGpC*kTcLc)i>{wRiX@N@Ib7ozU4tDX8Aim;A z-c#dXmrMn!9t#2YGw;~EQ7>7HeLhH7f!iQK9l;`~=aerxYEc6I4(7F?dslpI3+OImQ9+X1&5Kf}Rc=XK_uzktnXRfi8Uey~7M9jBeJq?YVq z5OcVlKc}HZBSxI%&dbf`ET74cV&p$IlDDG)ljCWD8pmoaLvgTL0PWVljO9vy_!T{t zut9z_&KaN$hYwB0w??THyZX9#eD-NJZ;2#4z0!ia_3QbH_y~41B7;TG+0L$N{9&8t z=fc;cb8y3$ot#aPA=F>BgKgWa0Cp?j1osIPZ}W^rt@fouQ^#XZ-b~(lMG3thw30cM zr@_hE5wuV{nq;5VJ=&jq-_F54t%dd2PzRHVlzec+M(O4eCe1knyUaI$0q zY%O@fFObxRxRH8jr05M}NDCdmxkA@tb38nBAg&)`MTN^wu>8Yzbb$B7wvpo@>gr;) zb8HIzUeLjEglu8S87s8V_X8Ot9hiOa6rNb1D^55d4_&{f(*85kpxL!gbjnEJjz2BJ zqQ9xE_q{B3){ddfx$?OEwUMy%3uE>wLQj0%9qgR8fo*k!zcjTha+-q$uT ztFDHB_iGlrqB0Ss20DTI@h((quz~wiId;HqFr0XsLVZ^PP{H6nR>57it<(kkM3v0q zg#{Gt?Zo6e3b=8^Ef!>%gmb>ikq=IVVb$R@_F+G>&ef)K@=i3P=LwpO7vradAkYci zh~u9O7yZz`&rjaAhdXrb1?#ukz!L6#L`lJyb?=oSZHTnv^&)@r_RA_*>y5dP?K6p% 
zoqdilqKoBNm{HN+3)s*U%^sfKh__yqBMKR}*3f6Dy!|cv#4X0@nS!s}FcCtN=cD3- zNGyJtPWv8)(96O?2%EcuOOhexpJWJHZ=2Z;FMEo9_W);A{>R>1K4+dWD>2gM94l** z6<87`kal$t-TyKV_goX+4O3n-@4Jcc?4%BD4RvMxchBJMuCwgvvFF%PG!Nw88gN=S zXEUj;LK=7B8k^kR&mZ*jhup8W5OethYXzN?;{+zA^#n8a(a{X}kAh!R@)+(pAqN%5 z9oY0u0r1}&D_S#pBz#$*NSlusl47k7p4r^S!sqsJ`D=t8Z*v@+FMi4v+|PyPzk@Mq z&`{jx@`YJHazL{}W2$q=fMMlH&?75!>?Q?qd!HN9Y}YE$pKo$lU!o4(V@FbMWHu@* z$g&|G)2Kbq3QtAs!NA@xxY#I=Z&c8O!g&|DTIn2|6%s{iN8X6@EQG(kJAv^14*q=e zZLDY!I_*NnD}Q4aiSAp%K!HQsH9-ZqBUW%?_iT`JHm4^WqPPtoC1BZs0&3c^hPyUm z1{}53r~HEfY=Ah3tkMgKz8Le-jv;jSq9(I^X@Pij7xVLyr2ZYd@uVAu&m(0SS=?AMMw~u$E)29$hU(j=1m@ZVxM=1;FMK6wU~~zd z5^@77v6oo$%YN~=lb-Z#{uNAK>w%uHlh_uWU&8sMjSC!^2)~ol_~zsX;#)h7==->6 z>ew#}vC1#SX66E8s3{YRQbWLei3>Pp7V%$8#z6)cgZ~w2m|8u9_I}>SMtSdMIz`T8 z-#(pIKab}R$IqZ7&0?DAC$M}9RN1p~C){O_M-f%K*qJ4|5c6yZMK@%k_fJhu=cnKb zyr4pJzl~sxn?0yRW)04Fi-NWX5p+hPfLVSY1+9;gxKWCmSYBo{xW61vE?tubU z3Nwx$E-mb4JjXAWaTd-HvhZZb{}?(Cf3Er{jw7QoLsp6?Nr;Ah&ru;W8j>OkNy93e z6i<6k?VXmEHtsnsrJ<=k%+k=Fn$PzS;PorNd++z2^ZC5r9nta~LaI+^8ayW%54$j&Bn*pOSHO@cC+@W9YsFl*9C(!WTV~G-Fm6-|C(gY{MT&}?tmVgj z4`p)dx_=aZP?uYi=fa1mA3#xF+(n!loLsI@?%}htZZ9)x-5iGs%_UMhx5rZTREg`Z zoALg&|4FOVEg)vcVlwtohb`$&_;ynx{aj?hF;yg`-Y`U!#Mc#Wz9zE&#Zi(?yJIwN z=uElf=U`ku#uAgmw?XInXdG$bEvHr^zWEo15leEhv9LGZsS0L?`D4+k<{s^-6J4*J zRc^ZD99i4Vod>r&NF(02>+*jQQ9`n23qkbxH6 zX28!fiH!C8L9(d_oy2?FbfGa`t2{m@^rF`^B?;`v-E@OKJRm@)r7~*#~14)4Yh8?;NblOXh*pjHP6>1{b%<4{oirVBZg zDYk5WG71lGsU~a;W1kT5{&=#pr24uuj9a^oygVO5O7AW>KmuMnT~Br?PY~?ndD7uE z{kbG!G0FdQuq=+aAQZ>l%s^>EW6@J;f2sh90F-)I&$w@c?36>(Y&os>2$IS z_OzT!sUd4<%G2>^f5V)TIxL{?rX%=vo*oY0nTb_`$@SvAsQQI8&;+}_*l|-b7IwP{ ztH1Zh#+)7;8+;MUe)pm`&(_0Ok@J4JU?lIf3gb%fl}|+oH2QF**8+^qsaEX|H)20M~}8ZkIsRt{z-U9 z#&+Y@W^rB?x!(`JhhkyZ>#%$6Xlja&;a~bdKV9rND%gl$^%}!}H)dhzCz+adbi$td zF2mUoSs1tSnrskdz=yX4a{o`?X!EHx5b)ZGTh1D}Z#rwv8_UMwj_46^%{P*Q?kMAq zmTS_(rt9+QE`qaX9)or{F7yPepweh5Jr0?~4$w?JSBfsYvN?5K)&NR>3gy<32c>6X zKYcH~1E+194V^alQRoXJ*8gUTKHLSBh8Kfl`g1wHqXzzYeUq*$1@rC5DR|f~gM!_s zVPIKX?oc-YXKj62F+L;!Q*MR8n$ZsU@!B(Z{%0pllxCCm%SV9!)yp@%3~2nb8tU;S z3rj6^(5bitT1+}&A5Bk;5ubU;0$0vGs|20X*297h>g1iM&Iy8Dqjda^M=zIBYW%f9 z&YQE0R_sdykEk}B7Z``xUXNi!N+fSHI3=&za#FbNCh*{Xk$8{m$-%IOe2(k!%5ZaB zT-XUaz7@UOO~>imy%Dg|CREm|Hs=@q!?1^NcE7LRD>C^@VDQPW;CkPiF3)=aCT2&Z zz^v|I{G|*IWVcXsMlODS?aRfE0tfS77_L;x?D<9P z706$lCvxp({JL83_HK@4vpV5r$`;SvVl{5KZ_Tf5N8{SHt#r-ShHC@^Dg3NDe~aD> zQPVr%oHYR$`ArE`JKORDi@p^{_BipUTW9IQhiBCLs0O4u?}T0bedxUD#){)|7R6j` z19J^_0K{J*jnm!m%i9<(4H=DB25IqN>yPv{^qrj6b}Lz7LAgeeG5+|vTAuFO0T)rSibc&m$iZe}}9UpR$(oLn#VvNl|$Y9w8^aN-w(s=zwOhOOqF0+&`#9MOA?H0b3X zcwZ9E`j5T~K|>ba4JoI4Pc`|`qW`4*`+$FZ6LX&_H5Dt$&j?0YEy*P}L_YloXbY;@4gT zy|+&0tik@gw{sLUm}cpFBBE{Cvr6Ug4O4<2YyV=LV6ai}Fjnmz=27D>KQgJRP?ccBEgfhsomV30mSf4EqFk$5A(aQICphTDf8*F3!@!I+fk@ z^-?M?THZ*r6Ou(AP7znkuN7S+clPs~2RX*ZSo0x|+vSU1s)_~&*XrV9%S%-JZZM8J zpTJ>V{Hb@Z6#nLHMh#Xz+(&nowA4hG^B?8ylLnOb-Z+H5Pz2AL1EtxvKJi%x_Clx zXl!ta0X{6`c}wF#oU@8rRpu>H;I)P_u+4SWTAa z3!c@q@UTPQhB)@5C7?l4k<=lySbY*#y$;;4F=QK zZw>VMcc^^Ww?z!_0O{V=hPWh^NVT z{DBKwy*~*zW{PLkvp^^loZoR9(D9F<;W@d1xfIP)cuwXYT_^u7pxokn44r10|W zYlg(eEFR<1P5S%dwd5P1!IRBvDBs?ieGFDhUs7%Osz($(&FoIw8o$zjBi%46Zwlr- zGUoV+cinZXU&*(MI??x@4s7A-!a8p4c(B|^?;;JcpZ7XC){+nTvGKg1JB!_t5o%d2 zg+DX)(UB@Wf-XBe4j%5yCnGJ`V2u)}{8>!9EWELBgq5V-cv8x(ee2P+c^4fXlg%aO zKZPG*7;fhPKA1R+M-)#2_w-g+zChx{ll^&7!x0EjS`F`NR+LpfK#nnW`^;GZ5wnKc_PhC7C%iGHK zJ-c(Ak{Q^PO=L&!ofNad5r3VD#rFQ98-A)k*=$1IzQ>i#Jyf8LQ34%!@)jD83O{?z z2HNBu!|Ag=kl7p~4jAZ#Gd=vs)9Q~Ll<}OVUKk7?JL=-k+Ud~jeS$hWwMFS^2ez=h zEg!qk19m)hf#HrZM7vty*N|C~;m?ocU*iY46JyZN<}6tcFh%$4A>2c7G|NPfbAjL( 
zzb(nZRc>QN$*fPVH4S`M!P#{;B@0K9;f`yq<#)w5TlnO2j;ZE-#$tlyrqdo zZXXVb>w4n5Lo1{!_CdVmpfzmw^TVfB3#1$kb==?`j0NK-VZxC_oZ&W|n+-c-m&<+F z>Gp3juM+PZVU;vn;U-P#vQnIrS4s<2c0q?$AL_T%m{)yP0RDYPYLnyz11HwPP-PSB z*}(;iy5-ZND_^CuYBl~bNt_YyxpTLv7Q8by1toiPjxbsYkCyv!g6M(=2H4}jW8-n% zlO!}V&c>NZdiXbI1NcOjLHx-yyzx1T{|Vnpd zE>Sdrp1FC!+XoT&*KjoUxOR{XU6kSUG8Kfyk0G#R6u;dv3KjS4Bh9vI+<8S8RE=Cs zm-o+w2S)DF#n}T8PmYFV8KU#hw=>1s>!GpOO;&dn{EW{fbSKfi%rR~^49ak#x_9F_ zHOmJFHhFV_|43F_afM#w+rgDhFXg^JhI84h?^NHYD_z`!v|afVeXN@=RU8V%!rk9! zwK(I(*YB=KX*oqp8dreRf7U>8@1f?@VwzMfy6Vo=AT4X6tUyKfcn^FxVF*^1X7ZS1 zFC4xsSF*XQj8p%M!Ty6klcAF@hz*Ohr*I6HtxLeOTkG9ahEJlZ0|itwM(~uT8Y7te zqJN)yu;PDXv8e1hm3+(P#xZ`F_SuExB`2ZJ_EhXQ>$&XLZwwbTNH}84H2(IZ4fb!B zf^}+kv}x&b@Gtb>yiMKMGoX>O{j4bB`7d~1-Uu6>Z1xx>Ixasu&IX5@(fF}$`1GajzZTa*bqO-T_<81uK6|2-4lx<)2<|mkT~i81z+}-=7su?J-{| zJH7(`w6_#9zys2`b!}j>=z0dvjzi0$TyE3kg9qM=9KhRfJhyfpsgJP1;`yFD`|Dvc zc1p*lQ$A>4Z^rHFEjTeM3f)p)3zus&yt?j=*5PJ+)W!zdj_bou8-xQ>>^>JJn88Dz zCiyJp@~F+@dC5%)JNBHx3;T4yEad?rV=v}%Exj@2rW%e7k~l>18eBfPhIYq{KzFrl zoIlMOQg)2Q;OB0bvEB~BCyR4~(_~K_k@ahvirspIa_yZ>(ktPMnA(tvx4(a> z=wa=|>lO`ytKw`IlKZ#Zz$Xp8Ri|)TOb0p}+a7JIJvnAXPxQ^42j4;TpL+hIqU1ar z(@v9i??`}Ue@}p-_f-?a;M!2Gw02#(!%Wj$D0ChdU|qujvcmU0x<{yX-`6Uj(mxW+GfoNMx8djNfk^ zj;3FfV65*ruJba%?}qts`rbggt)3;@P1VGPwtHyuB3oWMU3eh^YT;OU0G70ussFWP zuCLn!Qv-*!!2Sk3haxRU$v{C8p&E zWGS;XkeXI^M19{JtXWkp?|M0&eSWVZ^U5xi!n4nxgFZthpx)65B-hxGQ}+i_*5wfxt8@UiL{H~{_ojTe z>aU!CaXL5E=J4xV%j6dO{_y$pUKpe<)88f2k;=7EDnI7#J#hqHEA51ObM<)D>ApD4 zHH1}r7tpl4G<4VMi(Pgn;LnLQbok#?E@&{pIVt)0&{ChRzPtthg(0}>vL!b^@a2`y z)A8n%vGSdIf1cJxqO<^I$v?3e!ZS)dOs|i|;I?m}Y)3Y)e6&_-op@QEJjCM@(Wb&vHMhd0#n(f!R=Ikx;LMXmPaG2POoPo=eGfypWO^2!%^wfz#B z6>o#{1HxJAbDjP>;=^%95xi=}UC5r1hJL1c=vIB0tQ;bteTqA3pYq4$?{AWxO)(sE z>@Ckv90~QYgV3*aET)`rLCe-Nq@SJ3)4k*A&$|Mc7Wj&uUUn?+;nA6IE<8ia`sY(* z7fX>pHK7$2!Z(wf1gCVgF|yk&8hUIvsu=*QKJE?b?t;afm<;nw6J?voNFoiMF|^ESd6)S;XX<)mV^6pMU3oqDFef#QNTIN{%5UbX)y>^hst zf%mgO#M5br_aZ6z)J2$UD|X=_`n>2%3T)k`ihTo0Ji7h7MdQ96_81yPa_{z|LH_+8 z1-wF`U^C+yf6EAbr6P`Hd8~J28e%c$E&J&6zFTCcv){^Jp7%=;Iq?xE}8^HJ>hBfH{}%0sZ%JCrX? zegM7d?RdWE3*V~TNTq#*&q-Be2lp6p?v3xHp&{mFVh8=JYZxn!bi=Df)-*`Z3^$** z#wGt_`U7KZ3(%^!{y^~3qb@-Wt@eG7)mwo%Wh)8Oe5Dvi1N80NU&lVidj zk>%BGQin~oaC4SB?#Z2kNqP4uN%5eR`XQS}iq2i&1oQI1-=ApV>Vdp`pgjcqz2cEH zNsE7bJMyeB4>(o!%$FJ|z-B{op{ zHyos9TPzYRoV3hQ()#J?xcB}eI+pjDh7~oyqbd`=H(R_@M3`u_ld-~1b1r1N+*XDjO`O_nMT{r+1-aSgwn_Y2S zVGoYd^TdRUb#j#5d^q4R7;%=Gm^W>gw|#Ao57h^7&$ol{-j))0_9=;d?NtRMMi=cO zgV@nYajH?E2d*|7=AjgkJ`1Z@`u{? z*k|f0DQW#tQqhToF)hK|5HNwC_sS)o?Ll~82(sluWk`1JL%v;GVBp=)w6jeHrrl7+ zCyO^y`}_WKk@0j`R&X4iCK^L3PZqPDbFfXR1ONG#&CXBEAz@1o-Xyvh!}YzPo7i#Y z4?O~^`VxQL{td>gDy|q~9gD-0Jn(9_p{#UBIOHaT;{?rrG*0^^B_3@9pH!wnxy1p_FEWM>c{CS(ejzx*_%hQ zftwz^tL}{YIsqu1TLN#C0`YR@E4lbzF4w>C#z(K5QI6S7JJ&k!Y~Q=|Z&_c-WVsVZ zc;v&9gg%&^=EN?WmXQ0IBXni3CJwjOq)Tsgz;luTs@?5|T77)+(ZU>zoIDs6$C=_F z!!TaAFNjJO*1=wrU@SlQ5xNP_LhOaH7(P@Nr{8}s?-9=4Uy_&yn_QQiwcYXk`KXG` zlX{?vX*&CU{0Ux-nrQ#tv0}}4(G!cAL8%M1V7+AqhtEtw1zDu7i?TPSebQ!46X;n8Er zc*&@)3I5(4!Hc_h#^AjLQuaxaofF4h{#dp^|A)aXv!w_XcIyRL&Xx;;@Y&cqs%XwKC#!U{}>RqKz)2j1?5 z4T9Z}5v$C%vYh29UuM%@rwMdycn+JUC9*+$54K&dz}q%Bb7Q3komUFr2X|b!`Z>NY4BQalJ2PO^-0-t4pf)BVKjMOZ6V^I{Z`J&Cv zn9S}v?KrN7Cz{h8nA!COsV^Ero+qnFd1nC6tTAAn=16RFU5DS5%wYZCUDBHilX+j! 
zN2$5Qk*zC3`RM`+N)zu~)lY)4?#(bVdXx#L5>41JC5CSuo5t&Adf<`;ldI;gIylRX&s^9(Jr;L;UqiDJHc(@=DZc)@lkV8};`}{Xta2_8F6M2ZHWwP@ z*Him({1J6_`pGcsf)i<8&S3UdqHTFO=)TPdM)YaNmGez7)Z#dGywm^*cRS+8(c7iU zysqf4cZ>|r81v@WIXZSao@oJom<01;fvvJPy&w`(H$*DDsfn$0Uuj_1%?`Yr-?J%rPR4ihe!ri#IE+Fw?*S-IKfFj9U_~ z4e-XPIu@*(sn0Gpn)3ZhMSPz=fT!3y(uH^f3|-ohi(Hc`I`_}UP6o}8uU%ucf(G1G`UqnhRKI?_{m}ycDUk*n_p#c?bvy+y->{lA8B*xaa}Yk{|3QFqwt!u zCf=IRMYw_m&++7JY1`ox{4w|dMAu)1iit_odOjWW#|Reji}rZV^fxpKCt{6Y6h1op zhHe^9#Z8OyIa;p=I_}#KIV<y8r3~)GDS*QN0R0rl@|0?0eU#^DAHOGbS3ennDBv=^tJ47Vj9( z&dLj4_r-~eOC|qH|DdihhHFmDmGtgT=h~!nZarbf%MX8$mrPnuIU>hY9X^Lfp9?|# z@Wq6GH1P57F#I~|Ez}EE((qCpF2229y7-?Z?wju{SV@8<_Sjmub}rGNx{-{I?fG(T zU);Da1QlIeDk^S8u=#>CI&;JlHzeC(K~@TW3LAw6*=E?TOFRC0uM=txY!A=0^jHg4 zQpmeEa=#Z*ykqp>iuqTz(&}Z?IqlX|?qLy#TTgkiXYdX>zb%KWA6jusq=xAF>*5u` zxX5y_~f(HDhi?(cn( ztP4)TDYY)>QS%fmPUXuhm4kU~L!Oj$FN1fCU|1kr?B|2KaFpLv%N zrDuPRxsl3$lSfJeH;w}vQx!aJWz5H$cgcNbcIA!R0n4uW@gZAN_A$-p(#V0-x>`7h z3(eW{z!fR&YCb+sKSOcqp}gVyH3F=Kr6<}Veck{|6gR-zh}E#d%ZPR=kH|6plIzuCvlmRvS7HqqF;p?^j>ch`S>XQ1#%# zr}jK+&nTu1Pw3l!3R3U;2cV;T8EgzQ_(rq^mn7VU+k#7dHj2SKt~-0WgyA6VJZX-i zNBPVTn`y0s7M$By3b&tbBgJLDcs9m@qXWm{&=XpGb(0oZ3{m09UV%6`Hk*5WNu!-P z#iaVIE2tVz$I9vuNyD|6YEmb$Qn5cDDICBdQKx9Ywme?bZHv6^t^*7`=s{JzPC)&i zRkEE{DBT~Ii+RsC(}@Q=VePL%NV%!X2QSaypJU5txrsMe?Hq{_r}}!lo7zPAHXmq9 z@0(<`xLH!kor+GPM}Mo(kS|5_;O@f{Fh_kV&-rS`C&CZYkB%-FJue%B{|!XPZV~;di?vGmc zx02rWY&bclJ*%}_A>M6(&D0H1eZ3#4f6~LVq=&JSHF58mw{-UI7Mf)sdJSsrdB3MQ zJ9iOYtT$eSeVurA{dTEo+d>-k*dOf+i=fuD3urtF<9{YIVda`^oV-;DpIsPD3(F2s zzC}OCZd9X5adWBf2yxDH?#-hc+SA`a7d%)ZvI5d-DlPS)nc*g!XQ|4Wue##Ep?Bcs zy%JijFZj{T18{HLNa`&7e8pR|C|hvQepV*Z+?{JF;9)!;Yv_RcLp$KQ3yWY$(L$Pd zXgXG<2*%kWbFN(60BVnWvCkpky~1hhwqPlAb@)SZMR%M>HD_S3-f52#l|YW)V2Cve z;=S4`6N7)ZrEj8#7Fo-Zb%GLBbW~s~^}eiS9*vWZUhxQd+ZOK(8-VJLdKhW9ghs7B z4U1P8;(za4xkLKDvc=^lZ0^z-E@gcNg&z7m=j|H#!{4J|_)VX~Umt>k*kPE5%KY#_ ziM-wF4{Vvdl)Bo+V9Z(3#Vy)HyEhBw!0vMQo(pD47c_UkxWZvnQ|O767dFYh!?duQ z{WP)XR)xJdy_Sto`fvyYn0WOkCvCHptSlJv@3Z>v!8x}pzeyO zIW>#}uJy;|;iC7JIUE&B&%*Luui?mw4!m?t4*NH`V5FtsNMG&6*EMF-itRe=@wdHf zy497EcewH0w<@@}sS~&!ox+sVpC4`=O9#U2xu9|(ZGegR-;8|tYNd^1W5c+%*@geT`bTdBXL;BCwZdEPDtfGc=&-svn{WQ* zablx7s(E$5MY^*nVQr0kZ21Am%X7f54t7#$U=!q=x56VMtLfJMDsng=T#XTaJa*Da z$b6Ows^gT{b)V>?KbuKei$%U%DvDh0^%M-DDm)qT+U18m68$;X8%H)^nTUC^v!Ll=+q;N=NHY#}#LgjW=wKKe%bS$>6vtA3Sl zl&J9S*{`Ibrk)Vh-i3!my0e#sV9aAVsP?y|5?fPH5dXH5A+b;uo`g+#{iSsa1Mv49 zP26bg%I#xJuz&9o5~>C`FwX=};0Wx#YyfAcx$(t^URYv(oqUw$!aB>7WY*7sO@0i= znNtmEe3aN9^s|PXlh38sYetc7>NHGf@#N4q^CgWNcD(cTSXRjO$ECXEv|xf7ru8!A zIeUJ?%=f7{OWX_CjA6`-cYw)(per)zXT7v=Zh{&Q*}aL*th1+`!**6=e`=MxoK3`D zUWq*VT_2nl@?O5$A&XYtehuHWqIq(MLC9nO(u2{_ocU9H7RQ-TGPx7CvoNOOulMA$ ziH3COqcN&zTXDA2Q(zZm+>@fjZMQT+u)Z_hv{U5h1o8haWh>cKH^?cDu3;xYe?To9`Mixhw3w6}r&CAgW#pTlBOktZNARgSpzml) z>NY%= zvZM7^DY0)znW?jI^>$9;yRGx!ZSg(0dp?W@J={r!n{;5juQRWn{t?O-S>Qpd0eDKg zjDk$9cu0b94u0QEbAOeQNu{SW{AM!zc5vg?0jg-JQziL4mhjmy9o{UDgC9K{Vclvg z>Im=X#;s8_}{C%IGkbzBWCn{}09104;)-q|vMpO2hY{|dx z1rU0B;i0u7;l|W=Fmzg>H1g&xxK+PRQh$G(9+p0Y82`<*LgNz|4-3Ha%~v3HL|6If zXK{Y~q$>Yc*cSJzjAP%cwQ%idM^fIgngX6>VAABVvU`g=cHHt4lDDRz=J^49MmUdW zuGZ%#P8VqEe!=N%cnRw#ko4)jGJ38L!pM%Qu%j7iqrRGlZ-b|lB98vAwwltJ(U2T-{?NTWXbn*R*paTLo~v;NXq?XD4#tarkmtxwr@Ci*8poxOG>O zVmc4TMlTP{AKU})iris6B*V2~(J*N9FS3n1E4|#7hCKvl;caURbZ$F_Dnveddw~(e z{U>H`4YNo=FtMNQDXwsR9>wKbtl?6PA$JPUL}$BB(%3I{6&?G|2V2uPlz*hKZTCHN z*<)wL!KhDQH9ZR37DsdG>o#Qivj-0Q?Sc<)31<`D`Qh?iZnA*?y@s*;wlT{sO>h$9WtGr_KbpoPc_);P9D36dGg8I;@$RfE5yYoB8=?` zA1EDzHYxI<_Ag=YiT89=eBQ%%f+5+?79PAVfd{%qc%*qMcZ&`J|J?)NTA4Cd`N^`` z7ouk8h>8P$+tCGe!6aNz2KjHoI3-7!baidfD_O8kULS_}3WIshm+i3h-8VR-q{lZ- 
z&7!s@T`}WG0CZa9OAe#Ejoq?+vmDH}i zA$o+2!`PG7C`B1yo@*_P%L>GqUK`>55I+WyCtdCH4g&9H^WMuBVcW(6*xmUcsOvnT zWf5Wcbb;WPPaekyN|dFA)9S&XLwoA5@xDC&h6#o(eJ0ynd@T*A*5k{Kz1cit2%kE4 z8=Oa1Lq&!Ox*gg8H^oVuSH{53!qKSyBpzdI)7<-+WuW!Hw-mHlIHv?Jc|kulj82>> zsZQT0SewnTW#KFu6ybG3}|NR96++~^8?SLG&5wIaD8`~}FD60rPI6+Az73{o$fV$Zo}JQkQf2IX%7EDcHE zuJtBdSzG`o%&LY1h`{p1g11PVv$8U-Wzj?f+L4vztqblm84?*ux)Vc zY8x&KO2eet3gkcB9#USsq+7ZB>CM9Va(K@k{IpNCG{v?pyBgQaCw;n5OF$a82-d-b zO=1UvF?!YH1_mC-4-5CwJHl&HJclidzBJbROIiP}hARgIf!;h9Y(^LCxvd+nE z(6#Fq81Jr#tIUKGVvulh#)Ywp$7HT*b5cs$t_-X5v|-W+u}}He8`q07g5`#AHlIF< z8>SV*o18p8@nbS6>%X9-dTm60YFe4X)y0yMu3%f+=CNi(Br0`xl17G$esTLgd|V3V zlarzZZ&itNWB*F|3-3r<_XV+chAc1qW`ydcW>}@Yn?~)OLZz*J`NXHo^rQ))AjcH9 zYea&#(~|(!J^5gw3-3$1Ad4)^MtRB>2BmF37a07k-_;n2Ni_Q5%OHa{1vTyp`UW3Xay<zx)QR`qD_R>L#M%?16Z#&0g5r{TMy! zZ9_v=tfr@vqoG%z6}(7_=4HLjSaXvR#yeKRvZoqQT$+Jvy!wkCVmh?DvX)XbT43_; zf8a1BpC_&E1b>u<$QQnr(be^~j5E5jO?VkSsn5YT(NnnN;~s2uelOL=Me+idVR-iE zI@+qygVT~N;PJ|il>ELD9>zVZSP_2!{*_$>_Fo2%o;w z4m&ty;gtUOUPY#&2&ZrD*(ju(~v zvVaF3yXE)AxxDazdByL;y?M6l2RU?W1D#y+1T2py;edOi*=CKeYq@u*)SZ{XmvSA{ zx?)VP{kK%~znF;&Lz@Jve<#${k4MM1TghGZEOo7Ggl$Lk5r5Q3v0}D(EIa`5c05@9 zv}Nlnn;}CjoUVoHLYS)>Hg7J2&ztLH`?%|JertdJbuNS+T(igN;dywzNtKuOxDSs` z#Nsjg99}$s7gSXaz=mdXc~e~*)Sr=nCxV?IP46td2;E7vMwQx@93ZuJYPixR1*SBA zlfp0A;~;fUZnig|DBFkT@vn)auUt+6r|l0=dFF|{ApDc zn7re)g-l8e_zZf+n)Jwjr*tX(#nwanvJH{~(zDSwxawS6q=jnVJvvW)G)$ zD399#>jw_yx=&3I`RW)H+AV^T^0V~sy?6$#j;BH2A~?7p1NdV;S_JE&@--dVqVpfh z6KC*%??m1H+2W@EhH#+X2wdt{1%oDQapF+q5??#{+L0Mp<~5X(rVhh$e{W3hq()00 z*3#X*W3c1FV2sv3FLl-r6<3QaUHBM{DsPfePs5LAe6h#dD{5fB)_7dIxruDfT1&=z z7D7VEU@Vy6hZ|GN<&Ql}VETkE^1Hjexqh!T`_EM7A4#TyS-D5r=p%G+e?rmplt#tj zo-s6HpCx|1Is+G~tbp-uQC!FSJUaduNIS>AmWJ9HW4gjTd5WSlgi02e*18YIR;`j$ zUEJ`Q+Hls6jivHY5oq(xm>=C*3Uh)E!PM3c5H~fI<8u;mtim8>!_J&BF%{nq3W4;H zp}0!51DbW$zMn)ek7 zZujDIdtSrPm{IU{{~qdlY7}!&=ZX_o5mkPlflwViswv179>~u8sy>V9R1Pk1sRBF0 zdt`j>6B+Ip22;$Eu-%S}bYP)12D^8|7JHF#zu2A!b~BWII?t5ebeqJ^i?2YCK^k|t z(UWd|20pvdkEgsJ0NuA)@;)(B`l<4S9*X^O&NEFc`WnN9eru`z;v|m9n#=)<_L4#O zS@ho68uxx2fmW3gK~evq#jRpbf>| zGF%AO;dRp^uzSBY-0(>o7mplIO-}AS`&bI?IkE+GdJm_9Uk(_RQBNwZd*Oza4!8Mm zTkw;-@Zg=!Qh7ueQ>HeIGStTW``x+YQgL3q9?zY7T%?^#B+8px2^*dXuAIk2PSO4< zB|P8gq5onOx$i;L3bv*vzy6jdbUaKijT?(IZ?gBG``9m>yk2sd?nd)avBL<~9PPJZhw z`RDLwbUf(^%s1-Bx?NJ(%WDvtcz^NSF*%B9TO@*RQQRJqah+l4WWsf=QJx2UX z#UX2V$|>!`Y5jH&{;SrBWos{;|f<{pPV(kdV_E-R*D6X&R&X)c+KncRW}B7soRr$_R->B$Sd6@ww+9 zB&F`}XPCLcTK%H!wNH!b27 zP+OsalWI58``qamEU{1HZo1*WPQRhrbPUV)_24bKJ@|n1JMX_R6k``9&1~6TBpvx5_0%k`7lA8j%a0fLNL+Qrc25ZsBdMl72)lO{qgm9BSd`lWUk zu4aVs8_f_|&5$(SAk2Vc5)(Z7(SFKpAIw6hUN}N!5^vcm59`j@;2b%1*4rU5J$-(_ z+NLdy_jW15hnUSmqw^c1p-dbnFvx=4yRR89$m#{GIvJyhs}KattPRQNj@TM!XZBR7dk}`JUVyZwi`wA~gtvXS!uC*UZsztD_Q#ll z*)V+`cq^NebDb%aJ}LX)`l9_)|66InRef&7a}%K`oX&YzC*kmjsi| zdQE*#&VXLFE#$PKAD@+fL^WFn;l;IQ=zT|j_H1s3hQCTUb;x+Mlk?)9LMj(ZIfj)d zG9Oy;vVy*Er;L0PEhDv9rl=PEvUcXkrh|2Ty}RlswMdY|R~>TDZAYm#x#P z>GJv!`0`UAy6s4m-TWspa(9N{A_HS?xmg9R4$4wTMw`QaxN=hEd~#5o1ID5?uKJ4L zAJmD*y+0`Eo_k2{cY5RXT6Ngzmm$s2I`f2)CYUH?1^?@&!QEBsf%bIg!BXb*f~Az_ zS)IyD);Q8b%^Dh)I3NDRyRpBEJzLrB7JsX6pdAaPyrJo9(k{rOe}-ROf&v^kUe5`G z_IJT92D#XOM}O3**T#gq|LFeWVK^l31+06UhHncisKR?eQ|z&9T$r(j42Ej)y!RFK z_yN*M4Rd^^XH0U7C&C-2Jnnlt9nB`FQSSGb;%SS1yr`e#c%1iA>gg(A(i~ojf?e z`X$`@k%awsOBe5dDg5=!QemUoZ=r0e8Xr3N5-KP$15Cb_6i?!O4-u{Xf^dLnh% zl#VCMZAf<5n0jkji1fh-zb(^5*{Ld;@_MK?G;g^B%LY9o?^8OsujINo`iGm;H&bQzp&#MYX>CqDxCtgs4~Law#>2E4jYiEC zfoy)Oos^_qSJsI{XnprntZnFp$3Eqw-K{DL>^g#{NY0N}Eq(BKNgiH$EDv36KEtI% zV?Osr1H;>VIn@4E(@>u-sHxwJ*QpwCp-BceweF?Jx1+GzxlwGt;jK7r+bR0{!VWa{ 
zc_6PjK?j!36=fz3(yn77$IhAyUq58h#N8I~s_hdbCq0J36LS2a<{ylDr_a4Y1aKLp z#oxv_Q2-92QkfT_TM#Z?-34F0n+`XWRQdE0bEzZl$Z4H4*yCrRaA2SsemZ`PYzwZ_ zur*9i_dkclS0p~cnsrd}cM|7lM}xw;CsgS%7^}~j;f_h(Y?<#4MH>&ng_*thZ{Qa( zd7u+0-_GSJ5|i(KkDX#`$`lN4e$rIpAHo0fkb@Tmlhga_Fse?5_pb~WI#k3UEI?pW}u>3f@sAEfZ1*;f3!#GGF5o5^waTPXB+HVjzfL#;twq4(lq*eTtC zu1t9gpT2A1pYZ!aLeDfFv2mXGdU_YU=zWbohNgkL{C%0m@0r{X{Rt{wsPJ5$RH|5~ z%JvT(am>+fxTn;EjgM(?s(wE9HH^gx1J=_G(8eI?cYNf_atg@4N*U*?#iy$`QNsHq z={=f6Jv7zv#;#15(J1hu^b@eCsa$-RE{_|AJF#W@05m>hjf*wi`DNN*q{Z9eqLLLy zo{r%g+s5BFaJI=fUUMUnE*M#{v0fNBnww&?!B@E2eHa$J*Wn+QT0CTSCZ)XB z!lg4UaO*!`7UloIl1nx?a7(dJw6iaJ?ffTxliLC#h9raewsmx}Z3#VDcSEf9_vhiY zgP{3eBHQiGqCC>!p5OhYIeB;9cym1sp>#gBZ5eF6EI{ar-Y`F~k8u9851YcfLv}ds zkE45L@SdYyJfKqy>T2w#bCJQql(q+;z28V;EgD0|_C9#!%{TFH&<5z6mCrIIOUhgH zfn*akc}7LB7#JmSvDY`#P31`dqZd$W!vL}Nf-xumwa3m=c8GQnPJFV>o?P1olcDKy zxRvCDKQsH_?*Ml`xxPZCx;;wrIl9u*N639=`(W9(J;MCBC!(*nGYSd5yhXkSdc2YD zCs%^$OK*D~_*spkjCDCsbA{l!Cm!xQ`9jN%6mBnThJA7Byf9jch7Wh)A%jx+>0mQq z`J+T(gtr%5s@Vb4R~7JoFVfkk)EXyD8;n~Wwz;Sr?Sw{ix&lP}g%&l*-MB_y^6}{7 z@=llF^tv*zKO4^TkG`kgi8kC#$~7P9FVgc1PbHRjg}BT7Kgwx~!F#t0`0cmjL#*2Lwf0P_pF(7ii(?10g@d(b5a(K5iDFQc*QbvxupzS3bKl2-gk!CB9!fgU|HL!e-TIJS@s2-_$oKR8Zp9a$7i7 zyPkG+%EI?=BhlJoFUd)tu@qdOXUe1DMUuqNh&f0P;#!5=wd2+hVjPZp)Bqz| zpNfCJ>*Bb9qp@U&2$%bKp!dvO+1azKmQ55Jt-$VJiP^eY4pQqX^lcx z`%ZlH_jPIaRS#8fyuhn|HI=nzq7obxjTS5d)k$YaF);uJwYc-c`NyC!=B+q?kUaRj zStIKgYL50H2Pj9lD!W>>2n?gzXk4dU?w|LC0w*QmHBAu;j8{=Xs}-w zaJ8B$AGzet;|BL4Q^#D`w7Z$EF7QL86l47G*^HHcoAbq^8>wnT1C73v!{?%txnhP5 zdW+Vqu5pVdb?m2t2`_2b62KA9*VC2BTVZQ^Djzo+$tP{r!%??DUL1R#c3U2xIW41E zanT2O;{Tm;B=_G_|B1BKP?@TVc2epcf4Zt^1X=b^nu=|_Skt$Q@Hl=NEB3lTzakqb zVV^!&T$A#q-MjM8QahfwY7LZ}^@17Gvmn4~2ibqQF8&*8+%zZnC7nGrg}%Ox;ODKe z{2$iR*cV+BNUVt56nIBBAhU71)uB8-pJ#^U3#12}I$7&`CihiytKcwpdWT2Y-W zevvo|y&m_&^$^HcAKh#kztIR5#r>o)gYxm>BQ^Tr8ZN#V7ss*V`tl~5!_Z!rz?FHs z;N;K+(oV1o&$b)QZ%lpR`jI3ITM@?_w_hdK>+2~qq+K|-Tk07dX{V0E2k6zbmoQ_A zK91Vr3o1_(v7pEfU7V9SCQlx(D0$*Qma@Ao*7&!q8_%267p6o_!@*~!4^vP~(e z{Isx|%5o%Dd-_1MEANViF}1X?XaYDzeiP`mFGf=}n0Ir=A+Vn|%^y$h;wU^{c~X2= zn}BxP`ie=lZd^Q17pr$k&YBI>l=rzoe7HLrL(V(U+_v4IdMF)(ByQ5t=W9g!`#Y&$ z$@(b|{L?-s<-lIZ!NNfvjOprJnYa7VGXaH&(c(dF$ zeLU355!Es#@{^@Us64uivUb~I^(STOdpDMcobbc)hM_p{dkeKI`+~=a5!h+mHCS@~ z2OWESQdA1>5FC$e5={JU@xw`fc2wC+hI2w;?bRS&IDY~Ut@VJGKih?ckr8<1`#dPu zw!}l`A8Cz;0|g#-L}QdX8jYQ#9??lrVNQ&&X|OME?Rpg2vy`x8ls4F0`2vbdlCb$m zByM`-KwEJhY&p{a!j4qFXdc9R+5(T2TPRfMwl|I5GlX)s=aTdNzqIblFz__kPM=mh z5fs}FlHPx};ZsU1OY>?h^c}~W?;7#iN9B|-{Rhp<_yii$zl+(YZo(kRWmZ;M>6~IF zvH1Tz1Gx)PJgr1JJAXM3nJrsEZYDH2Xi$@DA8$C>f2nXdQ({#1_D8*0^-adX=ft*e zouz!%0ytHtPiAoz+-u$g@xn-X*#7{apHgSxjs8lZV1)zcA4}k(SLUR=;E?#`NG7F? 
z&*fXQy|{8kAs2jz;~C>6j}M*`W=Pqb*yb#>k&k69p^y(0+!sZO0XQyaILdbF;lQ2g z_;utP&>N-72XZ!1?%9vx`8&SYk(Y@BnsvEzoEt7p-2fEZkMHE3fd@lc3xh8kxX}loVfif;T zu*zstUU^6r*WFgZl6Ml*Codn@Z#h6)t{s78*H2QH0VYkmZ_f}cn=EKo-BPkKkjK>{ zd?`dXowtN3OZ^%hobcc(?0Ksr9Im$J?Oil^+^I4AC`kbW_IC%P+;PxHQ&*_(tceP~ zcR-~z7+-g(cA1heh#lrw@Zg;eFi6ng_>YNr;M{dg*GdN56Vxy%yJjtSzMy6$YQ=ug@K4bb@lIepMYe z{x?+m{nfzeneNp6q6zJZ-$V12&eF)0W^h}zf(HA!QM&7S>Q)*qXtcc%+BWKQN~#|> zCyt`W&#%h1FNtGg@A=~TE1pHL?808`o*L-7iKY8O>m89W1W_>_DRU8;=7O2u!GVE;!pNAg0>Kl58y^R^cx z?&<{PT{`hnx869nhYrfseId`42VsS!2C5uOW8G)!64S0L4yRo1}GCDVBL zAP26=4Z-G<0```xV8g@$TrFD)DEkbm!9}tKcWdD93xSSS4HkMC1oF&3(|Mj@XO6BIL!*NCfN^^g zzW21?-LKZuu&sgEr&U2bZlI5yCJ&U<4RU-ca0s71lF7?_0xA(jk;~tT<2?# zJ;v3F?_0IFb=y~R%j(3qQyw~&UZa!lsdQ^z8ZJ-k&I)B41pnEFxKgQA47`3unAfQQ zj$cpVBSSsW<9UU|U>d+%B(CZcS7q*ODm5W*NAbZRiGS)*2ix7k&{cCPCv-alSySiI zwO!%t;wOi1zN~_Y<069{k)pReaMhNhovO1-FVz;oR(c8oh9;;BNa5W=QP+N!|rm?rDNET;9^^ zH_gK7V_Qi5dxQ9*MS*|jXK>w(Zs>9O3l(gbO*J+){Nz{^K7EnrGNy7LO*FCK;1Ycv z5HXfMtllflqTA^4XjQED6>#mG?wl{(DU=<$U{OOH*N$@LAl-1(UGL9oyE<`+-`%F8 z4+1%1isVm~$>Evj_kG042;g&&oLXd@^j6CBOlzjXi!bPT zZaS|?h(Psc7ObV1IPlDRq=ScZL4XOj%=MAPbbkr+NhT-#;1`y>@g-*PV>16EgYc+%I7B zYdVnDqLtO_4RaykWOtf-?X?*f|TPs{UZWbt@w!50o#nyM>tIrT2Gs74!SIeVYmN)O0c+cunUA-3MY-uLwUoiL456TAzs_@66Op4nglC(Ui{A-ry59ngF{2P z?A-)${KhV*B;7;D1?l22-DJ|)Zozft5_7H0KuBL|N6J;cTs=1qF1FsG7_}VQIKhV7 zGVhbltX4W8y8+p?9z5aUC0Knd51M7u>3ECO;dgjVo5f^qygm{C#A(3So^foyQU%w@ zjp02T`>}9hHOx~~<&W$2Fl<-_9CaxY*W7WJx+i0?)GCk_gI3a(x^ygURl?c5n6AgV#l!hSa|OWWpAv6h8TC#v*j)D#+O6M# z<0Kb&+jAnXH7KwvUY9-p?oVs!|I+wjvSBN}toJ&!hN3&qTbI)J7j4iL`x}l%J5ZW!HQ?_QTV3 z!F3=sXd2<+s7CPVm=8A=Ipf1aUudvn6gR&di=RWs@{(3dZh25e)-}fbWTFk+_!+|2 z@^oPM>>QjDz7)oclk(wqrfj)3jvqXnE^%jqIpp7eu>Q|EaQQl%UDrxoVsb!>+7y_V zZ-ny)x4`fRF8IW40`5Pu3l2!!SAAPI9$9~sVh-P-0gGm!skJ#~Cf*h7u7=_0!_j;t zY&ei(4~bWG3*0L2!p&tBve`X9Q-rZAIbXBjd2dYku1^pfN9+-+beX{2&^|iu;Ujf|O;l^nieeumdRZw`YB~BXplGgc67J83*1|d-e zSfI9o>;VihI7!FT9;&z+|os{{Kl{rKYYUvSgd9sb)P`D2t#xnr9z zuFnp}sPB6~FS(gailyvA+dbj$s$e?#^9DU?wBytueOz=XjSqIOpgZ5?v1PV0w5~3d zJ)RScr#6nE-VRH|MIpm@O?WQ6(7#0=oc7U)R70%ot`AB{V^}l5guQ!yhda6h(BtDY z)StWsw!b}1Z!2}A|8FGrFOtWK*>SR8R8Sve* znY{YxK3dy(JRV;97M$Frd0gsb+G;UPnj4>oH38Z9ac3TSTuh|06H(kaX{{Jm&?>Iz z>x?(&np3F6efjj`B56tQ|0w-|d^Dgt_n4c94c=+^b4EFtcW)B&%=~bKTq@GFo>=6b z%Mr7$h}o@$II(#gslWeBy7gB4;XoXH+U&#Q299CZNyC_jn1Du_BYJ<=#)fq|sJm<& zt&cJ0o=MSseUlB>41F(%E(v5d%@$W!q~N>)i7g$Ew0V*eM3$)I`0XxmwfwK3aNUc2 z)1+OrNhjFy!vlXU$;BdZA=TDS#4R6>K$wXNH+26k`!}!;pY@yzLmxh+%2^H=rx#2g zy+=vx!e|WGv<9x+lN>IACOD^VCR&+mz;I=W8k^#ZH!6F9Vu+h;)2{K@p5=?1rYCMDN@Nu$-yH=5X@D-u5L>Z>ov!&Di|2}B@MW>iP-4wE>hY4-bKp+CA6jHQrReHWymFT`U$=L{eY<_}#ir5tG~hNo zSL?xta?QmTN-F%aw(sHl4V4h0eiTkm@6I(+ALpl|CR@$W|NS7PPcyy#Yx@Ra_}tiY43T0*-0k^ z#R2@$?-{5j4dsm2`c3b%0%<|abeX6BHd&KNxzx8#z}5?o;6>jMG-&qcQ;mi^vwaXg z-=>8w|yFmT>DlSXCB!;e8;JZz<>=iBC?T%Hap+cXLf7KdVHTpRQ`Q$`wn9btPv zdDtv5&F);cM9cgEnA2q;DdcK#p1&|=dkiiP z=!a`}s9?_L`_!(VN{v5fLut90#D2OBCM#UTvpP<=uzjTQhr5W#NeUayWcf(Un2EFrQJ+FrBrrSv16mI*C6bz4YvmkM=QU{ILUkh z?g16<5!HtRRE|^c{FzwdJ_HLln(?72Bgpx>#Dx}rLQ@!W&c}YDeV!+pH(rD*=ZXZo zC8?M=yq+Q}3u%VhVAP+`1kIzRjGUblT<(-Y-YOQj?9euNB`?p~qjyWsQvt6w&BU=- z2@YpHV5+=~4V;}h{ev6oC%WRx&6DZ6>QF9U(VbzL#Gu#~ftM~TaA|@Op7OTh+IeN7 zMnO31+6%bYMe-Gy4x_v+;k>8n8|lRF7YYxLLd{u=!9^EYHB6N+I6a1s8$yIFQvUIf zYA#(HqK=VOsnTa#O3G`@>CcvAF3}FcqTmHk7Tgc-JWa-9vc5d9MXwi^j3i&>hev_Y$=|9E)SOrt>?qINs+m zPk5Xc{da(%{yW2on_YB^W{2u(32lLTybLis}e|9g5 zV!yY`n&hga?1f?^y*r?R#bfH|SmIRN`B0zBw*`=c`ea_QK$B0hA7LR@EN0Zi0;XhBOv%dc?$#)rmeg{9u-X&~-?B#9L`=JHdDo3+}7>?n& zS~#XchOkZ_f6tHOGu@U8fm*xZ3T`L(;dgCdSfZ|Qd@9hquX@NOj-tinpt%@c4sHfy(;>Mr*J91^~8JIeDT(Xifk 
zh20WYpj2Y`J$dCq4FhNLk#qg9Y@q?BIU3WALN&f)TMf<{PigO?LQL}BPO19_;)l8z z8e5=&d2$Qk>zfSB-!cb0cEn?*qs? zX7*6|auM|L%^=t4zu@Bg8J6OHsv8r6BUZ@MkTh!unwyTDhIzsR={|1|tt0kKGslsa zMzf}x2bxIP&hoNscKZAc{3d4ce_cY**}NG##!bim&u5dYUuT~EFO+?68}j1`Q@DPQ z3n})C=AzDkXWy7o?MeqeCY^_RNxSeFKLW8!egix_H-Lo=|7Rh{!_u+!qTD)Xx^A$A z#(m1cN4xBC{@w^4fB_CkOg(7jb;v!m@{{^C-1ww@P7@j*@VjZ=* z@wNLIq`pJig_sV3z1KAH;<9M0ZEF)%)~4`}@I|yB;|G0ka7E$SGO9Yhj`WI#;LNLg z#H@-bT--SpWA~UtrSUG<`zVo4sH&sIYvk>l2JuG0gfHtU;GJ28JXc&PWX}QK+;N=F zDR;%?($02i{WvMBrph-1#)0CaZul=#3nMC(n>F6V$Nd|H=*TgA*`Y7K zTx^A*TW*o%$|sPh{g%F`4ad@<6L7$@&b-{nmY)phiIr0}(F?a+w0@VtouAyLtc5+` zjn+*3oN|v&#R@$0Y9*~0n~FUomq57g71&X7hT2o7a`lgFIyzMkzv|m#{p@rY(b0!J zrA(%mydmb@%o2^ZmxImHwQwcokWgW&$_rh(p;}>)IHvdl{qfhuJsao2)Uu&$wv)iM zx`mzu#Pg$B+IalK7uj_cc}%d8oL?KHn2WV^R)1GZOui(O<>DwsUnz5Z8~h^T#7>7D z;YM{XRos0*)$biRT{{h%k_T|W$YD5q=_9e%uHi6v`A^clIguMz4io={2Z|$$R+2)B zHN7ve=i>becvv-@jZLj-`#C-EE}Y7ndpYs705yDV*H7jde+Tf+K%9U7Ikg;%#V_|# z`0S~T(D#Qt2A{O2uYWg+&l01tKtY$6ySx-LYja@r1OxWluZdN8qv^+XL;k2)4*^OO zc(407Ui}SuK<#?*cxea3czB_qO%U2-6rh#GBlt7FlvJ&QdDrPEUMaCIY(M@KCqLK$ zr&bK&hZje%nd?7FT$PMB-p>>9L$bu)y$(B%ijeItf@i!LtTi!fI_sLw|D-PF*YORM z#wys~s{!u$*=u{jrLyJGT?1j6Jv{gfIslAKfbThEG6i|M8rn zIEhVfouRFDO1yugH~o~J#p1Fm@n@P3muE>c|0^xfCb0qAE8--yv`tb%{1aTRCg&r#uc~epxCZ?rh zMUp!&lbZ@PO@6dzNR`$uIECvDs(PIZihYt9#&qNI1p#I5S6WfF={xC?tM)n?!I%jsUT8csAa z;i~cOEU41}q3Oc-^TfErs zK!2#3bAmdW-oW!38=CjdkR=fo9&8dRijap4Y;;OKiE>-ksm|awXqok7#$M19vQRfru-$ zpnrD~Zr#>I!md7aWvqg*R(~Y=pXiPS_lux>vm(#0OX92-K0G2Tnw4%w;II8k7^C|V zC~^pOTcf~5`+KqLxN1;(-v&vZ+Ptk+0XxA%kdAktG5al@nSX&A7Fc2ByCrm~t}o`i z_2H1?`$e1e{qUGRRvQXV2`(@_ygx46IhG$= z#_>YwuG#(}KseQ_j1)Qt@P_&6_VPQ+lWu{C+l$b>Ii|B=z7ozxgQ5nFE?qo&F}Fx4uD9S3s2e@#5Elf|IT z)F4zmR|g7vPB)GC^_ZF}$MLmY=GaMJAho1me0zEViZ27Kpv8g?(Rlc}c9hG~ZRv`wuAPE5QEMy+Gvz~NAww`LXP|M~*CA%eCXod`< zk0bl390mQ2K|H0G#7W-U0oA`eh#f~Wba4^&TS9r>sti^OlpL)o^`xL>k2;S-dD0_I zwzpqNbFNBW_wiv|5#oymwZ@{WY6#w{ki6yBNAcwq^89L;T#PT+Y>MPOv3oG9$=7}#d$LiLdkp$%#5m`CqHZ(XJ7TA6j6>JF3iWTiN)ZW zGK5F<@#k#)9PH=u9X4%w2!9;n_;goIxc_|yKl7I1C%-`u6(>jWB`svL1tI0xGugyi@!N?L>E1tynnoKcV-Oyr!xlpVoh=8iP8KsY7YFk z;SJy1T_rzgG_Sh)fd4=7RM@`y0U2&Cc3HDo9uH&;!E>k8*fdF5?EEU7hpP>s6c@?G6PJLo)hXf-yX`cn z)C%Tl|7)tt(&lvmy}5F!#MKe{@#L#dVCAI;uu|a%%pAIh0@e4rs9yHQ^%328NA-7l zFjXJrQpfZ6J?*5P`Iofnk}y+cx46FRDYz@sfclZ4=wG=80$=;^t=cvUlKArRy>(c} zW*<%Qwxq!U{(OAMwZ(o-Kzl>nzcAt3Q8~ z&@P89PQ&y!TA2MSi`^bikeCcINR#Hz{S!yviGEJ_bc-pbofwHbL>JP$r^=I|7lQV6 zxRxKxkL&-zt&It|IesQzx#hv~4i#YX+Kl^mmG*X>w6ObGYjj!a&M)NbsK7Fre@`ff zwG*6i?Uv4%epE&?&8Cr+)_k(o_2b3Hy(Q19GoRT$kZLouc-Qw#Y;f-;u9&ZaYj2ET ztMpp>TO+v(?{0z(=RC2{$_#rJ>?fQR%cc`0u7RQwzh8HdW+!F}BSKr6V#+_#J(U{B z(e>iDe?E!zH$&K4(+6{(2H;-%)y{J@%(+jz4j$@JLw`GYap^%R)5~5Qu)`CLtBpB- z?FYD(@5wKnJmK2icnS{fj!x%Na8LCthzxdThkc7-petfODdT)EM*%%1WuTIEH$FIc zFh-X5MZ;Ul+SVn@%vLbt7> zaJ{}NzJ45sMreQwdZj?w-4GaOs*M=qD{*ySQ&{zhreIe~yxM0p!emD%aF;_TVL#N2 zZ=sO?eu8bg30I8Nq82c~#L^33f8CjNj=U4z$Lt^#|DHU=b2;drw!&-Y2XWW=2k7dY zeKhK24{l2+qW*u!A>5wK{(~0K#rea~)VvgKq;=*k&x^#7pE9ZAP&lq0+7G)K?W6oR zs-n4YNjPjB4XZ2@Aa6z+>3AH5Gu__OAwy;O*CP?V0)x2kCKYKen8C;58mU>mJFfri zO=*`WqW2HUV>!j0O-~M|)pIPd_xsmy^PV;-M`h!qToTTodO>?8XiDs;w5E^!jQRYN zOBA4ENE4SB(v{0@oiEt&kBvRJeS01+=Tt28vF3)z4uIqLKwCi{)0!qreU}}e z<+58;{qUKJSJw*H66eqnw^#Je`yzbOJ}7K>>dpEaoN?)04d{F!jJB+haj|rk@8jyk z$GFZq6pHkD}#wo6>!hC5ul?y2Og~c20czMrorL+VD8RdTyp9In8_vafpIqY z!p=7?1SXcU;p6eLlUqdar_aK}n2fYMK7e(A;?1%jieuA#+ zBGJ+Qk!O6QtH#~6OFyPO1Yt~(0(DC)fIZ-Wr>kFBj_)^z&1H0g{XJ{{4Q=3KBw$FhF-O5zgqb~{B^V)nw; z%o4cybtu!JKpL|075zLC15Uk{Q}`e0?zCVA$G?rGbyM@`c%da|J<8#u>)j}{uM@7@ zYRV;t_L5e`6G-j$M_e^tg)=TUz~7y7g!aYPXjG>(zPQ1I2gq8Wpk4us3s(S#2jl1| 
z!+F`ZFl@Uw1&${Mpw))g!i)hDQ$hU|)tyj*%`-alw8ndo)IAP2_}9TG`zRjVn1a3k zDj)_*T&Ge4+`n--b&Va%%OVT8G|nCuPw^KYzuHU9ZfW8~*y9qqC{HNtW)B~TXyp+D z@VYNAWq1#fs%<)cc`9JUjv_IB)@hnnHleA`{Xf{T%LGF;4v~BCWYjj*fQYLJY*afM zBad03{oq#cNZBCRICwRs8<}%U;#;bnRV@xbB4u~$BnIqkEztBnLvPA7S)>_}Z3ITJbg< z0uC3`ql7Tt6P$xPx{ZOQ@qXOzgeLb{?LiOs^}}&tec{HaP<(%*1eE){CJz(I<6$%% zAGG!2E(Jc=J3N_tUVB68bJmKdq#l!ekDj!}FBsQ0ED-ON<+4xiXuKaX9s?e=hz(gm z?7QlSaQb~LIs1)4r$-V$Yf&1UJR1$)4S=<8-;kJ0yUFp)P`Hw$-gw+Ghkpdlg<EI)DPET zI~FSVxQ~>%TvCWDyPlo*!SG;QJK=2(7pAsUD2O~R(4Z0g!_nRa;( zXA5bE=;!`$17*zzS# zDA7qoj~_qh%d)&+N!xCYAw+Z|&$Y^s1MDziX6ID%9Dsld{{YLFtGPcC(e<26$EcF#yZ z?3Z^3QeGy*&{YAb_0}EcJgKEc^Q11xWMh72;>B^Lzu@ObQ`|boR7`m8j&D*2^TZRa zR0%$~u(v#Wgsi38MhWaA^^A9=OMa;SfC^eFl)HrCm!l&r6_*J&gGTVlrv`l7R$_eQ zWb@Ni7ohotE#E!e0QbT|@tdpE#az3Ns>jRWY}sCTqU_G)J2ncAZY{E6iSuyBF@w~q z4msPhBK0tl=gp67C8q5vdKvh)(esQR{;qWvrz9og@6%&&$mn#|ob{MIWs*?5#H-0l zqXtx$y@k7j8-$+5miVcAC_4YRM}D_rc$bwTp7{F!)>Pbwu#skv)UywtFi!---MVb0 z?uy-et`%9sgNJ-Q1=$bW(fOG`w@OyQtuD1B4%rJEvm9B^`JC8#L=D#kD$4G=jo}y% zC#qkRkK)^jg5fMhykk0pzO647cI0+;?lPesCJmBnzzNS(V_~WuM+*|RrkauvGFj-;{Y|KsLl%OnLb36g7J=6Kfvm10l(B+L+ zw8ashda&JWLv~pshieB`kspGNjvkPh0!>?mTW>V@mD41+&?gE%t#zTtJ=V*P%hEY$wFmaH z$iQEhWHOkkeUfq#tH5#g41zveX|~iE^)c*$ z?Pr#YYB73TY%mS)jFhq8tuR#FZb6r%y~@;_nRqo82|G2LB>$ywlw3)ved zCp?4vfAL@$GaMbq#)u{;66!#*BOjXN`d) z)%7LUf+A1w{nM0QEqPA#E(o0!JC39OJ}ZO?@0N+9=k+zfb2s!?y}d06(i2Wk&3SDve^JV2Y1EjQAv1L zJ`HcqSC$-c4(M`Uz{*#zXj!Q_7k`wP^!d|xPG3d7`l?FyP$2^^w`y|5=0W(Sax}(Y zT|gVg1+h)XRKAlhow56Wh2PF1Bzr1L`wT03QKccYNq&npCf@j`=_)j~KBUfRow0So z6H*=@$o1O&uylWIHZt!&lSo zaDVCm&Yc-b{~f!>(wai4Eya)pS@2ouL3teT#|*E@P9FR;A%G-bsEcyOAke;u*#f)qsaQ*K; z@MEaR+U4BQa?lvO*m~^zj|VciBYHUb%xn~1ZytohrA)AR>U6Brj6;*NLxdW|M2fqd zM2<0;XuBYZ>_sc8diEWJ&Gl?i{XohHtp`p1uARU4KelqHcJ0Ql4PcViGS); z5V=g8%-`3l6}IAvyJ^(L^W|5$k2c8G4m;AX2w8UqL%g{a=`K#kx0{~x8F~TkQai}* z7#UJgmJhYBoJvm5c-HAbUwj^=NpCc`(`J$v2K7szR;|9=c~;DnEElpnmP2S`v=SH% z{l_j^d(>XrtB2=*+-IZHZ?o>gXp(NQ#`WwXDEwyuD+-pfn;*HSqF+AFu2>+dD;BcU zZ|ab8dNXYA_D9!;b83GLZ)eI9OJT`w?xXymPmhmt&sz`gOpKHe5~t_TD8ps7T79E% zsNpEG+P;t7>M_GV(?7GWC2dnDB*Ovh@uB`mmmyD(G1lopyQiepEl3Wwfk z;i90gwM`S=LGkz|w!5l7z18)g1uu-q^MVc5WyI5Stu3H`uNjiJSWwULxs)LsXR8;h zFt_`>Q}S&K$OUL2+*&IRu{^`H-Ht+JcpLj&*&x>O-_7ffgK=GSdyOaO{VE6U0B0{- z{1eSNa<(d1`of&b%f?~3mm!@x?FjAv+TiA=HNwH_{ZN15rf5Fu4U-8E0GXD2Jlv~A zBkVr1yvm;}JGPf?uq{BX7&B5^Y)9;`nwV`GMAwH|Qbcz??OH5Fy0I!W^;-uke!80} zJ$=lU9MvTc%QSenc>zn9*(q3M&S5iJ)Nt#kL2Oqf&u|PpD|!~GLv?OEb1YrOq^#!9 zaz1~ZG13ZEo(`ePa9h#kwF*9(UJAO~`-r@A1C7%ZU`J{t^G=S$?C@Xgn#XXA{SgJT zw5HOn_*{H2CIr9t};?q>4HX;cT@B`=V}wCnhVgTPjNMSVxX3vwRvp7e!1>TzugD&Y9z@faiv1lp= zW|B~b_a{bE^SQNfbnjHG(buO# z8L7B%?+>^-I}LPre|62F3ApD#f*9GEioG5>pq!k7=RW~?afbB2=C>@?EsD(^xLWX= zrOWg8U66QJ8(UQJD0o({u>M^r4IXSu6TSwJBr`$3-W_b_H#w$rCXmW>`jKkY7dCB; zF7w~i!wOP93d6e-@z<*+wj(8imh#?V#%whTAJZxL@_pI}l@2hr=MJo5A1FE-Oh+s~ z2-ckC(r%H>rW#1Fmh}qgUM)@UmIhJe9urj7PNa-)mSUu1HWmE03ASw?O~<6IY1*zK zm^yQyApa|r3i*4oYiE~udZapiajIsA#;8H|c_aFE$(myB+yQODo0RD%bK;!UWBPgQ zVwXC}Mdab`!YL4%n?>nw`_~-EAAxblklxv=g4yx&%yRA=_z~<%J07(O;TQVjhZ!@_ z%W@_T^;fRpvRa&>5=_t2BH0uvfegH5XiHU^kkt8wt=gZ2XLlw-artE28`}r}7-!-9 z+kDpdAf0YH55P0%3A-2Wgm0HdaYunbIiX{5*Aa8k#LyR4M*5+JL0??*=o>sZVGqGU zcDU9qjI8W`LG1Zy;Q!Vc7e|aE$+n@?yxJdf^BdUTn|&}SCsZRRNYQ!-I@&VD&y-dG2*y( z5$+mfP)YY~(Wr0|4ua{drj?`qCSQM%RrQKkf5inN5$>6-LTQa zkD4wG#UCk>Xd9p_z!e`>d1W&F$GypAc>~#7w_@?tke!hI#u=7`o@RBA)ab_2be4ZR z96oJ|K=lYqJj8okQyctfb<1Jaupxl_9{Mx!lHTfq9qm=P7bA&bPzgy?y~c~vJ{wVM7=A=QfZD2{@rnyZFwUD z!@3eE9y3_j*A6GEpahhf(4QW)XOqeh&c^oRGl|?!V!c)g#7nIa2i`Hmv*%o}Zq;b~ zK5zl#rv}5RLSri3>;*TUTA)>v3|=Kfz_Sj>vxD{&o05Ole?WtqAs$% 
z`zt`*AQB~mN^1(O>xsI#mMT3Qmp9sARK3?w3R zSx<2+mP$n76%QNQxGsc#zT40K>gVCkgM6lNcQyFGQ=)GdbzsnjpI|Y*FQss5k6%#+ z9`Q^g-Qm6BB*h2Jg(Z>snN47%=*8~U>*E*i7>bk6rop!BSlFO7tVsTP&4e*3D8Mwl z9%F@K(_WUoB|`M(=lAeL1t_ciES&H?!PMoYP%`hmShr)1u;51|O7I={tNjykvq}M7 zE8ND40=+TTekf)t#nWf=>u|+%4ti-Ljr_}<@#^c?_p70#JVyfm+v6ne`;!m%@*aZl zqMUi|G?&LB*8eKLa8M_Z4SsQ;^WGIJEPriUv=PwFfwYoT~YXjRd z-;>J3F4nzzJm+Plfn8R!F#S~$4w@fD2W^xpqxzxnihWKHdcaaF9( zArihWXoGpmhFCYi0-wh96;pU$|JS=|u%so&@lAR$vl#f71-w22snc&TC4RnSV;sPM zKl{r*xsvhYa#-xu4qC>1zV_9HX?U2?PkYA3#HxtHo()5}!Jecx&>p8vR2BLSP{Qq# z6@>%R_l5tC2Y`H!Du!3);%n+*jkhvUYrF~R4IN2qYV1M5;XM3#((kEva))tK1L&Sx2rV|pT9YZ^;aE={76m{It5R|I=JGatuG zDbwA@``EjG1ypw}pSC4tVCT(P@w<|NI4&_6?x?E4z_4BH@|aj!GDjONM~s2b>!x94 zcOJpvE+J=7oZzASL9FX~EBZP;WAUw7%>P^;QYBNTtH(TW)^l%q=wem#e(yWB)--?& z?3XfcnJaK4I+h*3mxA$El4xXh7EIhx4SoE0hj-clrsOgW@2ebTI}(X)sBob%aVOY` z-r4x|)qgNOgR|FmWunI|8!=F6IQ)A(hj}T=V@OLHeVa0ZjWaPp^H2OeGb5k8P0iu) z#HQ$*}+HFb`HZ+TNmEpJm=i}0a0z0oU z4+kWeGX;%&c+)^|^n9DJBgKJER!zmKAZ2D6=StOf>R9j}pRs5a!kp#3?C9K6tmm;L z)sHuzBbTP&vA9x4L(3;lWgb!FIl_x{+$Dr{Yde_lZCN}r{T_^LDRWqVCW*iM6WI2) z1FW?xl8%k+foD9s8}j27JbiFWP&XV%R}#A6ee)y=59Dr*on^3Mz(g94sZ@Tnl^yeW z&z`wW#z14|+IPn8ut9b%y!{u-?9#WfmS?Jzr4va@gVwY9QytKkKhJ+(9!mS(MdJ}e zX?C(hlA>>|5-cwJ;A6cScxt-bY06FkYgKK8m0SLb-=5bn*ME~Ki$Cj?qu+3jRzLCf z#0b0^dw_*R8-j6NBkTCuAKjmRXNh$q$>Nb2R(wyP*n#^-xbMOsJZ{!d<9SlmDc*iI ztk1Hck13m-l1mjxV!JPhG=)01sgWILgUyNdW)}G~$@9xLINSA9sLojlL2>EWlf9N{ z`u4E->mo2!eh3BhHK5p}LUt^F37oNgEzZm+pd$yCu|I{`c=g$1;rFk4=CSg+z5vX;#0DtTpf~C{EDAXd1=DjHsBbFPo-FpX; zfpv$-VQ{d;R+IU>DH2nCGfA;{lXxMhSG2$R9a58Xm_%eI1z0FySMw2}Dswtkcr6iT z%htf$1}$(IbVBssUqFGM8lkK41B)w2pzi+Gv|@xmDfk9p3h!!EUf2O!J&IteT{kmz ziKXp-97*kJ0R0$jLz8J9cZa9YcjtI)&UgR{DaLfYLWw*rRB?7hG^P9ZvcgGeboS9M zamn>xEO5zx;)l>*OxA7=dIpcDn!o3TrZF?H=~@%04(r3FuB(Q1FAZVhKofdn`W9B7 zKPiN=(U@X?ge6zU(v?YV(DNAZM0p%`Y&Z((o8R%>nHvi!H^OvZCkkxHq1AlZB>zhg>Lc*qK78eGNsqr<4Z^A7V|qYeiLWmDu`cNWXo)eH3*inBm08A=9aHF<_!6W)ZxjuTIcHqO zj7D`1!2?P|XwjV|Y~K4jtoddry-W_K%=-nTuhNH2SZjj@5k~N2?MQm5l!v{#0raG} znYF#%$Bk2!l#aF-oD3v`7X>zmB!pgx^gqeyq9YoXJ$6n_7k z$5KiT)W!^Z$bIC9zVqf#)afqK@90@@ujz%iCxg+ODqx~ci{PUuhjy+9z~szo@SPOp zU^i_VIuBCDUDx~31J3V$rc=aTXeiLYRfFmKLC!tD7*Z45+@Jj90OQ4#FwiUkb(RDQ z8A}aWgL6KmHCoa985yi`*n2@@_Z&Kq{*BdtokS~)CkWelW}x8JJ1D=u1$HlZ2H6#t zgbQQpS&Y{&Cl||XR(C7`3zlYJy~4xV;l9@F>!RVcBdX@(+BdPxX#YxKle;wb4UZs~ zsevelgs_^t7fyrQGjMsd4bIynMTfr?vA#x@_|B)0J-B(71qADp)X*pzx?&&PK0lLk zcB#^fpYOQe<2h7x>C%{wVZzxaTRI^Q<2(O2!C;IzdAx{ZJ}*bpKD|lo-rX2zQy2li zzYHT+wV7z`ppDl&Z$j0JgYcnS0t1|;vgv%z(Kh{w&~M*iSYPu9&VNYe9mq(KR!ycD z>y@JVSbYrWDCJDik$C+|I?YRe2oE)fQHA1rrn$C1hG{$kL%w6*lpIHGYvhE;o=I5m zw+136rQ=`;UtAYDR8$|ahQ+1d0=Wm{FvR6F$ZnIu$dC{m^`U@v%9pde5xnF4OW zi)9AoQsifURM_iJEfW=}v}h*f@08gM$uyC9AH8B(39&GRgjHw zVhBnEWT^DHl5Q>tMW=_UW#i{ngQO$1#NpFpzspo5JFMjj{wY&pR zbgM6(`0dE{J8%Zd)lxQq&mb~l53)BQ;~`^Vf3jKn8lDY)#=;V%XxE9dTD=wfnVM1{ zTlH!KE6hlx+mC&5zw!mxy0J|7{Dbf3?>kV*IB9Avy2?BofZja_M2lHd@uEgL1?HI3 zv$O8C!HZ*RXKPGn$tnQF*8PPCaaHVXPa;WqKY+Hiv#EOgI`MaQ3Nvk70%=p6ac$lc z=KE8CR=s}^Hgh_LRz!iqFYf+zA3?fbmIysfbEx`T6e%VrkoE1W?CQp9(QIrc7OM)# z7S6@b(GzgN^DE*uSqo~~6u}*)QvT$<9~<>C+BH*DDb6sP+#OIhC0`A|}w zOLuhhV8H#6ob!~6x6eAzQN2KHJhMRjucMANONHZD{aH}umPEm&N+i>@ALK{NV*F4Y z{L8z`(zD9Bd-~J)mAiI{Kb-fo65qFQ_|08zqI9+YOCah4Ii&QfR>$-YDMzI!ln8#s5Zz?Xe_MwoZXPkS~41t@*G^-(mR1b{Z^9ngr=XHC9(SDB0ihqVn0d(#X3(LBBd)#_Ci%>Urm9T*ExlOy-DXSyN~7uh z=2xs_R3>))OrqQ)(adZ_9^2ULMUl-5h2C3cv~E;1{7UCJu=cI z)+sn}Cud3zO-44eS_n+N$4$6yf~OP>PRyp$TNJVXpj!5(ax__% z1wzDHMQlDkjn>?-q;;JeS*Df`T}soX>YOrI$a_F*T~-K7TC^xhbvPSha6??vnv9RG zD^vd#W1PkJSq1LT;Orka?Efhon%fxMu|_&%$NQ#cW}J`r9tv0fg9*Qim{)2o7%$=6 
[GIT binary patch: base85-encoded binary payload omitted — not human-readable]
z<0XcC(y^<=75XfT**;F%<82YvkNiyC+C~XVAJpjKD;snpPmYZ$q1>Y*@$4Ee-f#Vk z?lczSdAKKTl{gz$%v5x9o(T0XHC7%IB+<>;Rizpv5L6%hpx`aP=~l-jsE5WG}s^?H5(k`^Z-mUhBd|&x>eo_A{Ch`<)Cr z8o-QWdE93uAz0cW%8tySjyFrd_tWi~^7~Q-v{?Zkn5**qPlH%~Ogx@xiRW4BO1weJ zrA#W4}WU;?xuqzI7vy=bLIk%bWptA*hxXyeow% z%C2yAWLK2GKaIZqRpTM@K4{s`ijP)*q1p$dxUe-A?G^XX;gkt{!yyah4n6?&wiHa9 zT|#OO`mEpM5f}zJz+)GEiCOSM7bEtC3qq%LEl5q#7jl;3IW28-)V?$4&RZd_es2Vrkc3e{Fl+%CTsJoee+*9BGdrs@hjZBS>owk7a+ z&{kOUw~D$&=i>Wg(tdD44|2Sif@7UL;P$4ToYp4;HGeddM(ZJ=^R8Ize^QQNRRi&@x?P($tjF(iEf_Gz#OLw9eyJ-t+@p1OekIN1r?4@_at8-vk0v5ihFO5;5%AJEiKp8WW} z9kp(dp6Lg-gl;PDV6K(K&An8Jopl}X;WuN9EA)hQz3 zQnsjp9{UdyhsKAI{<`55;S_{JOYg!ycgpTGfRl~ON^)I;`UZ8pFB;fl!)nbh@o9Yl3c!NW%Sc&BeWq^anT+VFf> znigLZvDuRKd{xo%a;wl@mnw0?gJ{{p+wkXw47N{{$3us$_|wf0%I>pGwtk!&ue=v2 zF=B1-xvSK(td_$&`fK7=tt#1)h{tfYhZ(_GJZ5NUjn^Sn zm^Sevl^5F4h|bAaSm}zbQRQWOi91;|4z)4E+=<`z7)0!pdWD z-Q~H$N!=~*tF;7{tKOu^6UM^C@Dp^Wq&r577bIQzKX@qZ9=&S~(5-GeRQc`&|Ks`a z@UI^AmUfiJ|7GAv4ap1UR4)DQCt{M@RQNe133oRRV-=~lFn|1U(a|W0M-Nd$1-pG> z5CiZ1+!?ob+ejBe7SWC}Up~0EKPC+cXWeCnIA}v5*S?&BPC1iVODPt63`s^la$nRb zFope*Qa8*NciiyH8+%q7W7(EK{C?{Xd1!0$-r$Ec+RKgeM+8x#f)G&C>{6(@M$g;z?fc3Q?34$JA}&@*(nel#3>6@r~5f4I82 z2amVQ<+D#-L(PmKe00hPQobLD$q!m!b)yO@Y*Iy)55*L^i)+sOTn!o7FKGQh6Dn!g zK;ozvzF8!7Y{}$sYnwej&?~@^rMXyKx(tjwkBJ@c2jlLu**ItFY_hMIN69mX;?a_g zq7di^O#zwwThc-bcR8Sk+ywk(Y>m&F)bU4`@_CIer?=CIYUwe%yd2m0z< zB+EJWTyV`18ckIxx>yBUM{R(oz7z4mq+Osj$V^PyUqh4syc3>1x5w#uc3gGsAA}1z zxByIXec!pl_)AYoIME*yjUJGS#QG|p-y)@Klw>33#lt3Z6Yd*a39GMbiQUaF387x? zWU4m_6QW;3dDp>Mo+Nc_mP^^YvpQ(^AMoST4gc+;lvE zH(xsr*Zh9KDvx5?u04l-_^Lt9%UY@$b5v&d*%9t-k?uDp(tGdV6Y=DeU{s17#QlC8 zsaaCHM(Pr~K=bP-V8;zr{1!F=oo-9no9hJ}F7@5~?qiPfBU8D^_cwT2U4#YwTWGHF zpqgnVgE3`lXRgof&Aun{am49xo?;|Hypsi=S}*(( zuZz)`hK`PpVdsA%Q6uLx{Fh_NFWMi|_$7r{sPzb1qxQqtBdh7fSyeO(s|1_ayMont z4eqw~E=>&WfK@jV_0AdOrPn_kuIHJp4S`UW=mh}ex;dwe3ZEM>T@vcw2y8r=z+HP zUD;>#B>ZPBNLrj1dSCcJx?f*IYvyEr_m^Pk(Z0BOEz));1GsFHLPyIyd6blOSSaZU z0q!NxXT?+6^T77Lf?K&F=dEp^&@}WM)S2n=Q z$@giRe+4Cr*MyM84`hD>wt$#87K8i_QG(J5!Q+uXPM4KZ{)K&XV(<`Yo*Il>{a?_! ztBO+odo1hLNjdlxdtt#aNl%y{^(`e&V_oA+>UMSW|K54*LDKU$TM?HV z<#K(<6VmP*2cgjsyj-;hXuR10a;MLM{HM`;t2Bnpj}O5QR<8VIoFT7|v5~TZZ{Y9o zNz_N>D@%M;`j+MH`>#{BJ>xU9}(UwAI=(MmqzJVrh zdqFX`ooY72{F{-k%Zd}z4>N>4H|mc!m?%C;ik7GJh&Ocqt$;y)7zn_bh};{bJ3Mg?>-98 zr;h}UiXs{_dj#*i7Q|H}>S)mJNfiAwh94X=1J8yTtXruj+>K6#PnSQyr`g{G|B-rv za=>Zw{q$J4H>?GCauWCH9>GUGyoX!;r2Ol)mvl1v7tES&#tse>P-9$Is(#=BHa$B8 zx+%xQtOwxZ^MBw^{XTI~NpIdXI+UM#L;K1g#3X6Kz@SU!0KCoMPTZl|Pv)_J2)>!rJt z>HH!FEqf!ntb0Z`gW~wZ`Q1YDI3+BO*5me)tzxIQu2^oU1EH>_vQv6e4u7lx{~I?; z^v{pSLqi9n`bQr$-}qMicvy{OPc=~ZV8O$Ns?qOLM!3#MQ+($+m9Jeer#~;Es9XFo z>h<>)C3b!b6UMpYkwfRG?Q9O-Srs@;^T01! zyxpG`HDn-eR-v7%7SwFn7LDDnO5WBqM=UN<< z#fC;-=!D?IMcq49k0BXO-y4qO=1V#9{ZhAz)Q{1rU7KAii-e2UVp$3H?!PQV+W6t_or3j@MPO*aFB)K^lM|VU*rn0Q`s_# z+1d+_nz-Vsnx#T%))!%|g)J=IVaqbF@6=+e0C-J84Di9q!oF82Ahh-AV z%r3L(oN7<}n{k)ClfuPxiA`>F+?^L(k#V!`c#e;c;Xz%qsMO##r7kpuH+yC9q4EJN zoa@i4*6XohuJj!apM;~^f6;H%P%dBYi&mv=P%E=T_a;l!-!hH9m~_GJJOb1u&pQv5 z&hiyLeCDYeE|ap*6Pu&Kar<4iV@C?YnEp6Y)-8!K)qRS=+}97dZ)65 zwyrXRVrwf7k+gzOk`E?x*-(sLo`4s++!j7d-dsoht8~t2Ap2jvO{3a%IAi|=RPL3{ z=~AZe_J{M3mv@U!mTsoQXYUKgiwiN*ARprndD80cFG(XPogbFvW3c&KSom!%JqiB- zT`)!5I92i|#1W~q1!E7VWSqB8ft4nua^F*Vc+91c?0cJ|ww5LLQ@$rS70PipV`JWM z@rmT~84c}WhU}2qk4`72An6VhqKZ$`FS$D;#Ec}-K! zI+)zE4zBhS$;Uquye725h%UV-dATo^l{Tka!wLOQ!@j8Ry+a|!*a1u{TMae^@pR22jE56 zK$kTS&6ps9HW+Vyh13%_@y_Cp{YbT+*a|8$xi+KS>9_(wpA?aF7`4(!4fr zHI?qYMUb{Y^$UFQwzdHZPXnX+@V?hr$3} zTC?!pTDtFgg*ILwGSZa3dj;wK;NXB3(yqcaaxhp|ohJQsf4T4=v;4HCC*qU6q#*q}!? 
zTW!?jsIMb1)>zu}J(YH&{gTDIrP+MP2hgWhLpEcJBQ~zdK>15?G`He4RK1nHGbrGL z)yX`>!4hXqeMkpXO&C5F)3<$+mnUO09B9nocFTb{$!HsVInqogUiRUv6aP`__;g7- z4aXp3OYl;xgKc9QAj&|FXPz0yD* zI{NDPN9rT05DcaB$o#X^EjUw~=X!UiHxb6PZ@C=2auSJNpvNKWWIfPt?NSsrz8_^@%nAPAhQLelHY_cEGQGvA8O9 zD7DCKCacf~!YwJUWwz@uBrQq??~p_AeN-MFEKPREXTs8h$o6a%XDZFzlqV*T)zHzn*detUMWV&HkUY#l)nDANnKC_Ls%j>d_ z_Zd+*5DtG|^uf8tF?@Q0l+ph5Lg?4*%ms00#gLipwDO(Q?{QckTlMwv>y}EGCS}%l z&$Yx=BfH?e4?3J3_86jW4^w@!gU3NtV=x*nE{3*l3?lCE9iO$YrV!0l%q+N@kBoSplZ`t;C})-wHhh)y`& z9@(4U{_V#m&pjdV*jy=dITVx20@&>N1nhmZ7fdTz1fi1GC&uGdwZ)PWp+Myao#-Ep zad-2v`=}id)3S=HmmGuQff;y5?HzPI|B!T?F2goWB_8}{G%EgbgZ_h@d68BSCT)L6 z>1W;X#@0F+{V5WvRt=(ZiP`<4G?{5>f-to)h+T~$s3M{Z`3#D|E1?}StI0;>do+UQ zJ#fVLHtx9X;XN4FHyE|Pt)gGar9#cLet5cf8|B1gOPw7?c(L{g^*`TJ@V%?VDnCC< zJl`x-nY&(y%2-ajrT_iP{4xyKHwxc-=)$$TLpZ8pEHtiuC|=qq?bnK)!p&FGJ>ZQR zDFycDANzko@Z>Ub8as*|CL=ejKSKk-pvLd+b1=TE%fm9wIPiV}U(EBwJ)YY^VfGPt z*ZT#8S$3hf5?eb;>b}uh;fbqK40uC)ApXs%g8rXkQGWj|`1@7h{tpM?uHOn`XF3h- zCd#N~8ZOkYDkV4PMKp5jKS+G3#Jw%0e#R-P6dq`Z6Y5jAN3IJzn>d+MJgRBPP7ln^ zSq^y;i-<1|hdDbFXxE$BaKJQ;QzX8aXNe#7Dei?9S~B5?Q4|ldxlrR6Xa(1_1w3=J zMm+xeF>H7*&$A{sfYY^1Sy65hXsuW!UT97v?-)&P?^91b$Mm8*Z^uxAzZ1*mYq9ku z4^mWbrJOw@_|WzKTx0uADEieMof@@xM(}o8U>rsf%Zs7n*;LZH?EQ73Aa2_xZ{f*9ojVr@|^N1D0U*Qp0Pj{BqgYGcb7e za=KbR7}Vo>uu8yFsJHw`{_VEt<`s`7GpO$EPP#nL&+ zTsMp1+LalytFu1{X|rz7*iB<8$kYV~#dKe!47>yRju84Q)#P{zf@p_$o@MM|^ zP8usm(PPH(n!aPPU$iFQQVYb}k`G|YjyGUYGKH;A8snSKQm#MEmj8Ln_{_6y5c$Q4 zLk4$%^%fasPBr1iN;iD*aFo>BF&>Mm2E)i$M>yLYz|UuQ=gCt$L8+u2OnjZiqS7uh zR2oO?PJJWiKclgKgEwH*7RzS`+=3ZhkJ8N*Qy|Mk6}nEy=3DxOkodYo)Z3cJW%UMpvf>sk zZ0d_jXO-CR+fVU$xi4GwxkQ($qxeXv9<$`tmPXHPi23?rgiO2kN=*hpp>W z`F?7Xgcet#$o=8MxZnFJ#oirlZj8dICws7YpQ*U+sH(&?cEE2}binh|a2~2~8~!y# z;;y8t!f?&Wv?#Yq+`jX;SSn?7(n1`uYhxApuZzLE7ZiE+nF(UH5J(vpH%j+$Q?8pi zjvP|FIDV|;d){xttwZzp!vH-h_fnDi7fNVGS8KLizZ|-38IL1<9?+#}mmqlPQP7N+*bKuSfny$r;Xt`J+;vB$sWpbSpl8X+hJo%IIDm1W9PhFa`Tb)Yc^x4=4EI0 z49x_RI$6H0*F+=hWPI$kS@@LMpP#E~pij^i8WJ$PW;V;gzUO9;pVl4k9JXis3BlYk zGFIq_sD-Mo{|V=IcEEv^Cj4`M3@#iIhY<%mqo#EZ&eQkD^ry9?Xq<}5OI8V|h^5@?bQhfo`1^Sx9&($?u?uHIw~(haGRB(ArX{mm zk5YkSZRyk5FECzlinwTbG(Yt*$Eiop$_zL6<@#Uqh3+|hafP2U5B|>{b8Lw^3I6E1 zY9PB5I8b@oIGm80!*l90u)=HrpR<{Yk7jmdL;ZFrX|jSFcO}nYXeFIlW{+9LQZ{50 z!n1#m;Xq&oU9L8vO>WlkKjNGHNd~ycWNc22ao7HR}{urM)-5R=fsxd))!e`cZgPVIW^Tla0=4_FURJ z8NHS}a#8a@ewv^O9kV1BCsO!xazaK#NXG<{(F@fgs{n2hV)CkdUC%%pd{9;U?`h=Dy8Q{p`rj<>xlK6~a$ z8NZKHrArrF`n?zwSKX(muXC% zSgK>p_;w8y`<|iFn60e z_Up;d?%p7w%4OoN z-IjdT)|(wOKEcmxyCC8AW3oMd3KY(JvS*s43Ea_w%mGhmM!Fki-dsWXiS!+@<6T^*Dq?4ss8Q++B5#avZ)(ruALt4YC29ssZ|!yN%{<5 z+0u=c9&8}%gZ*`+&Z8iA%q;7NzxD*7;|xVn`={QazAf6QYkmX{+680tJ#T*c+Ki3POMRb9Q>gdSQ9?i0homR= z;|nF@IVCF_-j6fDsWNz8^9lHym_o;KjC!9aQ4?u73ZAp z4p+R9t18uTOL7QLyqM1`clYGw!#{~pu5r97Ck!>tvoNI77OHNwWsj3x@$|U^RQER+ zpLRM3!79Kou?TiJ>T-RzC{#+aMoQ_Rys>$7>i$QlB$Fu)rW8VfJoU=9* z{U!`!;~)27+Oh~SBSja*a%;}@Ml9W^ftI^-*;%_AY86Sj0_$JkVC%x+Bj$)|VV!u+ zTS+6V>PsfJ+q6+<@ZJ5sBtDvnJ7=Urj*do_|;J1 z7)gy8))=(K2WL$V#wiV}DI<2ixNYZoI@Pr^PAxqtBwvmaN_HpU>jgW>WZ6^rJEwwd zL${IEP!BHCK1c(loWV-RD=^kB1=TC1XVK^fO&NF!Mjub)o6j0$g$qWo`R8Gj|KD2K z0c{t`!V_?6c{!~8S_%gnCUEa{`qVjZJSt78rjD#Je7Vj64+SiQ_75VpZrcrEA##jo zI-|}@iM>6*6qQGG;-z6Ka9~y;23MNEy5oj8b&W_~yS|a|Is(rhFF=ip1JLcz3^seO z2Y*^ZIFY4p&*kr+L{pPC{7GkTHsgQ&t@xGYKPU(^#fP~Oy!gx}0J9)YHd{ysuh@$N zoEnAguIs3DWh5xB)?I>&GvQs=;L1 zd-!zYicIZ*9Pey9;ar(+#w|O_(QG=j?-Q=h1vK zN%D5|8H?AZ=i)cz&ybQRW%>?yiPImofcpCG-1S2izLf6AesOQ9AZH4U=&Q)ruKxn_ ztbXJo<=SSdWJ)<+cj~HrN=TA?IvXth(2}(R)ph7|VsQ&u>#N~jXK!gQ@dJdvLwIgr z7`9ZefUzWhrQ^u-al+kN zHH?n3V`qagI5<@f>J9wR{y-rN3ETndnl;qvYA~uFTtLJAycgz%WbzJuHU1(OC$8RM 
zP8rrxN0wy)7g>&%biUaXXRHa0iPQLS4>@)}(wp`3y!g)P?-2Ga7_ToX0nMoroJ;bl zIC^Gr=)r$tyQLmE|Cqts8lS^w<%Klx<}XohaW0O(-;bv&*q}zTB04@YfCcN5_|q~u z@nB9ar|F)8;Ie~&-wklt-cSU+c$;QnU?JlK+tX6Gis)VFJ7Va3*L zHt?c2&hIwOmbjr`2YO50C{f&;CS{Y*h3f+pV&(D-~R(*^_n*@cAropj)4^k{KO zq%}_YFp0;12*&CkCt#nP4)?Utqh7~W!Y1z?yzsI)hhI5LOPZtM#q1b-Eb$;-bbCrY z%d+sM_Zb@NJPNC}tMH~LO1y98Z@5}fQgh~p)Zcq&J~^sOJwhsz(Y)P@Pd59?babL= zv}!dBkbE!dUsQ!}#%t+QXuQ;)s3Z(^wBzAJhjD>{8}@K^!$SimVx>-JzNcn}?Qw1t zSpqe(5Bp*C;sJiC5bX)47Y4EaPNe{e!2DIF_1Ad+)Wubg zni`VQvJ!BBsJ`oYh?EcE^pi1oexVkg|CC3*$1LFGzwNXypf5gOrG!5vex!xOGJT~p z1BzA83MpkXaq*0kFn(krubn7mX6JQ)Mz7zHH7g4iC&j_}N6}#3t(o3^=_i)#&!d%D zn;~FYB^U>IvxW9oh_au`qs;U;H9Uz=%Mav`#$a~2;v~d2d9bp-#86*+2pk%vnd(9m zf0^@!G}0%c*Y0Qze~`ol6NiZH^L|0EN4qI(ZLy%8)iK~AF&10WQqlc#4|dCP0==P4 z@a#nb`)yVie^%;Y`kk$`$NQG}QcjyX&6~+;LK`$>+=8rk>X;g?hI5DAhk7!{Z+3?P zZlr)~fse!_I3u?0S_s<)50@=DRU=00&jb0@OGqtkJH*5&;H+l}oZ;CZWTpo2#$hoM zM@I53mP%cj{!7KOeVN#2stC3h9MIwIAnwstO7E0!lZRXk{_Ugy_A6yz^?5eUwEj&& z@iWmY)Q8Kh-0*c;Up(VH4GMDWYb@KOK028h)*sfw;dVp#W2+~AimMc(8?!O!uTcs@9<16AiDQ~@1 z&XhgJmcp9Cb=7UTN<8&-sIVqc7v*XaIee)C)@#U4l!jV{_FIQMHNw#@oT#^dT}{)_>9M`1WNeg0BY;hTxnR|i~7#m8=n5q%lsHLB^mMHn=xb|o-%rMXZ|Flx+Ue!R0 z`~NQ&ChhgRs^QS2{@l^(3f9s)?DEUbH1u)?f84H3)w3e-wOt!jso$r_g)8WUs1J6o zK74(51Z=2LVr}o>b~MoxgO* zaIgD$$bD7^n^mWS+3rrPam)-`*2~jOL&*nYE1g2u<*_&_6|dU-2K`OzNZBzHC*_&r z_AP#x+&3F_*Qnu_FIPeHmc&j~h{Y{FkDSf+kL2>O3_Q8oN!Uwuq_#y`%v8Q4`83G~ zd?E_nN@MV&S0s!usu${JeI>bwHK01P3=~GMk~BIcdiee(RrEgu<(D0?^M~=+o{MaI zZ3@0sGv$Ec7sZ@Kl3$TL;QVY7bNmfZfQM4Hs{=BQTZ5jrBD(Yoq&vU-c^($rEZ9yJB_n(fBq`$^qbE!5kli&9C z#Ai-UxQR8T4Dw){*YhLQuJvHO=o56^)rqu!R)}{J-oqqeI_B0Z2Q!FO$roEI$2msl#@9Ulw(pC|J1oLYL*cQF2GRiwj*0lzx#f(uGrIc?!( zX#0FcSbZp$uT9t`>$`0x`zRYh<{2&QF?t5i`OpgzUET{ecP^3ph24;s@gFRi_C(k< z|2kYXdO~dqbHMsSPu9MXh%=n7g5#j?!t@1`_}trMpzbalaeD#rEGw+uHXb$lOcWii zsnU`AW%Mk*Cx3kS45pt_q0wrcq}-e`w&p6~(9YVp>0&GSN6NA52#Fo?Hw~>W$b{Sp z0w*>Phq4@PPSc(wE>T_!Eoq)ys8|VO+nQ*$yyWYCd_btKF~tT^U5F6$_}GGBG^Y46 zJ#)H7Clq(UlN(9+Z(9oXJ@8wa)1Sh+>$@PnFo(WBoyoQ1B$naV5FEdtn&NivBsZH( zoD)$7YJH}0PuF;`3`oM`DMqjnqxjglbtFIP2pAVVguV|iigDY-F)_h6Kk94eA z^-VY7K;9b}8CF4lXb%|IbVvxeDTkLmhT_+XWc2=coCXM5^eA2tQzLqVY28|JY+g7f z9ZRE+KZlAN0=n|pkLH+kasWBlsPTU%f}rQFGvM%cCjXJm0?Wh_3`u3 zX?$N{v)HX~4!t_inTIr0!YIoi9%w!iBg~{8*IjnJvP&5H*qPAVo_FA+(l;7DeI|aD za#hPGYB(!L&xP!nzlCpuE%2Gd1wA}qyU?w{7cF8vVDma#_Op`u6X-vzPNyfbPMdaRAE8ed&goAQ3*=F83c=PHxWY37k=bEGF!^y#-hAPqUs3y85 z^(C6B4C0_JolxUfgKWot3*pl6NAM@+c1@r71nytai!Q4fLbKv2A?9i@MP1k7Rpkq$ z+`b$uR@+j#mkPezq=RjNe`w&nsqAqp0L^B9rt&i}Ft6Z;xL6p5tS{|XP6R{rl1QG@2l$iT0Ws>oWV(5JBBr(Z;@Pa3`1X->pBR)z*An&6e?T1f zx~GQs6C+W1QzXk(N$hs>MKw+RRI%`f23#_#!TkN8FP7L_bx6{zGpZ(pQfclu5JfVsDh2=*{gq<1lVv zELWK^6n*SggT6`0rRcUU*VCj~W~m*p;8G1-#>Uk-Aw}!N&$u zE)ZW)mu1hvV~aKJP}vK~f1g0<*<$#6;2te)-!8~0rQyr&y?9+h9Q82I=Xu?1IMZ$t zzPM`1;}3776;I_k?fV7rOdrqBMm&|hdZWNku2{>qW+*^Iz%{t0riS0$OX=0ZAkx~W zjSr$z(f*_|_13L{Ju4=V?*T`)_Hn_@D=hfAza~4XN}TsCIr#Ff92Tw<$f%;Pq?sYl zk-6dm!(y4ui46XI)R}8qj*_!MZ&s>w$20$$==L%>_69OP`ywj+FX-}t<%`qdqe>8{>cRmQFoo@9c)sSJ-p3H--%hBKhzuaNDmBE^t$31j#RVcXyGCx&!XW+7I+A1g{)5XXTF(Y_2ke1G5hb8)C*`Zo4HP(D#D=xn;sID`RXP z6Na@XgQ&+tKm1Xd#w)8fQ`YD4xb4?4{QmU~S(+Add8z}iUYv;P-Q4k_{F0iMcbjPs z-WP*zbm5A;mDK<3Ucfi@+#Y&Fygt&Dlh1^SeqFusNzeeH>&clo;io1J=kHW#Ie~xc zn~Tv^?#N>_VO_PPjTT4o0lNV-ZR{rMcjj76o3APBO!Z^Hdx|Y z*5S4T(*AaK7QAWyD@>5Ow0=I2=Mg%oxZ1H1=008{h_PRxB}|TQwDrJl&nELO6EmE* zTiT&$7^8l;IR)yi5;bC-aM!#6cw?Z2*gR%3_UWO`RhK+@`j9ykZzRtKsg^kJ)G|o@ z<;8u}qgg0T;SliVQO(idF{WcHEd{qGvMl#$wKajh~ zJ(p&LB%tp6I4f3-o}f+8q+y9TzMZ3KNm zoi|<`NxFj-z|`xgc;Vtv$Y}TuY4>tycw;HueJAy(EmOn}y&Z5l--U1c1kj|OTHOC- 
z26j1jn3RgIk(_A=tv&3_>#m5->$6J9y0SMW?s22da)tbOrX8F5uY?|nJ@B@NBBZd? zE!#aEkFVD)E+k(!97(+O<5B ze2rNXanba08nh&d|8$GT-Zr+_ayS78{uf1ld+(Bqlml7k=}$&UUBuw(pY-`;1-;ym zgwML{fhy4z+9MUIVCs0@ZkNK#XMBR0i+wSD%N9s(uBSO!Qg5SWI-V~10IT*4;RS_= zomD+SKKvw1HpoT&Q`4wrekW|K%;v8_so1b`FIi1p3FQk%bMaMY-Y&6ivR)hz4c^(v zn!fAe+OW~MM3yz*q+T9CrU%lzo+ z-)AtoYl%4LxWu|O*a&a>P2z)Zh6pL9>tV)+E||S00v*5Q2m#|R(DhzDc$7{78*Kjt z%Pw@nka8)5cW(mOdrpLRVkn#LKS|?5B`yD8FFyFYKW$oX%1iFJaqrj3^ul8lmK+;G z)zUro*nUSoIB*>9U!KQz^rmyGLJBt?isG-Hfx?o#UAg&FG2OP2_{_>5Dal_I2fvxY zXJ&_?_wue>cYPcVTJnebZOp>PCxO(xEnm!9Kc8yrqg~p2N7BH3 zjx?mb_fD_&PDAou|ACL^dG7nVe&=_--(w2KFkf{9C11RS7hm=wNu&J2x{wqIZ4myKE6H=_y(CntifOFi3Dx}KF*O%@YFr_tw=!Enh`fOT$Gq{+|TO+EZx z`ZTl2Wnc;&T{4uGz8J)=CT7u7zqRa8OcuqdX_M0BO}OAW=N9nZ$DjUdnSAFoxLmvr zC(bg(T6a@WJ23(d^}mIMvAcx5+f{J$Kuw97u`#^NcctH1Rv^kvk$mp-r0&OK;i#Gf zBKYjR;IR&Tjaw|ppMH-=-Z|i`W0S!($e3zP>TrmsAMA-A4twv0;~TaNW0xEk-~ZKM zdEPTbj~)HUT*`-z#0;n524h*FdI3|~>5K+X4Jhu&aM~6XK(^9_uwZ62`*et(%Pv*m zMC(@kC94M>v&>+F`cW*JIZKpFev3anf+=)*BMuvvLr11xWIdgnztGLyH7i~<&0akL zCK$(KKfRH3*(wxLa=tMGZ)3KM`{8S9*5JLU$$)!PaQ^MPY^tLl7_6#5y*G)_+7m%4 zKL){1+cWI>u6!D5>_{8VsL_Zv8%U2ghV*T=v_2^iVm7?Pl72Eg<4zKvl~rK4PAlH} zy_EG-x3dY;OvJN$?+V9zl}geYlVI5p#wy;dWurW12__Hw(~8~vUi{qvET3wy75n5V zYitB4pRxiC#hrMg^@O;*x*1gm9>k@#o0-r!f*m%VLpPM1vT%BkFc?~$`A&%LLyR8&m)QglBfFTR>}0YJZ9Wi4 zjT3j_k-wa2EDREd%6GB)hB@fdJ-^9#>Q-^}ZABPcst!kZ?`C4ZKVpg7M4%1#aqh55 z;)PN2bSTFa&TrYw*3|jp&x$Uw`hX)095)_xcPYcK_Kj>#=S8;xgLK&L^c)$GS=;H6bmNu}_}@@x^90LA9rxZ4w%Zsq*I3inb{9G(4Vatsi|rdW3Fgha%&>nL z>)540TN`3I|7|sHoFvCH*&w0tegf=H<{UYG9{n_=KN%ghr{Q}XS#o>>>sb4TssC0a z*@;`(!Ulb^?jOiIfYli5>BZSGe78!~4JAjDz~6H)3@Z%E)#|<3n47A8`gSaMe0o9=a1F zvVuu}=}2r9ZScZp6?FUc2VYccgM6D7=*{_!(+(L>vsWgV2Ti3;3lIEvrW_|e^X9!8 zMbcm9M01K~(#w@M+0TgmC^!5J6LyTH_4h()chMnM>2Cmcq_e;jmEg{#`?$o+wO{dH;;4_A{K#+!{~6_NURBoGf_L^-SWC$H%j|9qh#1 zZ(^=rEZzDY12x**Wp2a!GIp|baL0c1e$tL>6sE(-8b#VB>qIiEZ;7{GX%ai7MMj)| zDJ{st{(PQe)n5_H8x$z|?=`!P&PD;L(yLGf$(Ng@tl!00=twCb#W%ZIba66$HI5N`Hsy0RX9xu~#Y4EsGxRvN zU6{}*fxl0>(e1P$mB2CNq13sj+%_crePh4FU;_hh+fQFGjNQ9a|i zz`jS5>AnIMx50-pw?Ah$pLSxJw4J|uu->FDC^;j zSEr9mVd6BPlRD77bQ;P3D*~IqUL>5)71dRO>D>KboTpKa6FATO`u9y}qrb$^Tc*n|}ERT1%ow{)c-@iAcfi4Z$u6~zgIxk>t zY7ZoxgB|GeLkkF2cE?R=L&;#^Fp^cc;MS%hz>hm`#rL;*_F!dt19u(mOqf;^K!?$sFFymRc<&eui3)Beq78hB}rpJ(P@0<{FF`Oxyt7Qqv_iU zHv!Yt>EfL_Hp6u#tIqSG6;sgAXZhUB>>F2f&2% zkK$x=LZkWVEJdq7_}dR+U;iti(r2-xS#Jb-aScN4*cmjr{~so{eP+YvmVkzH6c#Sm z11r5W=--+SpR!e<1^P6CRvPMs@fi|(#RB#=aciXIiEF~u^An~WUp5!91d986q5WG&8BKXSE(b!cbyhuKaHeV zlMtRywxF{4f2=3U50ao{_;7e?`3?Nuu+CSRGreX9EO_`jv>?wS5{LEEJ1UD6hDfr>ep z-z1S9EEgC3R<0IW7k|_4zYiC{R!!rZPvAb-_M*$yLakuc> zPi(}nqj*d|oKC!c#HuZ>xTXCYf&)`sA+2Gf*m=nX#+qM}XjQI4XFF*MwOfh~BTnIj zF=OFDk_58P+!PYEC4}4hQ~o+TxVhvCv(|sew0V#J*Yl%I3pxLOkJ}Sg-OmVnRw#-L=0>}47pTEzoiIwS-oUyyZW0e{&cX-Z_Tzxw zT6{KU3C|v9!m2gyRPsPXJFndsbJ8FD2Aji+lzR45u^qQ3=fGZ`t$h5*=WKcZ*bm!g z4i;h1{WXyK@9+Y&Rv=H#cq@F83_ZGoV6|xxjI#R1iq2hQLA~#?u}SeXIoDV`vfhC* z?-lUbMkWp!Ud6g1=HmKCOWCXiYOugbmvUM!v*^rKc&FxRg;YXWe>>+-+ z3532i#HaJrp-ol;hH5SptSXdI#-J0=TMT9vCqwAqC>?0<%L13#25@dp2j87Pj5DTM zf!yGi!s+XoG$3?^pzotV`&Pb?T<5H@_QkU>;OBbgc08YUu8!l*%5mhC>I|1Jx{^tN z9K0O$qUla=-c?>Q4U#CErY>IHwCT@icvft}zPyyM&Uh)R9ePf5eCS4-cKl%{<1N87 zPl}439AmEgVqn^i04i(yjJX!htm%Xl-ToW`2~`WxvGhKx*0UCl%>BhwOXTQx-8!84 zZ9M%gN3ChP(EjD7q|iN$m9CzSJ=Nbacv(I@i|K_84gT;^ zWfpfeSc8m?49qPPo64t5$6kZo*cy{)menhtG^^gRsS2hvAkvI1=S5TT9xWRGcqbe8 zt-k49xF2R|mXJ@o4*WiIh8=bo1)dA}T{6~-?1tEh#=M(uba))P=Y7Wu;T>#B*9n%f zaxhKEd5f*u!*HlnBJ7{ONhlAkW_rajqRweEnlL+ten&iK_k@+$5I;;9dUCKa zV5L}e_Y7*wBKMNW!n|N}a$IIZ#q(3?{_}%4s=I`K6wRbe!#0fl`Ur3EJ-f^g*HGGy 
z!92-#QR-C))b&5W!j2Dvj(mRnI?)eSy{bdsUqA4Y<^VFBuPnL-Jiv&&4AA-!2T|cW z(DNY^N*_5e`Ax^g%=99`@bsXpjjAr$nMne)ZpyaKbNCppC$Q1{aDvFbo zgp3hSbf3jLb@rl9QY^@+PNlWN3f8492lpleWPY58H+njl;}>b_7n=&|aW8ONPZ&(l z9|rH;4S0`rEUbE;Mx+1Dr{)QBMEts_B%?j+@Aw(e(Vj8OeOT z$hz&<+#jWDcr*LIIx0v!SarnS+)! zsEzJK`QSZ#?|Bg0_aPG00-|WxuUq(T*=9EN_$fY59zq{D4_|6mwPGIJt@Jo?#@5kUNH@k7MYOYA(yMFJbq?+-S1GZ}c!RfZ4HiO=}DjuqfXT!iM!D z$CuX3W=w~0Jf{=Kf+@+z?f(v-(kzZz79v zDW>1ga>2j81Qy?sh$l4?C8x3!X{`TCwrK7fG=8a$<^>3JsZoGy5u%p$SYmBwUh zgWnffHge7k*yL@1Pd6JwS-U!|{$~jx2WP;Evxs@O%;CoRN1Vw>z zL(=S2`+ zyNms_bAzTAEo^g4DEvxvV$-jC(7b!aQ2x-2&6{OLZ$rzOk-7|Y?9_wfU(86b{=|%i zk`P&w%5p!4((Ehym|V~_NS(e2FElZ_!QZvo1${-$2`SW5lR!l)`JBbsfvgAg=B)5% z%;c03oS26A&trw;rl&PbT3p%mA~1-$R>`9FOj~H~QsW-d4|vNxn!e20D4AQMLr-+W z;a<)p_;;!im))&In>oFyE8JfAEE5U|XF2QX-~@8Wx1;*^}5j;A?{lT33Jym-YdAry) z#W%Ups-?qW>y^=z7aNTg-8S^x;RgfrFgUP+XGAMfV7R{`SUEppW+#8MEnWYy&bw+b zZi+iquhxR{Nul`Ru?qzM6j_GoPR^N%kSSlxIv)9>#!Y|F92+94yZ&If&;?^3b9O;= zHM{dZ44TVBY0KS7)Yj!gb-5`N`En&Ya9Ir|wy5EOC6!EKohv*&Wkt_M4kgV@dCn(Q zqPr8K$@jl0bcRX8nuY>Ul?1`|hcEG6fdgG@83^xc3h9A=KWa=YqzzT8@c6U!qIYbr zp!2;<$Z0235*@^@=T30xYygE`>B2<*+gkT94?gi3XSm)ry!vf1PMXw*gt~i-JqZR= zZy(rH@SF`8bcl`JnGP>c6p-zJDKJec8zz~(#g)4vY5UxB?34K*Rz0BsAM6+dZ-d4| zoJSEWa>$|2a?8a@%4_gS-4Ti7TO}$i>l80Zf+hA&ia7J*Q8s1nX?E^_FP*up1CLz# zK=_?-*!3-&ZJgcz8nIDpVkQ}kBnhP&1O6n!ZX5SPH_M4 zJPQrbpfNWl!0?blm|N-z+uYMgXKimvYP-$OZU|>a&L7y$SA4(QFqa0p?qZ=a#Spc&xkJkmTQ?r9f`&rQlM^7@4vgZ3pqlvx9BTMahZ0FvWOy97QY5mlv z|I$5J(TO3Dn-N9#VHnkEU16IZO@yqh(Sl92GtRwX4Uz?$*lg!LxTsGJ&X;|Nof*Fb zw>!Q}@*y2`O%!18e@eJbb2d|xoEML;&IHSy0raZjFqU?ClJVdVxKX#09hGilzaJhF zzXq+sPfhnQD)%8C8Z{UW6fS3#<>i>~tpyq0C-Iuf8CKe+K>yT&VMNhP$ZczGI`MN1 zEW13ATCO5Xne!P%k0R=->IdIHPNIaf^h?Cd_n$(r{VYlN~}?e>ul?=tm}BE+NYU=a_D0AVnM86;J;d$v?+sO%wc7C~}Jz zJC(@&BYZD?*Cr3T-&=`Z-|9s}^rM)IlQrjh@5jF3Cxm(HlwtJM`S>I|6pq~)4_P&n zX~s#;l(_2v?TtqG=gCBL-E#z2lv~hR<$>TTKY-qCw4`2h{UD>NmoRv|G}(Xkg7tiF zdcxTC|N-iyal>#0BDmOEpyGS#k78fE9)$K0AoMLHu{#A++XV1cN^%QtIY&R>s6p2F`I)t4mIuK^@ z9V?WL$>f&5hIibYCd9JaOVkO?qs1={m00~`3cU$vWDA`pQ-(bM?gS2@JIkusS)Vl4{W6}i zRkdN-$rAX%J% zjUep-(eT7E1UElTr;Nrd%DX(CMvY0MXfFxvdw-F0-ue4Uw@X;AoIt6I70JyZ5FBh@ zW7fKbs3R=})6bs4{>!x>{PQ!;Dof`xw7D4msuou?$-pngK-j$|3FBk!!2XRhR5;h+ zfxU4k-RL3iSO@S@kFa^%S$62)FnIe^8cU3L4`j@Lcsx=>?Su)mO^=^*2N_fEw~5rB z`<`+-=CbrAlC&JxrF!`x)a51vaTZ(9?b{%{&3y~$Wus7SZ#Ml``4&4;qVe7PDP(1E zh#d^{XO~LuA}{vQ+$H&-)1<(Afs4er&}{HLRzx%GisK}H#nnuxWX|(j_COm658hjT`rStR`6V|>LdsRy)n9nF9R7&8Z`(iu6FT*y@Qkt&EmARs$3~H$og4)BEuLYYmRR~0^c9u z)w9{w`Rm#4^-Y3ZSuWjS=Y^u&cUb)8KANqXK|LxS6t!V0d8zO_+S4qE8JtOHMrwd# z<2}4*J|65pt;1KlS}}^BQL23W>2mFPc1Xh{u22bue{cSUWsq`NTsy+Wb=Y=?mqi>+KE!4B6#mu0bgf+5L_*PvGHjiF#7f$)Vu$nX^_2$-Ki5`Wzr0Z zeccqnaHI~H8}AlZPECfPoVill5XzQY&xFz*f9y(fgX@{HWU8J)r7J4&RO)BepEKJc z268@gq6v9Kwu{}<-SOJn{&@OEA6j(Vh!)SB0?F;t@Tu_xD{<$p)cb3hIdVT=#$@WP z=LadDPvD7-zN~q#51XPG3iI|2YVs~VDy}OTfIHG1;K>vnQKsbqbMuy`VWWmZMxP0E zWgBPUa+Xwm;Y!RLAi$wG1&X-n3~%jAKyOViIC08^mdQ;b-I_SD!G9uK-I?$5Zft zUbK8=0A-L8g;uw)jpvHs@V7^-dBG$6^e;;|@E!&EMX|JcWH!B>G?{J}8bHo*b9NIe9h)k7@MUt zl|CKL5LXTu$*vyc9`Aw*EDt%&u7qgNl_g_&E^a{u#>f_IxQ4G*xv)RV@vv%1JSjgt zih3WpCuX69%A0*4d=c+yn=WMIIO8+cVLGje<+=I;25+5jvFE0oqcYfwg-AxzVTY?s z_pc1Q?@$E6^Fz<$Ltbg+*$ z8;jnw@wL#-2TRbQS&juVh}|07 zHQ!S~i#{n}ocx>Rt{(#N+;g_wa^O$r8zFR%JA2!;i&;EzV*hUW(*=bB z@|-C{^Jf2I4tx7T%2-u+)wU5wnF`{Yy+*={Av$oLJ6N?quBrGzDvYjwAbKvDMpf_c zW0qbfskQ-)=!~XsdcA3{k}rVObsV3r4fW4I;w$F}Jh9uH+&Z=3%-b0hOWa{u+sY1C z?O;n1pD_h}eG=m{C^T|5>lLM+qyvGgwa93}{ju2km*P@Mz#`QNt#e#aO$5?!hWHb*vIdeLKYFeUJhnpeO^t-G_6sW+l^S=`1p}t3{`45%6ETH2yY`gUN9z(DExrC^)%|`Iu}L 
ze^02wD^lKY-%Af1N>;IP9(okDr$0K#je;4MT;LRz3CC3^%&i|SF`EeI+W)X}^ETW%z(BB_8p>YGEP(8}Q|QD8V;1mN z5v!cJ2RY-lWVWL_$gF?EuGU_{VD(VeUK5LHXKvu9S~W1u^`+ZOs_^?s?v^SoA*(gj z%-SrCJMFe$&6|h#&}IyMvMeFx4KtbY(ROB8ISZqOU*gRR+r;p%Be_HQ5}VSQ1ddAz zY439b+QeN}*Sc@9cx6+Xf4W|@(jUftR(hi0&l9YF_F^_oVJxtL-r&QZ4|DRv1nC#| zF}HmuQ(F#Hb>4{0o0`R*Fi$eV(RFWI$73$`xi6+U>Q1-e6ZsC7(-Xb1j+1sLRnz_d3DS*ON+i#Z#MO7Tnu96byJ~7GAa+CBdy2s-*%J zfy3eY0W+9VIgxse(t{0iHsF_%MDWSWr0wRu)VJOdvc1+Y-|i{UE)xf}wS`!I*Oa?j z2H>HxbeOpY;MA^Blr@JlJOc;O7M(g)oY;aX+nU+$k)@zmYR}nen^^^C+n@P1jbuj@ zKnUMY$Xstt$)}WIL8LkRGKps}4+3D5PaFHAZB1z^512v3N^FUY@xd2(#W%^%Evrv=XTG;6A=pH5M+RwR~hXZxPJF}DOi@?32};XV#v z_Flvp5f8Cd!r39um$8n`RpRw6yVz2Vn=F-QaZ|1}GV7PG#p^cyRN2pnr4@M7Z?7t5 zmKaFAUd2#34?HuDC!eyRv7dyv3_d?kHU(3& z7B;(|BmA{fCA|Zo;60(0?Uio8)H{vD>KkBjQ zVgphDs;VuCO}ofmd~ks!8>C=s>}sZPmv=w@`zfC78^IpV=wc^t%i@~%yr;O)ldhaO z%>>P>Os3PA+NSJgHix~rJGxAC9>jCrnHPo28{EW-Jte4_&;5f6cZ5bu-g7?g!KR+- z&t^5G!uz9&@K&k6DZ}FuYYhneZ8|ff7WEA%yDZ;a%MljN|Cpx zz$K>(nDMv?r5jFRnxQo~IT*w9++xs~7y@Gohtas6L|ECS2zNLB5H7n9pqAeeZ~+vc zm){GNc(_9HpIEf}*^4&R^~KRry5ulDm~D{q#NVc_u;t80HfFOnJ^HQ-PcCvse~AX= zEMxRk_BC6vb`(_bU3L#U0}O0Yq}@v=P@gMTgv&=nrdYgIWLks4Q7RlLWB~gcUnwjL z(}4#Iudx5d%!J0c3~EzZBY`6dEJtl7{Jp&bb2zg$Vv#;etd3!VlsRa&{ASBvbxDHq z?s3$7pdZ=B2~`tjdX zeK`B*fRn(qYAg~tZiKB*D;!7IFN>h+rpoViY)%RCB!VY z#Bskih=;Bj;&-XlSa2o=OnY}prfsx<*)oe+@A3PvG-H&IGiVem*v6yTYlO_B1pp;$n8}I=STSq_*+1FAl&9k}CEdB4zN+$dfQ8wQuQm%{MBT7vlMq|mJ34sLNnU~uth_%G`l zbG6Zh8E@QaSn&&XYP>6LX((fgebnhybv`A#ox!Td^>}x)8yGCLm8_fk7wJ67_H6J};q&cfQ{VaTcgm}_>tac74WZ9I^Ik(cXGSJ#-_ zo8Pm@yYcAoDi~kaL{poP0BiS!uxkhEFk3Z_+&_* zlmWenF*JW_2GqV7$->vkFvmJg{M~wlsh+by#RJn}`s+a;+Z;^?d53!9qz1MvPnwz+ zo1mg(2;H(N0441N$dnrghp(s+j+w`jv!;uUF>#P!sn48yl3>ZcQ)1;D{_LneF4kA@ zENb^14C5V(8)qZv-my7o7#Aa4?6-{h9_&X_5hEZ}(}%QwTfk*@v}t;fHdOrNF5UCW zB+EbVMWMZ5ZI(0d{BCBlw@caXL20n*+Fn7u#R2TRhJf(Hn9iKOg!lC(l97KOq4akM zEzt9!($pHXcDu@~Bloi3SGb$)iz+M*Ze`4l=Yrogn|wUaGjH8=3iGwc`#P_g$5U@u z8CuHk;JI|J(j9JoIL7{d6>-=0m*{=_A3oLj!M4u1EzBr;E6)GN{d=l#Bt&+_NAy3(%~ov3?m5Y+x0P3pI8py*u$IbXTVj(rQHcSo#1W%qLScLtxOZW|5B z7bAs%-f?j6jz0a=Imfmha-#1wZ!t135^RnZgNKV1J6}*Q#B~oPRE=W}m0~gr@62DAFNyBGhWPUXFqTa^i z@oBsdZbMc%8n7<3LU=c_0qxuj(7}syIwyw%9#sk#PE}*n!x%d7b2#imOUbQ1 zoI~?G7q@g?XEWkPaMpPTM);1R`j{k&lXGN&54%Opv`O^%;CgYtv^JgVw@t7eRtT?` zdXnCsayI8x4tK=G;`spy!i%;*`~%;xGsTB`2So7h-dARz90L3`fed%PI0^A;Iuu*HhN*c!IW}jvCoTV@3egi3+0F+)vE+<1=)SnaY6j24f|K0i z==6Z~dHjOu@#pvumja3{E2O&@-N;kQ62mOg$fI;5txL>fsWZzE> zLm3NOq5PQx93NYcFX#29;KjVBA(5u1d3Lm5UnZSWaE7^Kr;AdbTLtrpzMz`ci9K)Q z*`0MEtY>i{y=Yj=)=PTWdto{HTGTYnjd_cmZhwXB(G~2}O-*VvA)G%X5=%~d6786Y zS$ET6%Ir2_@}S?sr=2bANyQiOnD7NFNA!bCm(7CuHy`@xA_u-oD};Vg!LW-|scq{e zVRBhC4cK>vJuy5hs02IWjgDV9edsRcXf%~{mY-t@YdI(ONH4lqtOshk%|iPHRnTI3 zn4We@ILp~wRm(ODDhu{v%lIq86+L}+EV4h$(O$$f)~#iIH;trU+g7rcLO05)Dr0L6 zHAusJIJH()u&Oh=vFp!v3^(znXHHWo-`AL2=N-n$=T~6lw39d~A|ES7FPc|xPNv!B z6d4o_8ds%g%R?s!-|tP$rn2y;MN0G;uwUYkw+L}B_qQmoXE`gzLa_04(inVLvhlDx z`?`4}bCp+u2TQ*)ulNA+{ooHf)3|HiR22+r6Nn~?LZ2UvV#IR^ST5NrwEZk)#<%P! zIa`W!X0OIM|21Iwo$KggtqZ^8^O)4KRPd`r*EdJ4K=t<%d=xbbMx<>KB(t)i{GSc! zKOIV`d#q^tf76>z3{w;CezT(U+uB4czjI8ZvmdA(?FZM=t>L6$gGA}WCtP(qiu{xY z3r^vS*yOom+4IOPtkY&W9;=dvndf=0{m3x-{Adcp#a9r4j?{@+3BWQ$J|vxq-u5hS8KUaVznVLm-u-kkjr18yG@H>|iXESzx! zzoh@gFFJKQ-d(gxoj1C@p-n4M|epY_fmgZ`V!`(&} zboyGn(EZ#U6h7P&Z(e`dsX0`yX9kKl1HCZ5d}upC8O9$`3ulD%jWLM7nlX5MgxzgtHNRX-0h6PX}*sZzvBTF0UN(vMi0w+z2F$b$G&hf??LWyc%_!8&~r z1GhGdf1hMhxbsxz-OmgbD6SDhf)=9N_f|apkh9^_Ip?`*3i)akLa+7DQET>HW}f?! 
zZ3$nA?!&obMb`rpe)}@p-?H#A*B6RYWoT<~0-U`zQm}TiBozfN}En(Lqc{6eMB^+?%1H#R=bFX%w8s_zv5Y&f-ZG!zl0NKmJl6SEfoBmL7!4au@2*3!o`un^x}~s zn0*U^{4rx#-n8+cP@D$MQ#azEf|1ms%b8E+(iD@ox5;;rfw*1dOu>tKLWOZ59bV4& zM`K)?vW@h`;C~@-aIqG3CH=*Ub>?us{vF#>@Rgaw$}q1V2DG6m6L)Sg1S8&~{g z-DnlshbzFGf=XPp139scm{Smc{p z7CbIhFo}txe^1un`n?00{XScGKH8Y=whC{``!4-A3ne9nI5(-<1|xRDw3+0I{3_iR@4Je?M;84R_f&SIb7 zKlmj&kF>leLDj@h!u!>wkmoa)l=sL|>0&?pz2p^2jq;(qwcThw=AL-er!Or`s}>v} z5vsNea71?`%`WnVNS6RQsM{zm@m$yBJA5#HuaOqI2EN3g{64@!x**})v?hK+~Qa$YJ(_D(=e1#Hu6}H#4QSv*)nf{;(zkjz^6aNOUVO7#ndwx*}O)~ ze6fIiDIJP$<7czP=Z{1OSt$rL&Z2;MKQSgx3NFl0q#ese#%ll7v zhTBm8IlXb9wjO<3;zB#NtJ1B3TbWZH_hp|yi0KWyvoM^y!oQio)$H#=@$Q)rB`Z(H z7n9g&VLVJ|t7SXAw7_vy9G!l<2-kUy1D~6H*pRerI5M(;xurx1E9Z*Lef_*9zm*Dj zLr)4O1@(tZ-S3(*jc>6}WB222C*EP6Je|7YBcU$co!)qDjYQqgh7qAu=Mf| zp+)(sQ2bV%j?cJ-YTr0$$vEpYxpaIqx^=se04D_=~JhfeX)O$g`tL!K{C+ zBdPp9_d9@V;b$=wHh*G z-7w0t2pwYvL4a%`p3J|&)?WtP(wWHaye-CC6$O}mDhHn??PHy$OmONyi8?!C=s?l~ zmKc|VOWq!0F0Y<2DHS#R^V@>*cOC)5!4l-sUdIY@q(EH#gnjRECz;27=sx&6)AQOW zIDPLCRgWj)&HOIduy6*GbMr;HI2Y9W?1+X}jOf^)C*l*SRdB;(IPbTZ;F_=yn&+Vb z-Aofl>YG!TOh2~ZW)@@K#eVhU?69M+h4wWgNms_0a&rTzG9{mG!GA3QskLyh z`4FpshwRC(82Z5XgDZyq65OBgc}4e0QDVD3?!W8^>sAaIS8GMmKulF;tVJM%+g!Tw_tNbzXZwlQ@pN6yV ztY&+UaXwpDJuBCljx%a@v)_(>bgoYyOdl{2KW;qEHcN$3)6y#D4> z&kE0cU_f`}l_{|ABbZ|Ai(`&z)9o*lQ1!z#p>~2UjT{`0EhamldXWQpo}Pe;SK7p} zgSnS9JCq_tbt><6r_C=~;gWj-=5dAPoj!Khom~PeJa4kMUpB!nJp<}>y9-+Lg7LUX zhj?K}DynNl(AGyjG-UD$7H21ced?_+ew-uDvCPFU7Y&8Hh>n(`A*z5?ovumNg#nMieX}DzN;jm|Y4@1v6e+=IwWpBY9)K!IoRR^53b~)~ygk`n_rdleZG3Y(ge)FeQu1L5 zwz@`v4qT4Md(mU5%ioJ)CTZeZJ!jGhRmI8!RYKhWC0Oh^4EI010{Lz#@avS1m~1=^ z;*B1+Xof6;xswxUtfUQOZw#fECqih2vJr(Qeq&A+j> zS~PjKBfOF-7gzY{iMf-Ka9+0!?WuM~SmO+FBh2Aqr-bOZ+7;D`^lbjOf@9W3QQ zF483@^xEW&>4)r5@m3P{)9(Tw^?yQMT^{>V+`+7z2UEj7BV6-_=bfx)&>T}4Dw5WL zS7yBsFolP+m9=S!S`hv{U&O}0SkD>*4asjqGG%4(H*e)iVRC6a*2>+3o%9imK5)+9 z4S&ihGNeVF{itM24z2F$&n|d*GvzrA@aCF|a8Stj!;j+lFLN@^K3~w^{;2 zO230+eW&{e|1{e0{iv`vM3bpi`B2&;ee7KG4n__ffX|$j0j$O`&69rMP`OR?8t@bH zs*5r1ZU8&`-5o2H?P+gKF>ZhRg!MaS-x8E#iN_~ifM}V|@MXOX)-CnKtXfx~v_N-* zg_CIQy-wkQ*bnmr`Idj9OE66_pE{37;!33ztp2n<4t}P^Hf*R6tuxBmyd^Wxwbuu{ z-!hI;(3NiNk%yXLw&**~p7k|Obl-O>9KUNA;NuTSG2bIF>ER$c=Y9edcrPA*48&AZ z#@RfE)ONd${rzGtK7Lvuc!_dA?Y6YYECc`Xp7ZN}Z<&S3dUv@u-ywbNRZ!cYhBFni z&~I5^I6>T(y%!RCeHrl*}CH&;WPlDv%_^)g|W<@s#=l;uqM>;q`|=1%d` z`jTJSM5ejNfGX^jDPpgScy7pa@@XH=oo5j^>4X+p9(n>>uiX>nZ)Rfetx_S_Z!le$ zD}g>%J>uPonRx4C2^Gfehfg{ZbReM!XVvNAPcvWmA!mSJuig|acelgzxHI7KpPtY? 
z{}HGs=TPGG6nL#7%^o(7pvmU%nDJM8&P}qV4Ku>oqh~K$@?>U$O`#-hGwnyy*WYI5 zZK3pap&K4y+gglk&oJelS@h~$8uag;1E1zv(~3_6FtX1Z_s3J*srl^$icL?)!9KE- zxs&gTJ^LxVTw%_>c3Yy2k34o8Dq~9y!u5TdSP=J1pO3J|5T0xLW?FWl|L07a%=aAs z-Wv!(W0J+h+xjToWI{u2b6b>(mC$vM1d3B0vDtCe?E3X}oc-m1gO^l@L~O5mdAiXJ=mv5-~yQ1vZ}?kNvPoo(}2QSx$F9K(BY z--Dp4ra%03kHPI4*FdS>o1I%!$viha6ucgDF2hbI-2Bpv*1!DChDKBi2ZDX^sdPCs zoa^86uvG^&hmWSy$qI0&UWwH$mZ#f1Gxz(_Oa6Cr$JJv?>A>$V&>gBpG0J+v*pE}G zIv|_Q#+?vldk3IN!ZYFSi7Nv5&SxfDb6U*AL(GahHEzG&1D_JoXcB+l%y@Zr%0_~kNyBCO4vJ}`%F7+2#$5o#4OGr9rj=<&b-hMeU|CrO}!oLPI5nN z4H%1qLiezI`1dbF>uhQ*riU`c;Rp=>+n zI-WTQ-&Rc0}h5T##QN zeyhl!KYJFy1b;jFa9Evfc8oaoP>1Je!GEFJ; zOfbafLGJX=N0ZiYr)u>SAJoYhOCk2EG-X^Y_&9pf!L3tC<>EaC?Z)gB=fdn&lP4eR zK)mCcO4l#XMA|Bg?SWTedVDy&Z;W9YPEOdS^^f({mc`UP4$&JSqM?D?ZQP4snBm zDD03AsjTN&I?ghw=lrUKlOCWRxsQ!KwGg_cvxMGwT~b$^AnJX~!b4XE(L@uzW8dsV z?-y&(>!tfaddw`0Uv5gR`1n>Pts95AvHj6{rUtF5O<;0KE5IawAB*9vmPDJ0?1-cl_Eyir3lF|RiGe1p zH|M#hmp)Xe&mEi<4j8%Ek<6q5>37w1@bB?M(ML+gNei<-f7{8g~M>tod0`p4e?3K9ii}V9DT;?V&n1x9Q()<%bj_TbYu|x+G9s0 zCU03?(sxU<5kd;NOY~qrtfLI!t!5#ZebkglCIw&~U~b=CUsyy@s2j zp-mu7kADWVasrD{J^+2^3#`IF3@`esVoGZ}o3L>ptX8N=Jg@|2w_UH}VL9bul2|6{XT|3TQh1l%$*gH&2;SXhV?oK*Cp z_??rnI_omiPub35FWJz`_5N^Fj=QzS^)kDXd2s3T7+86&4eqZWLHg%)J=#W0gZqv> z!q1%Xq;qK)4O}&WnQhp^()%f(Y+fN|TCZh`I{t_+U#4Ps&rxO=6Nq&t4bX0Pu{p|p z5UjDvM8Au^FmbO1{@fNxZ5@(C=d`dQ*&Up#G{l0tlBg~@m6da^RxTTi={LC3W12M; zuF<963*>Rsk|eZ?ftDSA6sV=21v|J|k!SiXK*?E^_8$j2yp8j>+xEfn<8t_t6C3Q8 zYm=Y4J5JxUN9@_hGoZ6eaO#Jx>_o2K(p&Ind;#fJ36#6= zFwAO@@DlleteD?9Mi! znHKHM{#})jozpFBi&UW#eG=*Sh9HbGki}^RQ^_MdijEv&qRG7$_F?G~NdH+#mGXC4 zOMx#g;qI?zx$Bt8Fn$iYjll?)JCHctlRgHgkmK!COsuh{xwR6gb}5UBRLVS{&L0op zU|P2)fs)pRVQH)*B@dCML9=ZH!PkbQW3IC7Ivq;vw+dcHD569<=PMpv3IVR1^>(cZ z`YIYy$&4p#<>wxc!AGQMlg%QQm^zTAZ-`}CM;D(6Tw{+@a{MW1=mu8$BL^$4A>D8r z2Bn$tSTvVsO|~y~&kE=Q|JISBjNMCdSIIt5iF(9tu||;G`C9z?YZUe|orX<`*Vuud z@_ZiVjnDioNh)yyKAx1rUVBSYg7ZG+J^2>6{gA}9AGmX9tP-Y5^B!iPFKeIk&SPs{ zEdI1K!)bn;G3*yfBP6m(^PD^yuU{>s?k*(RL5ZZ)o((-#Bk-Zv3f`C1spiW?s4+{! z_IZjpT%sQ(9v9(UWh@(8;mG=(b*7>F)#$(Q%fg6(GfD3#pYKPGCb@KN3jb08Dnnv$ zrD7?Li1Kgow!JQV$W^7Z*LMUTp5cu=@)jPe?S%gZadva21{PNXE?dVr<>#kT@O^KL zem0yb8%tw-h%evs9YdQqcf zJGYftPFNwH2px_lS)mx1c@sR#&w-_~4vt);fQCx-uxG3sYqv6|Lxus=(xOd&rTW4V z+Zm8Asf+8Uo)*792o}=#e&jmMUNCu~Pq+0Hv3%NQF?wYRUT+zMb+rtNqE*3t?cXt?@ z|I$4)I{_V4L(t>04o)AKDfS^3>YT*gKB+ZqUiCQ`%Xro*O^=?2q~V>SA*eKKG;72n z3c_CI6UjY|`rHAaxDC?z46wP-4u931gE5v?G`d_8O4=i##r&3dw00yEx7yL%v%~Pr z16!u?IgQ>-44}RRd5~|K!Ych5L33sj{q>p+GwojqTkh0&_^SV4wI52^bk3>!V*3Jq z4LB_59c|`*y~}W^F^4v;?qQO&I@urHhfG7YLU=zej7&8@ z!eNEsR1^A>olI8bXO|5<)r}?Py(aXmZw0Ip`J6Fe3@J^|g2Z9c&Bji{X{AIln~?Ds z8ulpD1NkV3nV`)w{rmCVOk>(1--l#Z+tIOx@2tr!5@%IT$KHRRg`C+RA#|z^-rt-{ z8Ag|2@=IWm|Ak_~76*1nViws*{(|S{B)SiK(Ei-Y? zfDvr>0DfoJ&?EOOCH!g-gZE6XfYHs9%^YUvoQ8nv zu2weZRDt+zP#T6>#F4z^G&Z3+jx-X^vd+h&nR2fiGy7;v2Tl*c_k1sN^>W?|yJ!S? zT2eIYL zW-uFc$s=*7wHTjgF8t;@9&ay4QS7M%s_XE>{lD_jZsQzq(9MD_4=a4ByhS)YE);VU zx3F73i^x|`nS2@|NPTiSyA>Ht(IiiyJrbysqKI=0;;AKjI7;dC0$Y&lZ*~`)M;0(o5#ARUg9EiGg>u%Fy6|1 zE<_|dp-%T5u|M}ardcK8tvBi7M!PchtPLp~mPf1UNeZzQIj{sO0m z&IM;Y42z}q@jIbD%d(6jvz8l7KirRLPp~1W26>uSU`E@&SfI(~Lj2p2jmJmmKz(2s z*&NKJnyAC#y{^}6NT4o7H~oR=GhrBe)E!^2Lfkz6KjC|P4*k<}!v~IIaK=W?n7-^r z%lUV|H+TgkI2Y4H{c<6_`I}JQdy&b><+4z%FkH}@jEjEj!=WfutWM><=Z`2VJmB-q zZ2~Qp3x`=-Y+0jVg?CKD@zci~yyn4sc2t0_Ef?Xk*B&P+s5x1(RX;<0=J zpG-;LV@%Z_`d8Jn(i{c$c4i-1m?4edCOm-aMR+|aCA%RghP^- zSV5N}eYVXao24aq>!FMIqp7!L%a1}D<@=T;%T9pdfzh~M>Jj8T(Z%OBuFUgmIyy8? 
z_t^T$0;Gnjz@aKtvqfTv1bt0+#Hj@R5eLMP!`R>i5vEZ*( z!-+lHc!mADnglB*4T4MGIWCOuvWI-;sxz;cHQ0{EerKoPzeQJ>aI}{dI|3Z%9-8&j zJ_sta8=-h^CDbNm(xRgha5g)wrD&Qwc%`Yr9Np>ESG2%Mv))5;uQXoe@5Qa+>gJB9 zY;+C#&ca4#V58r7lx|R`A%~q=O7UC}rU;m)JWt#|E05Ipp4Z1si28k9NxiU*d#k*0 zNWm(YYVeP33je{9XD?&lwr1k`m5Dewje9Saw?WG@doq4Akv@Db0{w`3`0_#;oxkd0 z?Fl!MTmKX?dG6XnZUt05nnihjW@H<)Rh)HiIF=bqWqrjZaAC7DE!-w680Vg3%cFAH zy1Zwi*_}*qju}rev9|1Q@(`-7-qfPF0}qVvZBZ{QL~7 zyG_t7vzYrHS3!WM0@|#t7C-*(3n3pDf(iGPDE1U$r~67)^R>9eVbm{ZJ?W3B-D^Ns zmb0sl3pCf?k4e4DAdiWgK$G(ipRF2;`|Vw@@YVMgMV_C^^oXQMMp^98wjnfXLpHmv zrGT#ulF)POMi$|m&Nev?!U5bD5tLU0(rXqoD_*UY>!n~6=NE6%Qzh5OBrY~KAfFLI zH2la8NRW`ho&6579OFfz)B)~yC>agD=l=?pYSXzpCj}cK^I`Kc1$68>+tQ+=1J*19 zv>K;F`5S&0ncg4Wy$iTgHHo<=>AMUHo3?+2CQpiOpkmS=(N7h7QQqby8K zRihF^#L!!cyiXiQGGWVvQlmd?@^u|7PK&`w+Iz)WuS4nlu3-AS+KVQ<9!q=OCbDg# z^jIPP`5v-*?D?N&xD_mq6>o#kKVlNd6dTdLmIy318cu7g#?p+~8Yo_TSX3>p1=mYc zNb*{%P}g~f%^X?@nR7ib)7b_4dD^4KToPJipSzdnrBZ(NT9#FzPudnc*{7a(N_}p` zo@%Fy5kK9q+?MxKWffU;rY$8j8ADuBlz57%xerU&>2dKEVB4ZZROC#QqovxUS!&Pk zm5#W3aya#U!0WEzsq|ub8YNCL#MU2Iz$D9u#-y)g=ILo{n*0_PY_CAGPR7vEWm*&) zp^wSwBjJqSAO23@uhGj2jgl@evI2a?U^%#>Sx*b#e1 za4}!Y4zum7ZFw7(bM7@SdQT}Qa!Qr zcgB&lybR}pC}5wI1?WS%kNE6$1C z(aPHFyJ4k67=1D>ASJbfu>Tb@mw!GuVR(PKkx)z~yZvbTxLqtYCLYIYVw3hTI{ zh-bbtc&IppZJ&A-oH?WKh~a1&cW^2ArAwltr8^u z=y+QYb(}PDjn*`ZN&CfoZU_{3dIIS!c?MR}Qn=qsoBk_%AkIR8?a$rAYL}|f`u#&- z<*pcdQ6G&>d{^~fP$T^PkV#scQEQyO10pM>adiN9D|C+($3Dp;m9M^J^5+s8^Wq)j zmU6OObrmE&7Gn74%S`xqyTvVCpDi#>q@r9?OjO$__H_28#Xg~6`TI8OP1nM0rxjW8 z&up}lJS!x$C1J{D71php$DeV1+TNuo^mN;x|8i~iW~~A0Ej}ZjIH=0&z^`n0a-Q(; zPyuB;)1&m-9_HJ1jRhnXVBSt;G<-8#a9tD39u3~i5?yWb<$+M(8>d1n#0zA*6k zsy(CGF(4eXKS+@O0xe3+9LI`FRnTdbJIoz#K#wOUv-3s2q55$iJsXfjGH0d5>r5a` z*x_-u!5PaMw!)Li(&)SHlDpvjNi6zzhbb)>!$yr8!S;qDY^Wbd>H}`V!6)M=`kW0G zYY;4&EKBdX3vl%EPtZA|FV%ckz~a>xg>*aa^oZ(Z$wiU0bEH56VkV<(oh8%QHx55} z@l5xGG~xM$0*swF5YAaNFw1pWXg*H~_td>%%TAQwljS2Z#*pufzT3Wnywmu1_Sb68@&(|&0Xl%>_9xzIF9*CtK;o0yTriO<1BT%fN&xYuSNJU zD~sz;S1?y>32YVAy+)JAxk5Di>V_}&ma%bnZZzMX@9&o65l)@bk)*-1x*If=L7Vib zWJnl0ACrYwcjQ8b{Vw+C_9M1@@h|wdz>0>(X0jEh3vq~&H7tA$=y;_QHoa1zi!#5( zD)UN^&8lVX4d>YBiY)fjVIx~QY&4p;%i!hNDVTB26-;R~4fIvPdfA;!Lw6&bxW{J+ z=cH(x=d3E1z@O7_M3B#W^vhZdQUctz?L8}M2JO?qpaElOU4T(}V) z@!iA@Jef%&3yjzvE6zMpi9hkhVGwPcE5Oj;Ei78=GOOrZCY%n9gex;|K^4FEOO)hb z?lR8C?{ggl88FB;oNvyijj#cgM#n1AmqoO6A@ z`;M#q;r-3+?C09YY%SBH?M=Hxt&RxndAI;pb&bcBFGk>}?R6}4#4{oC+6-K+nnsh8 z$D&lz8zD${l{ovyFfo+pQ0BGUkioui{5g9kY#dDF;{*AO$Pl>9S% zplm_3d(pT+daP%Fo@ZpiWLlRnKfw(rC7IBlTRY%ing?Fy_uAd9M?BK@uV8E3ZbQ_Q z8aDUxAdIiE!((-d^zubLoBP-mIxDs@yJ^z&{j7tevvoLyR3@l0hz45Oc6&lPNjkO)_~T3vtf*;5jJfcjiCcvsoFe& zIyWZJuE-zIn!a)TUR@y%q5KWedfopv}gMi=gJhPJom6n;jpV27_35^98F@W2pyKi7*k zyE@T>R0}$4HGi_K0=~alV?>Hsev}=WNy57*x;`U} zGUm9^4*ML6wsgZlPZe}^)}q6Dqdnd%bmV>0Fm_w-0n788h4!UO*nkd0%xRaSoj>Dn z#3)bt^YTSYK;dYt;LfDU*8^}}M+%KKQe>-cm%;7UEXuiCC1#IK#jQ8S(6A*t*~M^m z{*L^|{Cw}R70ITk)>}%2;W@aw>kw3pjHRqm(Re81D4e`;of#C#VqE_#%;1DB#2lQC zVeZbf=2(WX@U=FCXv*MkH5qa0k4ZFfyNv_kmrEon6ZU_O`Ot2j^kT2rE)EnJU=d4#t5mUV(X6EImHAhkgHlh9pE! 
z{Ou}1v5gY=yYVD<2;G2?^+hd_nK>+d*JH3YT?Q{UE&@&dj4dR0@{=@x)V;l|c~6tL zB>pBWJMu(0^V$keb2p;wCmp)9R~GXQrng+XkW5~F+{v`1L`a`BgMwi}?6Yc9fDAs$vaQn0i zp7sqyX~`67yI1C+y~+_b+*%Dz*LA>GJ%c&L)HBsrj`-)wCpPA?C(g~z!oOSpfI>d6 zIbzf3fI&5EdJqfpx1$l?8N-E5oDFv)4~5zWI6Kmq?BWhH1qFBP_oW$X8>Q%J&KV(g z*FGluC6fNk*22l_5;6DoaklWO$l@dFnbJXT*u0fH8T;kae_!{*FP+oyr!k(cbnS2H zbyCD+2^X?Fa2N7o4M=|QGR|55BtF)w6lVwnaBcq~Xgzy6X#MCzr`n}3daD^J%yuQy z&jwVToCT#t=GdodB1^B9!*dNMn3stgo-Yr?6Q**c(_de_ZZwz8C^2Jt-Fo=#(?Mq5 z6-cHLqcMlyMgO|=VKRv!^nA>CtSvAh=lPtIZRAF7y1>wrzBAoyBo_IgXAAJ69VvT*bv5P*gB&GIV)U?vaxg}PDj>y?zm7_7{ zW+-*k{u7t#c;h>-lkhM?2HYwZu-yh_@bceq&iK_qT>_XBr^~gvaADMCsAT#Dn|eut4t*%=b4VwKe^i&jKBMT$4t= z5A|8WCmS@~v`qZhdKDc^zt1y0n7ky7PV+;SSgBYWYFa>?7#Ci_|SgnOu>7}slfj*8rrAomsJ!s=3 z&MhyrW{Rt_sL#X6nA2Z^hI(_}-2xLl>DCW-{Eo%J8{3%so=F(r{1|rVsN>lRy_Som z^+Mu-4bb7(kDMwwciQ?0dy?JBb52W`b<8#hEwjUhrERRKGL{yF4;6QaY}Vgn-hqeM_1Rruxm%WsVUyVWUmX4Fc>*4G`hvpJJD`Un>G9u}Eem=z;CeBC zwpzHW{#Gc}tQ|={DVL$#)s{VVvcZMg)2MvnE(lJQ#+l>KfofqO==Gcx!_;@M>>&a0 zsK=2a1BM{l^}*@pU&XH2UE)Yd8S)Qy!{D-9?Du9WJ#{yL_Wl~Qa#FW@bd3xJ-u^#3 z&Jy4AEbKw$@vvmi2l5*S`6#+&>4`1c4_Y=WXHoA!1y=E)3a-E0B51_c!;-U~ zLF^q*>qZu}G)1J~?ChBo)uBRbZ~9_#oB_2u<>R8GgX!hGhvIW1fx_diu)8)cV5=66 zu3O%+GZu<1_wT%8qwc71h7|Afn66+ZGy9^P;uKPN|5M2E{mh)Fgwfsq&I?9Qk9h1$ zR;AsMU2IRj6^xg3rgx6KuE^akLU9jUHun|_+L1_xr_yla@U`q%<}@Owe5#fmkB!~M zbkk!htK_r3Q`N@gl5$+k)X&1vf4%5_{b9)QFywn@%A}XHorPVfgx%8<&_Mkwq@8iZ z+jCSFnV4_|cUG4o$C$p#D~ zCC;4*yB35$$J=9}lqt?ANhK56MSKrAlFYm0@$0H2yskHy4jm{5tF7a}uVp>c-_Z;0 z!8JlqfQ82!O%rA_RfTeI*h6lE4QBIR<^JyqRN~ReK91T4F@JZ#3k*dsdwX(r3KRRT z97E9ucEf?>(P(yaJlmVB539Z^Flpsc_`dZW%hdh~4+hO=m*R|2ZonAacR-H4*=&nT zc9yV|9k*cNxf@x072PI*V~^xGG)u;Wfgp zbL__ZB&ghe8qBZe(}>7E)MGLmY_9kU&A)2d(YyTr_SKxt8aokJY2Ib>rg^x3R}m~+ zKNe@bn1t?KdthPQY4HEIn*BD{W+|^VXnmA4jt_1SOq36?@e%#V?qnnm%`Cz>_VTo- zR}!bmPa*k4TXNVw5F6GX6EeOX6Hm3B6{ncg!s%HPF#1m*ZB;X-I1kQ1K2}`ZXWbRI~BV_zEbW=!pMn3nB9t(3I7q z@iUKQ$>0A3JGu<%L;eFe*H%d3d-k=A+Zv5yIorTQcLHY5D4@gl#*w05Im>kvS)|zl zuw7~bqnflRKCc`mD9Ez316RbpY2GAhIE?QYUjnno5u_Sb%u?jtz`a!u2Xe;o$K2;I zEzpj}czUBvlOuD;l@%*Icy=c(8`Adnr?!y_^k?&aa5yxNjqE)Q7j?Tq@@fZrc}ANA z)ezFnJjed?&%ek&Dgc07XTWqyLJ)_Ud?|CTAe zQiS#wHSlEk9gnQNrX+t$Wc4xEA>zk+=&Z{k*N~H}ZrL36SS=FYSq~HTu8W|K!yT~e zvnKgFgkyA$t5|7egeg3ipx7LS(JvoCTj^N5*_{ZR9~PkV`r%Zs-3F+r4KM7zu=xr% znCYTcm^L$#dOOEq-Mv&uczIKN)|Q6(d)38r+hUoPZy!3BlgKKD5qNg1VaHODU8!Ft zMh{XWM+130o}7&lw-1VL-)w0apU=20vBx9ljcC-rQRrjj#=P(X3k=EzjhbXDAv_oVS&p4-q(I4qh&=?gAM#~W64?JYE9Rmb|22&c|%)KBV!ztS0 zh%jr$a<=`w5z3#I#L=TQ@biRY!rzNoRQi)UovyrxiYo`%trZ{Omdq54zVC|_S1hpq z-J3$9<9X&HAwxZNK=GYZP>J(+mUMrD!51@W%ORd~99Mv->os1&RT$c_-S^`e>7VftB7P8^>uWmadliYHPi(ytaG47P7B)l4~4kJ)>xejkyNQ;hG3)tX2j=p$|rBlk1*!EwUxP$M{6kdA@cU+dUT=Q2# z@bgMBB9;3+SE-}t!U4jIb8499kV~@Wu2lcW0J_I#qUxe-y70mV*L{%@8h59WtuO}N z{1=0?mnM$5Xou?!d8V)=fZebi14R>-v7L80tLZe)w{W_-<6qWq7|+mt43kz!^mNkEqXlkLyxzI zS;9CAp=y{PM*eO9x#4MoL}8WCob!VfxN1U+`)jDaqz{9?OGDu5YS{VH7PSoDz&?j2 zCVeM~-Bg~4^`C92?r0yp^2v&9wtG`iQ9Ny(I+UfJh-5m-)y#zFWV`GOVA+gpT>NGX z**WXtx_bRoTHC0ejt34`w#;om+obY!C|J<~V?Z!V5Q%Lj_6l?k8`{Y;$o zaXY+gO2>rCWb*GwWHUVTY2lSaY>Iva+*!gMLzc;SFjW^Dw(ekiMyit3tX&Kgg6N3U zSLnK;Dfk^X!o%;4#Y5XYvE;fmHl^%ke{G^LP4yRB^q(`iB$|`>Kpj`z^TrJ}&zWYF z6g2qshhCXG;*(%$8a3R4|33Y&j_;%t_s$0e^-w6SQ$g#vDYUwwhAn;4$&_F4Ip0_t zn&?>uS7)9QukvSg*!OCoIM0}lygme#qAs43C}(wiFTzb@H;Z6R@P2E5nDpQ|YrLlf z&#E~exH*X`-Q6f|t_oTO%24;xgJN09STwe?W#8T;Qs1XX*zvb3I4}RQAoAW#%#Xor zVWS7CG^t?8C@ZLtNTlK~>h!gGG+wF7MQHc}_TS7HMEoeW#={8GkN0D_g;KOk{uz_cxd@u>KD5?b z4QG$p&XPy`2W=NpDAO<-eP+R+b7QRvfap)hmeQzZUaBFFv 
zX>|kqoYTPTojXQ&s^ZRn#pL`vk=$0tkVeNKs#E#}e~UR^_$Lb2o@-&>n$LO*c`me%bO-`3t@j|UV`v+W!4-DXE2F3SoXvN!``-Z1Ro{o%i_C4?ML z01lpEDYExiw2vcpMJ1wgasn&h8RGDkA>uRRJ*=W;8e6la9}7_l5ghvmQDg0ND&cH` zz^D|e(|4nFzo%l|=?J`Z#hF&7^KZ%|NA~TT0Aoj$kiJ4N8tR(ip$n!YZ9N~>`sh)m zOCP-05DFd%vZPhLneQs7gKZk1)WvktoXXuMzn%&`H)hhTCq=N?D~ME9A+&Ceq9zju zZ2hM~A6PM2*bU?|{MWTS5Fj0z(FGhR6Y4RA-+~(IX$QHW`6} ztFJ+LLN3n!5rjwe{b@v5Jb8Qc!Pg#rvD8z8*Dj5M{gi&_-W>u<7V^3Eqsa{atYNF( zo(8uvBr12n=~feH&6-%O4;8>EoAeW6^sWK;w<-?@muS&?)1~mn zvWT`Ac;I5o;WTK@I6PG>@}AfL%w`K&(atAeq0d&Kp4#dg4 zW;$leR=oSjitMvdsVo+2&J@tXyLW{YzGRJxp6h`;_+u+Fe4WE-GCYnKc7Y?~@;+#pF!o0Z7OqZk+P zb1QdA2^-s;1ab5ExpXHFbryuerJN$R&UZRF^F8SA3$L?=0!6Y|MUXdO7<;M{CZ6_@ z#@jHa zqaQ6zmBQUiH$eBJMR4!5E}JgB9ZptqZ`cZN{8_`DrcQA%UQeC-&(tw*;b|DW`ns@= z=ghrkbi-Yu<^1;+5`gcG&x=zBub z?ycxJuU%}2Erb3sr8H~)VL1Nu6C0@T!y`*!Cuo)0)1Tsg>~P{och+eMrDt<-xxy6c zsfHrHqyiy7>Qq}aggJsFJn?)(lw!t$wiha@I` zA1gjw(#|+d8E1u=(Q~`O*l4>G{0?YQ$hcs%3@t)S{bInnaJGH6hp=zI z3J&}#DeQfz#uPru2^DW_Nows#eB-h~OxX5VI_*KUtr)MM$`!IuUZ4bvypQoYG=4q(Sy3 zT8{4j&fe@wVl`=Y*zbK2UfP|H6W;|>+LdII*kviqy&Qywjr_Zs_(wG6b!+h+16m`w z{KSlJ+VrD-IWyT{M-@gTB)deDF03iQQ0G2)Lb{xlo6Kj8Nv1Tl$Ci7<=D~r)1Zwr< zbA&&w?Bv|X;3{_+=09;K<5LZ6f&4@&uN)~X-oXUT#MPjFV->UgeuLe2sb;sbe9(Km z1e!MOX3AC(?5%q+R`37H>SS8Q=cZ~{b)t^7sf5$(k`hX_cBjfQ0!{H6>@lO<919+A zVD-&!nNRc>G%{~tYac&l>2klAf729H%v}I~G&IP)X#r?CTG01tPNWhH5L3{`o{t20+i`5?pRRg8EjEr7IT#@q6uj(NSRo z^k_J;7acipY(Y6pUGzVS&cl(b_lx5Qp|T=0BwIph8J~MjMkJ%`Y+48vQ9?wez4y@G zOPY#%j_TXc9@3`XLF40 zzZnAS${=I82ahS9#11Ql^7JJ`Fe5~ZZ>LP9?7G#kF0w1;{a8ZBah;@)3t(tF4c;p_ z`#otQ+tek}qpQE*>*Gke7di~{PB_5)ZA0Kes2{X_@*B+Wdn1Q>uyboweDp`$Lm?U5 z?)dP&z#Nt{I`Q|9gSpDMH3w!7!&8nE`7B60)!@2Z_}35ry!YdBc@JrP9)`uu2c&Jw z_4wiWbPm2+EkCFjj49#Hd^g}Ylyt0y%Q*&kvOEe;n`lbWStT@6%#@EzdI}-Cb>zoS zFOkMX(S3Z-pZl7vf|`Gw75bkH`1`d3Qr*pPZs<0i17mv#zw18Gk;VIUl>_&gHw*@U zP~zUl{CT))M_KzzZ|EdFCi9hJSz2c=7e{yH?kYY!;$kL8Ea?K7nX~BK?2i00JP~JU zEP_s-2J)P)8o2(CDnD!*&b{iaQ8B{+wcfeo{C&^G@5-BN_3ZF!;xkf9KS;Y1jM(PU z`&g41p{e73HJT-`yruU*#|9SDWZtvm!L5YG6{Do*Z4z;P<<=`oT zQI&ifPR;pGUUDc8PL0*0Vl2X5b9GVYN(8IDJ|e~3@W5r&Ss1%7ld7o|=LQ}mPwfu4 zFJuVEs@Y=UNevc4Y25cOA7`j!;OySMii;sVd3)w_seY~KAgz>X&f7kCWAaBBFzY^D zu8-zZgKx`2_L<(wXul0#sZ6vKcdrKTWGJ~UOxLh z088&IkpHaO1JME7rDZdfacw6bC_40y;x#&9v~Cc;+V3Yi^v7Y(`&e{Nae<>DYf16) zD|BC-hhMIKkq^1aP-)c*tF{{8G@nbfsoWK_eod}!vhD_Z6O3@0;7UW6Ty`w&jg3zy z^7=ap-Yg!O3Fia&zg^BOeNLBJ{nsG-WGiw1!3*ek!VNj`dj>yQ5|0MM?G?H_QC9cO z#lM0#lXt1M`oEuhWfPH&IN8k-%YSZwr9R6jQ$+{gUU>lB{xigBQ#Vrfqw#p<{4J@j z)`re+KS(}v%E5A?GW+;=qH>2c2$IrKx5Sg8F3f?TfR1FJ+nw*9G8Fz_e~#VzL5kXv zj-L)(f|i67jtOaloeNe{=$y&;CMk@q|2T8G{y^+dK1*TvyFiL;31=JqA~Z|)#fQNO z^z3$bT9A{-6Hle!@q7E}@CV@-Jn$2egiogZ!E|ghRUK~}4q(IeL)a@tu#`8%^SFu* zILt5>qK4|@kW>FCalP<-iTtwe$J3-=A-pr6R)E#I2u^p)#kIRe;F=c}Fe1i^OZF?X zmUUD{8G}#jgaPghKAX!b5k#@ZcTlbZ#kG z4Yx$qg#QG8o}gcpD_1X1;#N*ctg|o zlD-}e#fDWAxcKh`euVDaN-$bq?eyWBkF_vc$CUkS2MULA6c$98qr23PEw;a=e)&aQ zG$a>?y)%$&2H5ik*8m){G74+c2J+X6RdCSl2&I>~v$eejuJp|2;!SS+(lZd%^TVKz zXAxgZYLA-Am2mojJA8aGhJSrZ$H8~)gj>Og+N&a0)EeWUvqQOEkO4lpa0FgmiRD{y z8rAV%_3)FGDOz-JlP$^>n6^sv2%J)QLPi3vhyn7knlGJS6++2RFT;jz3uuc^29Bx^ zB&TmhxJ6`;rrdRrvj=n_jb}Y%tltV_b{>cK)BLz?4+rYiznCty5^e<5k05JrgGnQL z!JxfKoV7=r5=|7?N~;S#UC@dL4NJi;mZgw#MmRc!n?1E^lQhM_56hQsmQ5GjmJS{s zz&6ig;HCW(ZdaPacDoaK-s^atWS#<{x1%X!au$9c)ep}e?u@GWhvDXl<1qZxA(-UU z4wEdC@cQSKa`*jPY4jEso_l&7HH)tFj?VX_5)%V?%Kk1WXEsyss*bGG2}D29Q{?4i zF)mHGJ{!f^~)azb< z>C!uGep`EzG|ZLomb)ewMjfP%ZAL=$`TsK&{X|#Pi5nXylFbHBXdxdS+@YC%C|lsU zn-g%*%LLx_t%g1rAeXkYL9YrEZlgB=k^^T-$IW7J^V4qdtlMqb>t7?q&Gp6evCVSB zV#LE^ROS8uU7)cyyR!GUL{8|nTYk{_5lx=*7dkKhNP#M6q=LSS;mbQC9zGJ-@r(t( 
z6V8+5r6bue=NIT)oDF?vYjHupMCtbJi9G3?Ij$VgmhCt6V$%@e6$u=Ls!5lr%csHY z|GYCV(d~}MT0}+_BQY}2g&oQ&fCr`~EJzq5ET)ws1#QqFd`HBcQVkC+WtTi=17lRM$QH7bn9)zDy2 zA6{_57kT#p{Pf`$Rd-0?FT-=GbHPLEy(tH?#hEJokv_Wj>5mSZv-zCpN_~8wfiaQ8 z(DvGB>^toSJZkiV&B@BFwp>TjC>zLQ;x@qemPouF@6VAJl`&%0IPQE<15I~?C~mb1 zlHYFIM%(;dIH_|C4jFcwOb=*d`x}KAy&{_Hrs_jXP9N^R=e@kC*^J%1Y-r{204!MU z%};!qtM5$n7M@)vE*`R9{xtBYthf9nIculT(xLiv*S?%WG=LYaoJYT^#=(S#7TiJG zPRdv{k?*(oawGwTR(J^pxtl!hT{E?s?Jdm?FP2N^=(ElrLpCp5M&YCN*&wwWZ%XTm zpDBtpe|vCL+A3-Gl^>FC_I!A?ts2*PlA^K!-Nwei)j2gk8n zWg1Vuxe@H-9dOJl3|8jUfQ|nszEPql*Mt~wcco^y+H_IAdQHK*c8ukRqSw7+o-UJe zAufM)z1n!cfnr3HJwL4}19#gi@U3$ZFX(bvK2R>^Ac+~M9sY(YlLz9>l{4tkM?1W{ zFB2Vi_Tt9QT_E&XJYRPzrMQQk_;p-6w!S5}Vh3+iOZs*#Wviozc;A7_R-90bY}o z@oL^2y7q4*-xG|(8Ipt^PGvOojS-T42&7t6Dh`-w;N%{8=)XRjC#_4z?j!5y;^4Iq z-n>N8JJOb(3@{Kgp}+K~%@ufiLhKA|MK^EUU|zF7hFWf1B%Rs5xKLvjXr|SZX?qF( zXlGG7-4RuTw`uUYcHQKRS(g5)L`@&_K}z*rtM$ zRl;$lPcJ^Y`grwArw*vM!jKzx72=eyCNR#rj3f+We-jiXyxrGRV65tvD3pTvFZ)ycmBk52iaO{!^5Ue+-Yr z?s)9${&;@w5FC9fhYnndp-EBSX#aZ$H2b_6Y>XF5yS6N+w~Z6w(4BOMeA)`TCEua5 zv;UKN3wPa}Z8b1L{L?=b3}Myt2c#O&r|H-95Sj)$;cV4SQmnKMQtkdxn}zC}Jt&Pw zw)sf@Tc_~huPrclKyOUGpTS0RS4!DmD{0G3@gg`P&g?q^ab|NOkF)WoOZnT;gM#AG8?U&NIlSw#i_h-necZ40r1)@K6i3Vgx@Zgkoyr=hVs@J?D+1b@X z!c;X(`mW&f!gcdwz%Tj8A3u4g=n7mJm4%r@V|bgG2lV);PMS?C=z5$AuDLBbghLIe zcGY6JRbnpB{n!z^b%|ul+FNAz%Zb8je#7Tavt=J`;f?ivDV5PX#YGhASzL#Drn z3fE2{ZZp_MkHYSe#gziC92l%PRQ6rIHBJ-4l66IgBpW#=6Q#KWNv_bBmHMmU{<*Wr z+`BJ?ba@Z2?8nJlE4!f28R0BF)e4{48S{w@TmGFl2CGrm1$rZ!gx8L!mqOwswTCYhq<* z7SH0eg|w&U4%`#j_i;fZFikkIayw0-*EdaI_r$lbSL-oZ)OEvv_)V}dBm%FirHZ>% z9%^jS!=59K)28?3lBL*F{rB-K%n5Qv>zj9B^12L;{j3Jp4hufWK~pLjyouHwI!d~) z^+;c83l@DGVe781l(BLU46xoH%bMlXM?5zRTBqWW+{rXnOYj{|xyzx)TJhMFI&i$D zjcGZyJZBGurE=eKDmGo$dIAb-Ogb7U|>z3() zUae1&+w^e$E&c{yZuH|9VNuvE<{21l5p3&up*Uu#1|GZJ2r7Xgw58k-c~U&ioAjAx zoal>1>-6zu)OM-I&AxEHSr^OCZKS%G2km3m zgI?M!%35|9v_<|kXViYWaiKSF-}w%VXY26UNAB2uQZ8#I==0r6&b;%d3-{Rf2=u;g zke6!K$dO-m!qm@C#7-e<~OyV5LizE8r~3`eT*Y=QdnixeL+gZAbv zr9r*-xEvYRAkR08pb-m*<{xvwzN)_wa$Dq@T=m&+-N-T z_7eP<;f+sCpFsV#)|hQ%hBs%|fb(^Gc~OPnR3-Z2v%iUaX}+Jrckl{O3qC7VzKh`J z@^rbosyXMEJcoaWN1&QV2vsJvz?;#lU~g154?hrz55DViQR66FK7TE3-YoGb12KmX zGqBLi7Vt{XBKu>4J2cb-FVD=zInJrLZ{{0HR`-IP1?hP4zxQ%t?}PLt!UESh-lu*m z?f6BP5$v?5J(lUIaiNwqFKPWi-m5iRKK50M=MC}XKN-&GZ7~^J3jb7Za_P;T65`2s zkPmzjysxX9)8)%6&AE1J6{xsxgJSDQUU9b^N|G93m*`HqXQV(E24+lw;Nm!`ntuk96UF0EK<K9_%Ge}=vX_JCFBJo!}jzoeTr zQHt(7j?&{&U|5L^;Nz z(vfnq>b{L$E{VfeV}Dm~t#HD{B`YCj&saEsi@_%9kaXTbmvy{bai>n96n3B|-ilvJ z%|Wi@k#mk-_#Biz`iq#lIP)8%4&>ggbFrXnqugwh!|@wOv!%#2KOJNu7rhhy%(-`9 zmx~!(*QkWz(TCwnmrh&|+Xv5XFvEzB8TeQ^pYNM*lKa?+jCG_JHZ?axx0(ztt*WMy zFCMJ4cpV%o%14u^HMBJ>g_rDCq2AU`@KrelcetN`o%&{2v$2_mja8>3cTR#z$rQ+` z>&gBF0bD!QolH0T!nL;p@rA9(VYN@CFTStA`@85ll?q1EQ5|{c@(f&?tA$OqDe!Nm zaL&XOxbF=Q<8H0<;M$B#Zn0FAL3IB@riAk^7m4?F?#F|lh|Uu=NNti-VQ*V!PTqS* zI$|l6yw_uuxB>z;%t(*EO4V{O!b}IvC+y zKOMN@Vv12$pV9J{FW|;qLwv1XB6tgN_EIpSm#CKy^m>JJw%^l|7M~7lxbuj zEki%nZ2OU_hxI_CvV*XFM+RMJaOWmtJ@#3CmWqGb@w#hAAiE4{ zU|I<7_0Hm*C$!PPHJhJgPhsa5_h8jC!O8d@gyqRoc=2OHw3z6EJvkSb?yClu;^$y2 z4ZxqbY7_^iJ)pqIEhHI>E_rDHZHY<4J$7%vR5^l`#u@V4cWU@HrI?|Fv6T`-T&6Y0)6uvd)m5ZzS+}|Hx_*S)KOwSv)dm8C2wr68ET? 
z@L#y5MX4^YpyJ1c|Ct96D1T*K?0$+Yj z>bU9F6ME8f2)~+okRnyQu}8cn`Gs4c`Jsn&Wnw>G{OJbl814ox4t@|lC6V4n4ae~I zH_2sen6%*48M%6K6rLOX8GHlWuvg7dX!`9+=HhS6PiY}+b)GBTUjn2Y?S)gubo`^^}dO6 zZ3gkr5*?0QXa-dcV|h!*UO3Xnl*@#7s3yXg*N@9a=WDm<+*%)sUs41263gJ%!lQH} z%^VIZ#Jk>G4+oejV5{F%x$OA|dSDuWp)VA8@lq$MJypPmqnb}xs_cgscZ;E&UI*#i zmsRv}jbI+mH0C7{>1e#54{Cl-#_bQ4(7u%iME0=Z4=YCF-V)L2uD3x=EhR2{kbwGg zh1=TC2Ya|(fyYKu_{h?|VE*rPtW7@R4|Vp-$nEg{Y7;zlpB2|BBCwkrUGJ&>BBZ*|&i1)#h@C!+LCy z#^6@Ezk1`F3Akv9F-Pp^i;HqFo#)xR?+Qp!NwgS*a)d^=-T!&MZcR%nc8Y} zcwz(`wKT$9qdA~-xfh;@Xob@kbcea`4$}H$YusTx9B)3dMEC0J)bt<@U*ENtH8$I^ zmS8_*yxB6SHy8=oHxK9Kv4b0Q1US6mf3*sU~OyCv9@XnRlkZnk(-7{gfFc)hO5@ zu0QO0)t;3~KEm|AUqLNig+0G*lWR<#ljnyAWMi4bZGYzRwC6S)pU_q8uH4bhK%%R2 zM`L%he9}x9!rkpdq`u#?AWBUg-xrQY$B;PQHfRWXP4kq^Kegev0~_Tst3PBbb~syh zXOgPvPPte6bPnvZN#1@jPMrN>VSSf&f^Vtd6Y4o=mwH8>5`Pzd91DcX%2>W(AIt`U zJ8AUOnWPZAgTt?!aCb&OoUkZKs_JRZ9WKRlo~1R;`OghZb(HDa;<0dWz;H|yz3#&* zF98E0T$*#9Ro@zRR+7krzpt{vUMG&z^MxDbE_UPb*Yo+(C(V&~!+JQ+AKRCMBRlbe zNkuSjbu_9#=8@{3EE+`Xt1>YVF_CC7X4+R9MU|D!>_WmUeiaU@T4wc(RqHPC-s z4&Q4WhyGE%yh%%eT@4OO=Sos&&x{(sUyq22IR)PD028rkkMOl!Fc6G?~PUE4#JqQ_*bV<}y6h@mffAL#uD zCG5QL4QcgI<`For1Gdq=;&LBtFH?m`b6Pnex!t-eh6RZ^HA_Ov<%AZy}`!L z24#a#BzX|GxBg37T1KpVI$Qqsa3}TM>Q9|Yo8W|X9_zi`1}eP^@%K|R{@Nm(F~bFW zeex|hQ*wyr?F*4diTVG9mi_Q`Yr6D&`!h0i8qDdU&puWq6?c5H!EHV{*sHfXcc0sp ze_G#>MqY`oTC%ST-&YwXZ}uw03+ksJv5_g%HVNzBeSjvF9`H5z6GYiBCkxk6>|&J4 zVLvAEOph?Wb8kJ|%=8D{jD^x*Be8=LT-Dr*6;gLg!O3+@;ZCX7fY_4Shnq|W2 z-V(wiTzy4eqyzdl-i38T{Y6K5FxDTsPoc}k;{;imPFXFLbN1KMcDw1+aBd)`Ru9Cl zgNotER6Tfh%Z77&O+bBxFUD!QQuGNsoH)r5ROSJy9!O$q|9kYqQ{s7gPoYa_9AC7H zru$Vg6+TOaiPjgXUd%I&R88T~aYZ=up~zV2bV9m12g-l`rrFEtY3|ixXkjPGVt9Z2 zJkmoxme`lfg}2$LcNqF>Z=gYjBXMu5aeV9WAyQSEg0_l_Rkn-$d1j1*!nQ>4z15~s z^mj`fBs#UPZwuyP(I-X30Uu5&ETtwJadvwQe55Q``dxpEn$DZjg^caAEBY?X{Oitb zl7#2`ktEknDB|Vf?6==$IKO|~6|XyK()pq3IAyV5`7d%N14r@RAqyP3P55?8C-UU$ zLGaDv4E$V~hZ|;g$3F*+g*Rfo3}P2^U&R<#Sl)!#r6c6v^c--ZJNT1X6SQ4?>0D1$pgEt9>dORL-2fP8FY@fLfd)2pw00W zv@%%->t7GVU!8wT$G+stDc;X$nd&gU*?A~;x$lk@Gqbru<6-r$C*9C;=Y8^!I>=?8 zW=kEy+fd1;d$jIy08e_>1A~9~;@X0N(lkSL)+(w7_5F2V?c)zA$IaQYAqYQTGh(%< z9_+vO6pVlGLk6XKI7G0@b~^UwHA=(ynd2xl?{kqZmbh_5I~fMHiQ`ZE3SiK;&bU&Y zrSog!`QzU_*6NhT4%aQ=?NpdRur?r?S5^Lws9B6pH+JDC2%_9og74i>Hdc9uRg;_u@;71vm7dDb8)%3kOcNg4O<}JXYD6|ETVxj{JKDFt1LLCwt}QY zRl4%h8>5eD@uM;BsNO}H|AxL5-_CemTi6R%N!sAArQM-K+7THgQ$ej=5 zMD>4gK4K)Uh&M;izTsGss*6LWo|b>4E{1ON%{gVE9jcf{;AHQPJXz6Je&Mf;A#UwC zTJX#aH@>BLHwUpv$CEVvhcSEiY7K2mcR?q?{CIgRiT!NCxZ@;UG_t-4YH?clU+eyO zZG<`IXKV9&sD{%0r(PQiRDN4Gy4!gyuG zscLPP;WStI1?anK!jVk}1;a%jJ5*NE2*a_se4-O9X<1CwB403Ipc3x?M9{TYUp^Tg zhC@_r`EBV!`Ek$A7_-Xvp8Q<~PXoVlUJlIg*3Y_3?QRJ)HAkzkDVq z5k7ZPgB4%uKwX?Uo3&TM`CmJr+rPiEiO5ww@f^YRN0cRNw+10_54s$ne^X8Px%60$xAW!PE*Gec_W*wULU;;? 
zh)hy#J(#MA@3X>44!^U2{)M_=!7~Y*as}=jNsHwp!Q9s#=O@;x(o|%tW4B2sq%S{+)kHn%Bb+{b3>*v_q|1}jxkz%v+;A2A8TP?F zv9264rWL!px+LMv*Ty{Y$0Ar2d`51pI9feE*$TT25VPm6iCFY{FmG#}kJjiq)xGxf%{eu+v+m1UIt76^B7~VH$H9T&$6Ndbmg!-@Yp-o3i8gcoy?E`#wyEy(y;$X!jkQ}-W}z%9^A$`c)-DJO2i z@INKh4!Z|o59dJaFE}|j7DQqD&BCFS9?MNf&%nYDv8>(5ir&3@;q2DI_^wT(oO@sR zGQ&Snv$?q6oruAfd*v|o<4$?Sz^ioidH@7E-h!K(d+@5KDRSF@v7D4|iLx|-HhO#D zXjK!mI^ct8^TOqoIkQPc^gzzuo9+hc754bafNQ&(&udo9o9Do>T#hbn|eh-iFzO2mScm-chvaFYh^x|yc~{x zje(H)1M%L22-M!`f#u<)6hALVx*gC1`_F2_OEagiPN%!%GC_|wWEP-Ps22tgHk8c1 zdEuhe)6yx3058iPT$ithr)vvI^YH|pyKg+UIsTZA8jgiS2e-rS;##^M;cUk71c=94N?G9u8id?++`HJ-Ewkj%*9)VdskzF(n3l?Q- zg??!|evYrClbTE6;lwR+g?1-?o2JZ9zK_Gab$O`fo6Mmbhsq0GT#^1L@xPPCI9PrK zr%t^k=O67cbgmV2(V0r!ulStp}g&;qzSFaaj}kHa11@odl$rDyN%kz@56TKUEtbzh#K`bDYS z{MLwZ{!m=9<{;_M$feeOt0cWk3E1mzKC8Qz1@5FP51Ex6s^cDHs_!e63 z+&Fz3WRDyL-CalUSOW(fe62{ze)}4}IGFQ44?SEf z{^sml8fpKW<%))2!RY_j6MtJQR?N+_!jO821H7+5&k`3hxb;L)^WK{s&vnPypN2u) zfiWDLV~PGt6UnZ3H@=${$ww1j!>oQ4Q2y3R3jJHi9t+pgfo(hOZcr|F7z z0}S0Z58$gI{Vg4UIx7lz|B;Kwd)nsl`HX0;`tgv8P97EWc@Ms4-5xu-h4QneM10o$ z0gQdKmOg8bpjW(bi*O$r@@^)5QWF`i9b<6nKr70Us%dAO^iY*c?oFp5Yx}a!a>4XGS}$jMb^zD(WGUg* zcKMW3doaFwjr>j&!b{~-^eu8Q7aUyXN8nqtz8~^_m}PSrxMHn^4?dri;l37eo55Fj#)YpL_UkfGZ+* zc_8Z{H6=SEYCV%K-u0%E+8i49Hv&hU97w+3YUygJD(W{*!tG&CspCB{E3>uWrJ9}D z_}V1w>V~}cf(3b;xkLUHA#~}`U}>(^5Ag8wqg!Dc$#9K{v{!Q%{Yf?y?h?WLbWRpI zxNFdNO&VqfZ|iw8m>QwX?PT+&bBZ@U1Bu7ICOx+)Ja5hbX`{2egzyK zs&TO62->nqjW3J&NI~g5P;%)i+iJAJ*K

tvoCE0UjQ7}60jmMhC-kdZvj=x02 zqGXtkI&;Onezb5sKguOrodVqB|C9X2i<xYne%#mYf&9JJ6fS7$VA-?x()%IN7<<cs zci&6mOZBJe`szYHvO5rq-HhQ}N*uhrtHT-Zx?@%8Pja=+;C=>Clon)$PgTy#>DZnE z4xUoTJv7+5vLoF8PnX}SuA{rB9B}xl_fnY7axzfW;N`>4Q?`#EdxxBnOvmqnp_`{u zz0xXrb>Jv13fU)zoVZ0pmv5seRYM-}`7t@p>;@ap-iC?Uc9<rdW#xYZah3599`Nfn zC1jP!XRi0=e-C!TBMPI6CH0cl3dFWPhB#sLADR_;f?OV$@Y6Z2{H?MQBK7?txyNVf zZtsU<{kxJSeWjO%<9Sc!1&XWgiBWz<@_z;6=<M#ncp$1-N>utHIo~qlj_JK=jK4k} z{X82IiYDS+(Y4EN`-L<QnB%2A7RW(b>^NAD^}73_X?YuVyV;RmDwoot#P+QEaUgfJ z^kvJ7dtqbwa{6@eAp~vg!*ALLNcp$cLT%rDFm`Dc_w4=wIyD>9$#sr=X-pRN+|>#% zckT}7roV#HzvJ=wk`Y*8;~+S{8jRsB@S~qPd+r|3iR)wd;^s55|H>bfY;1$?MQ1p} zQ=dLAc40?<;i_9hU>A23W`7tW&fh<&t-di^^c#)N1<`!X!GR6V{*oM3NAux1#nLyU z?X>ccHhNroFRLZrfsXrx%j|)0WykxW{&B(0vr)pN@qek(Tf!v|?vTsKG5mYGCAL^O zvPsfJUYGxcYHxSp@xL6gkJfiuSf`8bQ%7T32Y1|B=+FDJdh+TmV{zC0b|T~Cjh5~F zDc7hKt9a$oDQ6?>G;<Q#Rygt21ra#A&WeA`6JA+GD_))PoYwjWa#OYv9+*%Bc8$Lw z{^fk?*W$#Q=K?r)fFp0Ye2YT5THu_K+o4TXu{`fi7+-OU5gvkZ+;3qPcbwzR8AG*j z;Vi%&_CDykHV@MWt)uNLO>t6Ud-UvSM6Cr|sXI(Tt2b>W@Lxo+yEVAb(+~RE55wG+ z)^gZ2Q*8C+fJ6(+q<JX?f}Iya%|3}-=lmFU)J{UZnQ8obbO>H_H50kbFBG_S5D(A~ z<`GJoIPk9M62|r9i@qZTufv%ajkZPGhrYb@`bPLYAd7t`=VQo3O`83xL8{f0;Zw00 zFY9I@xLv{2{A4m;ST`LU>+B$5s|)4q6*Cy?crtxqB1as1Oo}o!-kV+oS|>Xz3cg!o zpPlWYd3Gr2O&*2|!@A=6x^NcsJPsGo-!~cc<gSx~j&n3=Uqu?)uKfp<@%FgTDh;%c z9hBev(S;rtu2(;O@k4U8SuZswy{gv#=L3FO?ik~@3ij<0cb9D@@OXC>OkMs0MvIu) z$2vm_s+a+NU@ZJvv9;RA&H?Y~#_&wx8`&*p?mczi!pLW1(PBq`_Hqozq)!95vFJSP zHJK@AjkJ^ctcl~`fy43j(M<LnCGO-!C#Bx?b?~s&bsBQ<0@(bigJTW@q=kYTUpA*T z+F$j<xC6uRlI?T)_`8P2niXINy9bJ8f6X~|`8_HT`C(P91nlNp4_z|EJF#A!4(7Y? zCNCr5vDJe^(Vu8^T?TF(a2}q84+1kQG2<RS0CQ#?lRn$t5qHB6aQ5;ws(;>-Z#}<2 zm*Q=?V`>`i+ULi!(~uXwQ?D+*(Gy=kE5HL{zkKLiIA>+Vv5KcJ2LBw5Rnu!I<gg(- z^;Y8t6aUfbA#wP#{W5Cj5{LsEedS$c8R*z*6WlsH7v|*ak(u&E=+m(yFEBYvdy~gu zu=N(Hbm3@>;uAD@>OaZ%k`<qpqtR!sa60QQC-0})q92?@X34u?<KALf>*ijC)#brF zXh8=l$x~#-KWvh_iEq_4xFFmCCh}opRZO{<&mAUhfax6yU}1GSYwq@zO{#jr)v;0Z ze*9Q6{A>byb1X2T@Fm#1I6$fQf6IF;he2@uZ7Ki$NICPij&Q#g;H{e8c<%IF^1JJT zUl+IIdGBtBJnbF%=Z^*$=>1jxIA4R$6-4r;ur^o>1$=$icosrO#ZQehfckBCg2yRo z)Xd@h+&mXsEHBWK{oy?1Trs8fRA7^NG})I=hq->6WNN=dI`VD--To?g=cT6n`du%a zA^nCdU03dyVTFH}r*Om5R4mnBMXeY4)92#1(vD$c@o~LB&3V?2Yj)c3n~pN&D1~9_ ztmOc1hsjFh9(|ocQCZ9_ZLW!&mu4zyUG&9CBgO74AeLhv=W|2Obcps$fQxHgcx|F3 zpG;Z>i#9}{zL_;uw#mW-TW|Pw$3gU<Z1C^fc&b^|TlBjb0<uSOzxBf*ZNm%rIi-+y zOm^W)_mA}S^gW0Z*@n{-)Ua=uE_a%yfcEQFLD+i}o|D*vuQYeyzAJo4U-hCCcdD)M z`iZ@;q{(JMzvxn08NK-ShjyzWhg@q5LA`&{$r=gTbob-sIYT%^DUv+*Ch(WDXQ1V# zV3C`@rKGZG!4VcS#_vYxo;#Ap4*f<4LgJ-XA!+RNcNng6AH`W`600p=sBoZ(F>6(; z;;uK#samNckC>lB*X%S|{@M!%om@^=Px(@*%>cgUxdFy~)nlv0Mu;P~Ld=gT)Zn0m zOKh58oHF9eSK_?iu}C;YqUF|=BCE0@hMTxIwp%{{8$3#=_J%IU>lJc4&sTK+V=AZC zo3YmS2#6Wn4i!fhz^T=i(*7=iXf(2j_nsXBl{!yg@ULg&EPKPf4U;Lv@io{?Xp~O~ zuF0dQ4s4?qB=U`J*it)`<GieR_>%y#dOHqY(mR^(unrUxa```{46M34nN|M`#iBL0 zK;0${G%Kp9?ra2<9Ill01N%|r_c+lNcm+xej(|bE5=WVK;`t&^TDE8hv=cjG^W-u( zIjOk1E!jg|o86@UAQ8`d*TS9ZT$rx68tM-|k)s{HLb6JzT#e&6yty@J-(D^ikS1!b z7|Vu*kzll=8*8-{XDZ>6`;@84@l)2xt6xrqz#uDpyfGQR1<a5Fle{tGku@)yHXTfM z{dMorr5o?N97{E`BI&&0XdYpahez6<f}btJIk@gA>8y@Iqudju{%5ds^lcK~kKRDX zwg%#?x?k|pPz7Co1mngHoj4$>5AJ^YTzG7+!dIyUJfcp(+1L><z~(%~PJK;kLqa%d z^9IETg$8$SrCR-Uz-;Q%eGIpaSKzrR4(u@D7)j!OvTe^xImGrZwP~Edzpl8j`)bST zCyQI-<dK%xv)GxQ?u>z68L#1jQ+qsRagshdWMXolH9nQb<H~zot4{UbLxJtSP;#~j zj$6@=>%KYR2Y+pNRGf?XOIzfEv%lcOyNguOlF6x8kHY3Yqxf}q;iMU;$q%mB@iuF3 zIW4;mt{?d*FV1Hw8}WcbX8eVcSn>b2(F;rb64}AFrOHYz88?Z!Ohp@a1b26I|EGub z8~So`8x{KZG6zhikKqu5G@AL{lI`c1po;i?7X1)Tg7@aE=efJO@_jL=>S-%t)sh5n zKAo5B3WY_f+C1Wu7k|?jhN<!6g!Aa8e0G(DPS1L?$%`!9Qrla){xuUdmOPdFf70Ru 
[base85-encoded binary patch data omitted: this span of the patch body is not human-readable and carries no recoverable diff hunks or source text]
z7_ONeLZV5ncy`<-eiBKdLpc|rpi`AU%R89d$u+Pss2rwyy3%>gk@RQHXXdaiiy0Ph zkLWIW)Tvkq`ps9wW0&Pori}ZLP6g7nN>dvD>refs6SHXe@)z)avoj^_?qOqMY}gIo zt+0>t!=G90;(fdn=u`L>%8O127x?~a(TO9hXLUK?*DkUCTVE_cxti%``BAo~9?ZCV zibX{B#;+Q+!jp}=*d~gh`4Vmh+Sv$}ZIc<tq)^LWWHwE|h5jiva9d`Hcwxj_2%xdx ze_Ipl+?=VObS-;0i{G1!ABmfunPF>8qHwNK6~8>X40e_BG@^ks=qMkhj!r}S6@Ag* z(P(;HiR{v+QB?7Hu`tZ@9;=Q!A;{b3(2e8VlYQg?gdTCDoi9S@WqvqJvucObK@QM0 zE(bPC?T1G<GwJr)7|K|Xfsb_a={C>Q38rPtcZnYbnJD395NMS_0A~3e1{2j7DF5`H z&C&{?^>W%+A%BW>-R;d556*`Asto#A-w#yk+-cLNOx)(NnZ4q?*ri6%Sg`Lhn3tD; zTzd)guq<Oqp=LORdqVa8&co?9N6|p35>`5GA-nP!vFoP^1y#tR<f9I0v~YffW+pSk zVEnebL_BHdPZ?c6n{$H6dFMm8f3Ov7zqycaXkXrMvA~mV2L$h9lOeXh1ysclE0T}H z2Y(ze#n~N<GMvc3^s0D$&3JmJT?>t2(ilD?8!x<@jNVIYLA>`>d^9M5+K#E>*xo*9 z(;0%rpVaWe!y+f&hG3lHZ;tX7k#y@JV_p|c@pft&Zu3sYc?lm``Nc|M*%4Fr^_f1# zZu%}1B-o>_ayv86ejzMuQO7SXjqs@7P8RGLgpbb*g_XBQ(mnqh?Cq9MEY@%o+8Qa+ zH+;zE<*UH<vAHlYVF-mJS!1Ve9-Pnd1G^7_P!Ja^TnH_5^cpLVx-w;Keo`trMVr%{ z+BVkx!W%Zc_r~=%)tH2gDLg8Su1clBG_C0vtz(6;oY{Kec`tfz^8iM9%%jmB`SjnF zne^G>F{|*2q^~<dF(bEvUAl3XT^L~vtGjkEujK`l_(B79Ed@CLzDW3Y!k(UoPoTl` z`jD0OSC+m~3mbZ7;-$4;;ivIvcJcXSGHBnzQqCu#{I33Fla`By`?v?8#GL1~)p+(l zgr*ti(vCUaV*i>3@IK^&drNjR2g4#5yx}G&+~s}rlwP!*&sJV)^X@1OMT<2faNEIG z%zV8n>$h8%PHi@K8a3@RJGXW*G{|eC|GBwnwRO3$W`#9&ui*3PbQ1BmJlZdhpdFW! z@a{fccHD0UPW;^^?roG|HI53j{fagzpI;81GbXVk2M1CApl-G#IRu5r_1u4FPvyH+ z(EEr9sw)hqmK{?_W>7EO!)F!KY(w$Pf5tdtXDYo*t%VfTt4wZ;8|L&f#dkb+AjI?` z<M?GP+3OLrE_p4)j_ZYgMqFkl*8^}fDsgw&a7^%>j`OpNI6sm5s%&nvclpbi`ocr3 z{7o$^`{aZ{$~7>`(H>oLRztvEf9lU&-4BOO!^7QY1*?0-lB-@mY}!jXy7F`=P0t>W z*3YJ6=dxVd^kx_-t3MPsMDQ-wU@wB!{-kZ|hHoBB#uLx4gL&U%7;x(z<np=iNec_k zz9bm!Z^=&BS#y_rgSgz^19xz)eJ|a!?D-K_%92?IKYwyQO-6^)%@w{v;*%)y9`=(} zSU!cJmmTO8?-vaF)gMpghdNyg+y+v@Whk&8MUZnBB>x2JQzApN-kf9G730~yCu!*J zkwfNF=7GZA8e!~hY5bGj52f$g<6*vM7R&?^^-r+DoVT7XB@tXs1W{;5qHuD?PoBRr zm3-WsMT#1Gn3>`v!PV50J^1R5A@A0~8xt#h@qHh(-Z;bLTzDVR_9t|E_^}~M!$|w4 z6*(Sz3$2T@oDK^^=-dq@zQfEW9m7yICZ`?Fcszvn2X=sY+ctLgQM<6K<`}EWm!>w~ zqpZEzfU_8Xz}S!woa#K8&hOVE*L5!RYo-Mr^7De2i`>Ve+yN2VfCI0ov77x*K~sE( zxUo<N6L$T8@*TY>Uek!x87a`o=!b$~Umb$BMXc~5&u8}fCOSFh;^#3yt^-AunjI`j z;$GN@N+V1?YDwC;`$c<=$z&rR3;$KFf(Op_P-Mw_pWCb8>W^@Y9>d?k#AjB14Ek-H z&N6xlI8)vU4O6$Uhf*q7ZWq8^3EljAyj&cEy<z;7U~=exP}myjO@8#6-5OjePXE&z z57ta&b}0(v=`7HnO#{g==`3vJJ)=G*QlJ*fnc+jZPf^(tsdYRRUCcx?yZNmA#0&A> z881q_eHK<Mc_5m5BnzpN`$;5vW2nxb?-Yl<X8RCPW`Zn)RCL0EaZ&X3>K*o8>mzgL zY?Yd2(&Fg_)_Can41E5xneza=@z#wl;luD~T=Cfo9-K1-tK??bDK`dfhFenIn+P;` zpiQS|>tJC6Fr^`(XlgnX$5`?i<GNx&(OQ9`Z2Z{#ANA~Zv^9CmNn-2W`qKAf{<w0l zIq6@0&PG=zpuzkcI+wAXmEMlULGLGHoaZ~ZczUbA*jo0k#1~5!KN2RtT+O_>_ouDg z89n)VxO9FHov-@MdiS%Wu}aSLUt|tC{j*@9ibkaH;T-(8!iM6w_b+u{5bkIjiTm=n zuZ88aRX37I>QFQ;v`eGRYA*^K@f1{Vnq!JuU&>h>Bz!CJhi~&9uzOPrXi(D#8rJrn zxz5tUcgJnO<<lj|&eZ4k|9>oFS1bD}ugrdIFruh1Ep+qs#K(g_!s}Je)LLr>C*H?k z_|jyymEDBGr{Oqaxf0eTCs0XkHg|>F3C<5ZaYz0Gc6#Y(&Ox4q^%wmm)9MbwSI2=k zN==#!dL?4(qJfmw|0DaMxdu%AQpNa`6#CjJ5!a4A2-mkwWd-l{u=B%g@zH`hC@@Mx zofWzGZ;3O#96ATzt@fZDCwNACP7viA`{3;P4U$ssH@Y)*AbA;1!)5!sV6_Z)zkceA zN5>4r(u*S;zL_2sqj;u2cxMLhfNRslWiq5Yp8uY#9<pg`=fjs?AJ`DndPu%<T{teE zL~dF9z0{Kd>2Kq)Uv?B;l$evwwR56H##l5BsTO)ZPZw-Q^}+H1)5O=WEFjzIAIR;P z0b#B0#Lg`PX@O=m8Qka#n{}t)?&}q7Q$H6L(o)W59?Alv9-iCmBMXao)~J5WV$d&s z&+<-c;P3-C#VaqhXoE6mYM-9UyFc+H&HcYg^C!V*es*e*0aU-fj1_9jVNHM<tIxB> zHTR{lv@8}4`u>Fv(b}TT@<terD%kSzwJ={Ph|Kd7oF;Idl9CPIkuHj$kjw<m6y6LG zjdRfacQB>O9cDiH|6rTv3N|C44fJ+;VsGQ`Fuz?9)mxR(u04<DPhG=0<8$yw&UxW` zvK)PGH$$-?TWBj+!xxWP{j<DYxWmPs43|vA&JqQ7FlvEl+F(ophO?n++Fac6<3RoU zTaGNi_nmNj|4mkR_yQ~m(8R*ty--=r0@eB4^Qxr@mc9KUK6sNubCmPgt%alb9;=5b z-Oa?rgFo5CFiWavHmBpu9R<O)kR7nE5I%N{!t~AB_~8iuT~t_Lb!!-%GRY+Qm%HG; 
zjG8cO{{>bib&n-yNz+e7zNgtR6~}KILa+B5lY*)?*~KccxYK=b+b&JI#q_DF-j>O( ziJ*g-V{n_D5<N?_hdTKQIBeflHn3Ma``Vy{a!NdV;^;*MD<+XsRg-Y1X|JfecRbG4 zv4jiLKR`|EW=A8fJa#uBgx<D4WIen`>AtgrMSqiKhvR39;+aQmmHr2&vSOs@nCr@V z!g&sSN(P#L4WgVkM<CwrC;KMVpXy^@J6X5b(4D<0`1oHI`+Lt33yM|I=)zVw{c0^M z=9%97)34d~2xP^c8DjCBQ8fG$X9k@wW`%7Q6n$|f`5Y^RP|o@2WtPemqqrMK@0i2E z`k|z)%zafOrn9z~!|YODKeFoj!W4$R7w-2y$<EhyFz<u0!Xv%y;(;y|H1>HX6!xlN zN^7mL)pZeT{x_HgUQ(y?w_mYi-s3s{b||(;4Tm|lrQlKUS^P3^Dboo}<7ctC*z(K- z&G{Mg{^j8~?WrzU9~cC4c5q(ZiM^na*afG|E(-;ltD*OX1u)C<r4Y3Erl6haN!vS0 zgw7m$*5xZhi?*AS25IAuTepPwQw-32NIzV+?K-r)+sgB^Z^XQ^UdS4H<9z;{-Z9?< z!+j*|!$$6JnQw{Ry?-%jh!?iJ_rk3Aqu}wB&9H0rOwJOG2HO(>xT>j!^*viENgZzt z?c0r*8sGm_Owpw&D>Kky*?+8Pq{K<weoFAa7e+}7r{h?A&Y51)%5-P;W(zrw!+7#L zv3B_fMi%<`DBq0UjGarKYwxkwRe|_SC6huXq>}BSUBb_QDtPjnB7Jrqiq1`MC5KO6 z1pQmX>ByOJbc%ZgzcjpqMK8awTf>a8c-v)G9RNJ5J(JZh8%A5x?AcCLMJNlM%pS=7 zfEtta5|@Mkn%J@(+*%g1@q-gsuSR#w{8c9=WvI}1&%Z3p=`^V9%@nukyOZ)JI|wY! zz{*kf(AjgF`Hu3&!<pO{J);_4$-9uW=tlkndSM3}gNtWe=Y6L^RH8PX^DtM!6fFVA zr%S~0p<VE@Y!a>h=R<>+WkcdKQFv@BO$D*MZ`0uiA#d&qBMRJ5GBFbp#ykR7v;UuE zQXqT81?E19^OEdzAmPt=oC+Q6B|r1eHOfQT0y!G(JzCg!|0D}Lui<2Hg?nr2ue02O z<*fbmEOs)3cL@&()VE`jSRgkS=VYA}MrtdPq3S0wSy&8@qVv&tgf}|3D>~gcWQel- z-X42rB#qhJ8zO771cT;nk}H$)aB<a0=*k&DsY_PFuF)Hrs=*kFTFNv1O&eL*Xl*|0 z?2$amd<{L<M$n6a!?5^GB+eabjQP8-fmN6sebwP!-OY)TrK!Yba)!T9^hflW@QFoV z{OC~CJBiu7SquR~T(G9H8WPKNs3uT{??Ecq1X(|Plwg4}4~oS(U2|Da@d%9V8OZMw z3z_n@QB<EVi-#|4hm9@^*q())%l%7{#>zyJZ=DJRP6)(n-4BGql@(C6<_FXJVas{D zI#})XibYEnf%Qv0*uJ+Iq)xez+NsY_a<2d#hSoU=b$m~`sf|q@Y)wATl+nG>gci5U z@{Ft+HK#9i`jUEsshh~*h@k1TuPh6L21Gz$q$Zu5qff_A=JFoQNGkiWPBQWN3Bi{| zlg1xqw!<rhYE%m$tCu@>iqAlK_si_9=3H|A*BgTxPQdSadA#9xkS%_tNVALw(~h`g znzCyi9{lG|p_8tF)*WTK!5vyF_9@ZHpEBf9&bycA{)3zP2~_VU5K9<CxqF)>$M3q5 z_CI5qr1gQ-?D)i9-!{alTcfBms+OJSJF%bPfe`TEBfRZaCEe*0FyS-5heTWz9-lYG zJ|kOLc*;avAkX>tvUBnC-brZ4v(}IA@jY(GRq&coK!>6=>Ad}TTy-d2+@TYV4X%Sx z-#CCuigWOx)=PHu@k`jeG?YDZP$sPnZg}O$0(f~Q5Kd@+2kAp|;8)LCRy0I`Vs~aR z4~Jc%m>o&)UGmBAx;rK0rqds#BB+%2fF|8&G)v7USJh<PqkoT8O)#aLYYM0<R>P5) zd9>)}Nzu(n5q&ntq238a%sN%VY>OlDa*#9qQgf%|Yp+3B9EcW|hT?h!L<^q=@aeM+ zmX`9*DsUY0Ix&-T-$vq?fT?Wi!K;oeUynTq^`ffJZn&hvoQxdyz)l%{{@iw(F~<9_ z)eh9$(jz{&6c5jC60^8hD*Sdk4jCI<$<XsY;1yZiTIWHU9{Yv!>EWbk)r<6>Bx1>5 zm3prwrkLi(dxghG)9ZyZu{<)DMvt$BEuLOZYKNV0ki{TzM#fOua%Tu8b53CWvroeE z9CN5m{Kx99?t;=U6Y0866HE0}plu4rAj&qK?iFofFFxJp{)N@-dd~&nYU2nze}6vS zEJ>%QX=V^O`j9Z^=wJ|!B#Xi8m%~nzk4z&>o6IsfW4WI14>VQ?@@-?-hFhuF^3(`N zNJ`nX>|{*LHx%A`wX-Q-R*9V&dSrju5ubkeB5rCLi38RiXB`ik#U7qLY2z7<wWeXT z!uK$1vKWHK6?fUf=TWraTPGu)K`nc6Nz~YHj4xp{DV^LcoZ~9Dz^p(TK0q1G*5%Qq z3ml&2a8f)b-J8nfc)nf!hmdxBGyB&aLP2Yn2%Z<);e(eloqJ+Ni|)N;GA)4^*!mbI zJy*lE`9U<iJB)Xc^8~90YuKK5KBU>AEhO)Y7lsDyho_69F&uc8_<k;Ae)plAU8h;s znP_y2@S|1c-z3)-&BE@(2L-z|_gK+JCp;4xLCTzwF@0?=)qhw8W^YAyzD*WKPFX6l zQ=#Nd)ol7m-dC76m{x2%A@=2+UYXyDw6;W*rQJOuz8zsiI{pK({HP0h2g{O=&m*Rl zJ``7`b7!$oBuq8ZL8-m@xU*|ImE;*?{L`K6msX>ANZybAF`kIPGs-zv!f4iub{O?{ zKdcCMBZH4iU`3=M+dR*h^bT)<Ej@W8d0Pc}-8cDjY$3a|&RD43xfsUGy31Y$&c(M0 zBSpPvWy;R#f~N<0ChzV9I2_O5TZ1v`-+v<{tUAH$PnWP@`AK+Ia*1h=ehqcs?XV+a z6k6pDLfGJo-4#<wW?>s>=&odMPd^5)J`$#w9F5kzZ!iDT7cO6sqJcK+*qIGR!ewC< z)N@8k=jBXTCs#myK7~`$+icKX*2Zd8HnBV52f)y>g=xi|ge8}hajdL4`i4}C%Pg*e z$*?~3y2=yY%?l!B<3OsJn2pX4TcEmB!t^(XQk%CC)*kkv+>&`D14l&&mIh0$DKx(S zXf&(ILzScUY^J(A7S5hZy_Rx!LRB;9KK+kflDEO4e`;99T^8dlDq!K>DLCrjcedU; z2siM#^~zBr@ZjS(ytJ;8wU0XvPI}hVUnvphU3|tiN%yC<my$6(cQl!+ePLtx`NpeD z7DImJ;s*O5p*ZL}?EN?fcX=k@wHyVyb4P=;3grZiSIsbQO$*!jeI2k%fxPE<l3jUb zh092h?as2J-JMxDr!O)0x3}1^v<Rx1V?pCAhT)|Bp^*7J3#AW-<Fa;TJd~Azj_t<u zG*pLX$t2RQhEc3jvzJhL|1c}sdzT4^W2vT39GiHk3~Vl(facszmR6q2uBP*M3xBER 
zhs(32-v&{aM3$B;OoWlQX2YXZ+n6m)rVpnCDjs=`$#A~DN>C8}nm0)V^+tAVvI|>Z z_7Tpk?v~7z-2^js4~3|Pd;Gq=R@iLvlG%ygkp5Bvs*hyYqqG4u>iA00AZrqS`g}nw zOzDP#Q{i~e(hV-%?T6{J(`b2THmq_9#H9!RvBP_2l3R}xl1w~pJunZ?ztF}9Th_pV z>^K^7wnyygZHdJr9>BH*3bdl2A368;7B}9~AweaWPE{&UM3OwN`Of#A=VaKoml|vp zJ1elvW?_xPHDTn_K6LY78yqv{|MLnv^6nnb2CJ@TFK2vU6U`sOX`dCWZn~{g<;n5X z@AGqs)zJfRv@w~S-mCEM-Z1vr<vI)s48gG-n}onq@5P0`t?}@xD7?R>51Cl_I$g_p z2ahi8f|NP_^!@!IcIx{Kuv~l+?0foBFY^F=b*hC;%D2R4pU1QL*|JWnhwT^DUKPNV z`bA9qe4S0oQigT1hFCgv1l;LfFDNG*gWpmWP^|MAlrCDZAqL${)6j#nS)&nSW$JyB z;%LFOK+wJygDP1zc*fI(N=y2Z%h7}EX`VB!Zj8W9nx>$@d$ut)1L^wL;m`@Lq;)Zh zcRDA)Vi_e=Ic&=|oAaIBN>eP@sY@q}!>Bzg-)SWGV>DYtq3pxK)F&+p)@;utojM(6 z_0}KfX*V#<_NkP9KNpukUkW;Mf}P@d2b(=PIA~Wda&6!Ti%eOplF^~gs<V<Ax|2xh z4tG-2^$2cvV^F<vA?U>ZgppFI!d%@P);S=Y{%VfFx)k0UA1g)1*H;3uDby&F%*<;0 zk?d?gYQ46cg>?<Y(UC?ZYobLzwiS?RjVnm|O+~|e&S%+|%b6}+uq8E~=3bgd-<!Np z-0tC2{-lC!3E2T&(f!GI%2byAcq*%H-vY;4Z?gM0oT(yy9^OGdD;=LMME<H40(<O9 z?wv0FV7x<U+KZGt`iqC(o&twsod4@3P^8U5u`+%Gv%L|4r*l4wul%n<(Xw7FHvOS! z`a=an`z;1%r8Fx1y@9n?Jz=Ho{<Qy*HA(TE)s?HZ^gg5zu2wd~d|{8EKKUrLMvS9A zzs;E1a!d9`xefk|kAg45{UDID>yGq$17oCDvbhUFIB!{=-ssrE0zF023S7lHd?%Ai zb*GbjpLQ6KF`blLez2smYWS~N6_?&0gf6T0OMa}-$5*SOm{xGJ(@Y&X+Rl5l>*9*U z#W`MJp>IRczy7kv3O8BzH-c9c+_^hChK|<DQ-Q8C9!!};3(R^8AEK?{xlT3j7O2p0 z&V8&*=ttWNJ7C}%YbP&hd3@V`NsLQM#*wkyFYb4mxydQf<5fL^;_6no$Fp7kR!iZn zg`eSL6_V4FApCIpHY|C6Qtaql!zz2`V!~EGG+q!4vmW{2+N9pNRV;@?Pr0w%H3Jh% z@^Izw9;U+V$SiRw9I$=~JJU7`+C0<wyEGJy-)g}Loi5mI|Hg5|M1fWO(?X+z<sju| zO>gfEr8!C#WTI^b!CSsFdrBm$%4<#@zsKWsov~EQ?`C!G+u*XX5?hz8FX{f$3~e43 zd}h_i)?0mrKL;!zt~iuR$A@wj_hT41wIAf>tJ6=ZN6hjU-)H@dq^<vk;^%kan5Dv5 z-tiNttWFVSIdARe237iS*9%{M2%<tG3vfHQiap9*3lB3_gOC0r;fUX1$D>gbXvnz} zFn;44x;8=%HGO;7%9LB|R$-xNJM<XbhGrJBZxgK8iD(-jN6+PaasFv}w!7Yru9see zz7reZkD&rBuuK&P+bdy8pf60jp^MC0tS{>$MbF5DzOBio-A5&`Y`+WDeH>0+7Y~8a zgXwtX0OyMN?`L}N29rZU0(D$Apr4j*q_Z}b86@h^v73!-`^gFPPr06LQPd=*2;QMO zXa}2yPNd;)H?W<_hIsMsM^;BgtSkQr^i}Htqp$zO(<UttWfg}06K})y*ETS>ESv7_ zccmkh)9{<zN|vr^Mc3{Ga|gpgAwi4pZ1&xQA;!sUFz2WJ^^r%H)fyDsFpi=^_ra7@ zXUwTC6m_1Iis!z#(wW)*;94k$<Mw|NXU(l-&vm&Ar|BKz`!w3Kt5y8Wc}&y3nc!j0 zMe%)}ife|i5RNt4Q8&-@+?Sq(`Qwr>w`wYFbeWIaxBnI0gQD<P;Slm!n1DZ*G=am? z30UT*09o!$PRbiI@b;-@cIqql)-UuRpC;bRD^;ZUo=cFppdU&7)y5_5pT*iXZ`9bA z3GID1GS?X!*s!F5lu;apdz(_2S-d882X(SveBQ3>-yed)Qoy=(6-@6ehbQj2SkRo0 zyILFBN9{QLusRth-Te#U0eZN;`V=eOo=U~v)DdR7VoJ_UIJsgbs1NYRmpZ*@qsVtJ z`<>}&!w7t=x|}KR(xp`zbIGCCLwK#$hs~PHSua9=lvXmPUXepkW{@T%d52@)vS7i? 
zcagB8PapBq@)+7Q{U|%2t3d(l16WJ(1o{xi&l}h0iGz-Zljinuv_8Ot8oYeaX)F)- z{aPd{7bCT8=A87hzrxxIP14?!PWU<%-THRI$BB_tqmv1a+>;w%JyATd@EiP$Ou{h_ zV@Q-qp^=hvf*#NH*=+KnpJDUpjjA3cjgzLwgEz3V@7$^D%U~=$U`ojkK0~B)I9;eq zBa^wNw9iiuy-(S)u|Dx+V3CRWde7P5cxOBkzn5`C29ti`fX#giFjaw{&34SiX=A>D z^dNp;NX{3IKIBg3=s&`2&3sf&jwKauU+UD!C-teC`1oQAJPp(!H#0jXl{7=h9ncBR zX6dM5HJi?7&7j)eyM%>hHt0~KEL_<pIzC+MM2<6i(V`+bur|xaZ&x=#P0u>G91+U1 znZBZJ@jMJE3&0|u1k~KO2>cS?!xkG~+MVu(88Oy4d)X-TYMYIL!gT26tR*f`c?eJE z)ClOG$m+YEvCgtDEV;-9UG%0n4L+Gm8a$8jvZ<QsM5N+SvrXXrBpBtb9hpx;C8P{m z4_D52v)2tX&|$_-VR-yYrX6#M<qplo9c#ncvv<8{2$;do;}68@s!A|-uM~c&7E7Md zHlgNWthi%_JE-ncWX-2Lg;o8ou|famL5)8Lb?^CI<G4Ex<$fpQ))H8AEt6R;52n}a zpRslF@+3Qj-!pfuWWQ<);ORqqdaK$Si=xx;vwjxkpR}ivF<~@3=>hxwUl2QyvqU&} zY%-=iGiHC~7qAhlJK>+JI#d0%l&Ka^r01dGV)H*gih0G|WxP*d|1t@~pBO>#S|1$W zzaJ$r6ReDJqi&wJ=|9H{S6-hB^)DBKlY}#Ub!3H2mkTgassx@jHal&VfA9EnajWnr z*MXidx*=q$CD6%beK2sW0XAIC!<?C&FmQ7ncv#G5v9o@_6O&e;ew;&67>Uu(xvqPA ztJA;6hb-U0ggNAz@QkS{?y5_KNu~SQkSh|Vu<9y{**$|^T^fuVhM3Y3^BQ(|?jMl3 zJ{Aqv-e#*qbm{u&OQPgRHd$^;A{QThbPlc%?B9&S_KWlJtEM+A4Ar29;2IXeIU`x= zIW+lSBc%BEr=QJQq|g$|#f|Ftx?C4#D!qqqF^;HyV?KCQE@ypy2hcp<dBUs+OWYkP zO?x^5K=)TMtG!@Ht$Dno`_L4<|EjXMl1Mb0r%SgiW6;3z8_a(&iAim>r7^3kpt0|7 z_UPRQn7QmDc=flz>fj74I~Ys`tLLI#8qYpPdtjE)d~m&is9N46{5BU@J!CP3f*e>{ zv)V~Jz?2qA$&!Mc7AhGGfH~z3IDgm#+Wgy<;)Q1Llmyf1rb5V=JYHZo->^BiB;r1P zUte+96@Dow!}JOV3SPckD6BD{XqgRcebIS1SUn2{4dgp&>vDFi!Vv7f^%XRnRB-B% z-e{87hn`HF3mIjlP+Bm9%4AjO(2|YN>yRf&3@oU+CJokvl(L<oJB7Wz3C2arw1oHL z+?`XL{xppQS@l!w#eXdzAId+Yu4d?nn*e@$hO@&BD!4pEi;f)Cp)teN@Z9bUdUKR_ zy9OSFA?gIP%QV=y1D_cSwh-cN^vQnxC5YFsVu}97;Qj~kYHeSVog<5Ovs9=!gY()Y zi`moinOJH#0bgCzgKf)CI=x$clf7IE6#K!CqAmWh;ol#y{R!jHZE*;Dy6QT3?=uzl zsyku`?|&|KE)_1!3B)h8$i}wrWNzQz!Ae_YR8Q`M4<|=rTi+;JqSlY*ml{9}pQTM} zS;Wfr=;P=)ne=#sig@>tC-$w=MMoQDaqQ{^;zQ@*bbAPQ(yknciHG&+kQQKs%1An^ zZh$h~mQ2^928u_dLACQ+u}QiQxxbKM1KiF?W?g#&E`L1XjJYQ5J4oO(y;zVRbf49= z6gy5bYiD0pFJX7Q-avE1AUeIlfK*K0u-F^f_%h-J8@Tixn8y2&txXDD{*l7$O;gZZ zegIC;de6*~Jt@>FgT~hk1p~zk?0VERG%YECu$}5;aAhQ=p7mvGk|y94!$#5M<!wpG z6gSNMvzApRP9noHYg)5Mlb%id1sdlv!2CroiW%J>-k7DL(r-`Jv`7P=g&44as092a zwVU}!sgTxxVbm>C4<X8SWD)Qd_6x;u-DxeGl&M0`S2hbpFB_a*G^t|cBx#t~lZmHn zvv8iR3{^UM($LIUIwc!M<IL8x)oVCUrs*eCjti%+d#AF|OMNJ2eSh3}$<ndztUms` z#@S|zCy>*t$wFkBGY-00EefT|+`oI5b#e*#>ua&Nd%q3N>&m0=)yC9Kax`S<CAhpK zopq$lr*Y+fVZ??Rcw?(CF4<^Itt(_{=513Jn52U{8xkOqyMLc|da$sgb8(T9z%x?j zOlzev9{=crviIlVw;)GWer+IT^Ihw^>pc>!G1<8Ci6Z{JuZtyTKC$vyxv(xZ0FK;o zr*)nE>6-A7StL&)-4|nM3hxJa&(_7&!Q8#{X{R{EbQe=E4CV6|ds4}eM~|dHy5Hjn zmQh>S`Eg-nkf($uEs0ds-YI+-s!9VsS<(HP0(Na~Ds65AYEqldbek$fz4xB9bblx= zNsYp}v19Pn)@c-FGMg^&z43;0eW&ViI`rRXzLTE(id_<iV#sYPGKqXCgnc<8mi6j} z1&@D2kJN4P=Yt0@?)!7-yUB>zJT^qX+TlWQWfyF-jb?8Y8(H9EMP>o<xOdTc$e58w z8eYn5q=a__h8z`+WzV4{<}tJ=O`Fv)?n@l5N*7D^GBuvVY?JPTg9EGCded4-Uzu$} z_O}#TcHkA*?&rLRUDmjScWVb#RI#L}nYi&tCtGY*CS>0c=*S`!MsNB-NU=J0H7*jI z{x%3v@k80F?r`jBOTjG<_29kvbb*K9N%i4;DBrsrbmS7DUd<cRdX_Q&UvqJlT_Fo{ znT1O?zh-4c9WdziedhKt4=;Unp;=>n$T(V;*3Fzpt?q+pV6L)c_Bdm#^2?_9V10V@ zsS!5vbLH@~Iyf^;o%P!%Md5PeaMIxqu*Kt`sJLhXR&yS&()2f?cfbAOga?yocIa{O zg=kCRSrO<T(FL6s(%652?zH2-X=LIY!!V!EYp(H3k=|2w<#{v~;UZ?+TLtepj7B$& zonW`q1uOVF#AEj`c1OXV;$A3%RI;4V_U<KAA8KR2u6$yqIowrK{a4J@sD$yOE{fh4 zH$m=y>p{(7CS_G~=3MAehfUv9K;oEB?t5+McX=rGU%is0y_!l76jlg}eA3~?*91~o z^BvkvOmNvMJG$l4m#XJ%fU<>%F%x89plK-XUO$gb)aggNnnEbII0gR{WTLsIG^KJb zZjqKJl@wL8&2CocaWRg@YGl(XDSxoOBr=8Zi8!z`g-xq^D!4dBg5IVHPGcA9gZg=X zb`VC=SKVOJutKI;bQe}p49~vx6MEwOgxNEEv8P)fl|rJ?Z?q3ZCJ#heMKkJCrGPtJ zi#dN?8w01P((c7!c<50KX8rgEy-yr~iJMdKg{2oseILi3sNP{KIlsa&wo}+pEKB)u z5tMjsJ#?gAVJFIr(A9W@c-7n(#|`LYdf#p7t+N(&M_p!*9LqrOTNt#b=`!~mP3oz$ 
z0I{`+{Tj1`d8QUZ@1htQtuBqPKl`H=@2eGzx&l?B%y99?RCZv%F?gG<ASl;_I|W^L z19RHTbaSWRzoN~|wMGS7(@jyMs25FLt-|&ko+#c^$fmDSHdwf?)5-Hvhgdu%i>&o+ z(Y4r=zT~Llbv0!?{XQ2f?WM*0GY&Xu910f)9RI|wees~hX*CdcJed44AG0GNx7mH| zH>~01C*i=0N>P(DMCQpB&`Nc0YI-^Zo8NH%tjIh4|Jg$8%V^H>-3W7cj6^tKBP3?o zG5eW|g>hE~<Ki0;;FKIk`U4g5-7Pt2d~_A2aqrDwsjZN^<{7&<Zn?NQERnp@_CfUu zBh+cFW=V4nKwZKO;rp0LcxT5js5h(ydH*`7ez*?oW{rk(jp5L8IRZDYek=Z4qfV!0 zr(&hte{k40oR!bt%G3^j79O{!fxEsQ^<AU{@@$QG@AEt;Kg~1O`hnE;ehPND<Vbpl z^h0geR=A!LNtIJ{v1+6TbY>_~;XM}!8m~^P6f6Z9TM27>av5F^vZ23cHA$*`JeCGD zg3LK{HaB^!a6`49cp=P|MH*PKua`}jpVkQG-?f5OD7jJpkJ3#3;1cL1^&himUNmdV zBS}KwUpOM=g@31U-ovpN7TR!<jeVsmocYClVVAVXwto&ruaw9AaoON;`4-cSs{}j! zX_TS2Q<OG{z}A3`5MnKb8jJJjzC34Uqa5ivX;IM?cQPo{r|moQ=;G{Y^syq6)^QiW zi6_!@VKFetweh%F^`p2yv{uOc<U&g-jp$YVG8Ve;AXt(EzTWo*{Ce+WhQo*8z#&eg zSX3sg_%RDqll1AoF3ve`wiCCk^gsQ#ejv#@DdP7BjgUQiDN}1o#IMrfI4Q~=vsQ$O z)45Y7=D9A*Z1N(zaYpRU>K4#^7wfc4I{<^{4`o#jjtHj!MHMqKIC2xq^GjqVzl^cg zT2G>UK%b=<#n9e=+XairIBfm!147kKv!QPS@Z=p+NzkpWY-_ML_A^Q19?B->RFR8! zzLYVKN)r;)g0QuXdveaH!fF4F?1z5<yX@2=tV?<>oKJnuxqtJR!(&}ox~UmfFPX^Z ze&l5F!Q<go?pWe+ak#wG9`{_ggn>Fqc*Wm^owBbLJMFW0*7g`ooxD%XhJ1Qz#QS`_ zw;ko3Be?ZU<*rIix~;`?E*pFxWQ`@ZR9zCS7O1fUBh4Ix%{{1kq6~R`d;#~%)KKTQ z4J9mE2hXOT1>-@w7||q6-!GhJas59Em9sd*zr8=tGS-9Ex`j;slOz3Fq)K0dp4G1( zFHIqq0!4qh3R}FoVaA2|bWJ`Q@5wj=obe~WiX51q6+sc372wF4Xj0TW1dijHVe0NZ zLW5rfZu#tmrNQcG6rqhJgZqQs-k+=`NKsUG*T+3?qRDBEB1IoDVmmGm!<9P>vBcu2 zs9JImF6PJL%;}ulCU+S6=%rAYy%G1yzh=XHU&H7Nh#uzrzV@;oRjS3|aHDw?xwuP+ zoX&fSztr$!>@2qW!zi@#Rid($?-*#kfQ`G-u_W#*w0iFWkL$_wbkkTolkQA<y+>gt z`ePrZ6w0}%NQpiL&>QdmkD~Jqr1E{kcp@Vsvy93pC6P@y?|mCd_NE9CQAVOvBBh-o zDT+!%Q+p}S``ntEMtko)q~Y7p_&vYB{ljt2d!FaMuj})HxqO~^$$zUbVb?lPzIlrU z3@Ks<n&jwVwl{qmS`E&trqj4)C7wOhqFr~L@qKb5IBnbs1z$e0B~{$Xvgd%%)YY4Q zPx7JS4FU8$VK+>z>P;$r93l2j24^CwQY}9lW+yLX29~_<bglqrhK)ndXwE9zJpj7Z z?u!0BC*aHE|KQ6<3CpPByt=@i6fo#8xZU79>J8IKx9mH6Q89!wCB}-6Y{MmvB|E^$ zo;&27JjL9w1|}&trW|w;Ccj%O+|Me+9}ySeJq#4;4(h=BK9OkiHkdxt&BBlSeuAOr z7S`FfLAZ6V23$w%g)B=|)OQV|R|{85iX8f~+ok4s!gmzrq;lSVSv-V}(UjD`HYDG{ zcg3E+2Z$f?`$7LpypPq&nR)3(OsV%|N?n~zEeH5sd`l0!5O`Iv+BTLFI`U~xeF<di z8G!W@HIgwtFa5^(t}87Mvm=Anu}LeY;efGnG{m$YSzj7P1={=JOoR*eTQHIu%GX12 z-B_SAv*?yI4(p0^Xubbqw&_0i%l+nzK1(OOJ$N82i%k|xF7K0M*^g&zs{&PB?#<5m zy<)GpTd}(!h4=h4@I{R)Eo)E5aVt`A$J9dXz0jIHEFFv%75wk>N<ym8z@w?*$UZ3J zolZ~MyNze^Rq|a!-%Q5?eE*x+5l=>&JaA-uCS^7)5tlmbXTIu%STJn~3tmNRz@Txs z?XM1u+2x4=FAuO&!}?>yHdB&p7|F9)PsH>&PBi^mCO!~mz@Hf=w4R?;u52{Lg3%KZ z74HcPCJe$Y#VJ%hTbUj#ort$qXW|@jHswaxkmCz`3>2qhTTLcCTK*0qQ*+Q_-&j(f zF&z7DPDRtYmCWQx3iUg?4VEwV!snH};n~6van3qbvGwc^h<cO51{P&esW<{3y>_B= zHg?=^lT6q0&cN@TJ+WVz9mz`@nA=@L>{ISc{|<&><P}*OTo*=0KZbxor~{1FNFnEE zdN_05DOmOMGSiuLfn7VEg-Zup;eo{);I6Yi)*3jWi$giw+O0y151kX}p$=y6kt0vG z8m?Tgfcl&f=(4UAdO8iqFPl9m=|%#Y1Qz1OOcOe_E0X5q*^r?_o@m`U3zq8bXV&K? 
zQfG`2_vvJ#;|Ue2vhIZENkixZXE&TTk738;a+z8gpPLzSH_+`E{P*oAq|}*F;!!Vg z%bE~8a5I7Jn=Ma~SB9ef@*up>#2LsQvN-Z&4lFtS!S(O43{<+-i>{mBU`s#Dz=KO{ z;my`QnB@WZ!V%EDERuAsL{Mung)tq=*qohvVc03|7jaF+DT%dAQAeF(Msrq=uL0e) zize5#-gseB0%^Hb3BQ^~;fB>0+0oiB4Yp6^X};wzcCDJ{-`>kosnQ(W|LqLC(px0f zWCgI`h$T?onT4+xD8O}%5wx_Hi8<30Fyp8NM1^mI1jn_|k)e;pb@`MxwTmf^zbNcE z`A%3RE@Ja;%%BD8^}@UzuVI_bPu9KW3bW^a*J1mE@ZzDnkmMLf+pYUkbW%TDT)j-( z90C|Uw1b6!Hn|>e6L*!U3PyziWGqc3SLG?}#cORUzI{bpTW=$NF!jNw<7SDIl#K+< z|8ik<;TTMUMA}xefZ2ym!Q<MYI8o~b^VjTyhg+xe=FtdvbHR~CKaFSB&yET&V)z~T z4A49Kbh?q^M0MGL7+QD-vQy_`#>q#}Wnc>~Q&O?C<PU5)=s@L~51IP4NVegRC)pWq z6qjz?1i9|Pl=|=r^fw=j^X?24UagsfKZcmoj_hgp;z%L=%v!;wY#BoTZVn@Db0ezW z9FMVE%}6$W3uGJa1aWP)G~u-$$yl_&M>#h<J!l{t^Fs8k9YXusL&Ya=%-~?ZSuk-A z=UY_wp|vk`P+@R7{?Uj-vlHLJo6pw*52?Y*IWEw1FOsh|=Pv}5i?8ly;4w1?G)oSn zu`gAb<S}P(?=T_fyP*^yu7-DgLfGZqDT30Ze{9<u1u|_JjsBgU_%!}3i}LWsZ#TSg z?db8;0uLbT$28KnFJUSdGvQKuIL5W)W8fnn@^#6<nRBMn_1zli_Ae2A@_5ECWH<Z1 zZz*WLkii9y)xc@3BaNLFEiSQ|jXrN;@y3o^+Pz&3v_JlXA2a<ijd%4#p1)E(JR0v> zW|MMCG@JBwBHph$#rki_p*8gZm~zXIo;=i~M^o<${cpS!tb+Z-A%1NvQ@WXXE7!T^ zKl=dcH|6o=w5gmymxl`NZ{gI)LMRT-#(=>LR*8nvq$V>KZk2~897FI-=YCdVH3h%b zB+$)4cNwQBkjB?&dhf9vj7F6)y>q6bin<~0bm6|fG6{~j+mk3U8pR_ibZ23kG*;7_ z-z~x+I@_4KJI*t=5^cJjU`&-6d`54qFU||;fmh1X=wMi1&J47o*FTDcXUE3kf1Uhw zHChR8#AH0zzn9n}(GCJvW{b-C)1|vIhN5BOJ&``_W5Zb<cmD=s<bWT-fc*{ZLd66z zV&G4)<!&6!_ddz&)6OvIcVpV9X-&Ef)2OXIhp9P_V@fZE)5tR}7>lLg?-Gm3SBEe& zu)wqv5p3*bRl4#n9(Q>r1E_5S)5iYf?A!vy4m>9xT?!5d18F#aZnS-ipu=x?_hoMa z8W;;KVAO0pzqJC2_c`FHTu+=)Re(+ta?t$JU^=~RwX5tmgqxEG(~@`z8IJysMI>zp z-2xe!XJJPL%1**PJ3d=Iv>9s3FN(^N3?Za!7LKZPpc9ia@l@q6rl*CpMdv4jU=1V- zS(>pl1`Q6yVnxw+P&f+sY^6YLlh24Vu18_^sBU5Dog%@&z>-G2Q51K6<av<JI#>>8 zM5nVa#XW<aDNQ2aqJ4wVWv4REfp&P5E`_vPXJG3ko_DIe%pwErDSFmyN|c=^EPtOu zajVoRH*S?wTSJK%uj@-8%GPN6NfT#x-<Dogegvm)Tc9A7A^D02Z0kDii&$}oQTA(i z_^FTNiJ=_^9ON9F5u@;xa8y!OaDh$RRt?Ye7qW~X-nmcv3iF$nu*BFvOf1i!<2Ck_ ze!2#jo<5ZfzYE_u1L<_)Vn(5}DK>T>Ie!d9-><`9M)_x0Jid-y{lXbtpY>t%;-%ma z>5VHVJD{Pr5-wY30C7c}-*Br9j&D2*@rypgK$(-my01z&aV$RrKmN>CCpnQ(Whry7 zkVU17YM8h$41M%hu;C}}!Ts(COzZm`<|Z|>Hx>=T$L3G0X!}IMRqF)#2W4#U+IXIE z{wvgH&%`@tJaNl{`7l{H!rT;i-hJ~9$wR{=46GfBcKHd?#al;0Zhw9@cxi+;HtOQw zF%f7G-4i`K`;c>_H9K9MOL3dDuxefbt|{UU`_*RjWz`F~ez_GaCyd2uei`JMJCs_I zYS@OSsrWZ_GXB~)3TyW!;=JKTxLL-Jt#&`d=H8dk&$E$q&o2mnrR-)w8~3rW_#{k! 
zI05SYRfG|y^B`~+p9Njjz}oQ<u!8sUcYN-PJsM7n?Q>4CQ5#k9E}UWi8XK60MjZ59 zERUaBe2@+J$o`Zgj*D_3IP*w)#8Mk;3y;9T170*ju2}kRVlPO%uZ+{%>)~IsG4{xx zMCXpLV{`qUfJ<!#_8c`Gix&;0hZ~hi=TtWJdBqtIC)(L$Zx7hB$()U@HACML6RJvi z1;wUbG<T#r?*_Zjgtba^6RyCR1U;&iKP9X=KLh2yhhczTI=<sRbF=KBaOz1KlgPc5 z$}GtjgEvot2O|qfvsDeJ_uVFRTzSquk5R?VMpE|Qp}&%UxgUke_nk3*t(+uf;U4kM z6LmUqE(V{q#?gPljZ8N}om!0AS=JvNyzAct{(o)ge9so>JoFZ>W$NI4^A2HYjj@>j zX%4O_)uq;tGSogqQS9$xK(6D$`2RtRx)znO8;_5&K4pQV=y`+5SR&1xp+sY6jH7nv zI0`TM4c+<4R6Sast?YeAw2)aO)W4X*awd3FKn;nm+XpbYB}x=)=FK@2H<;%EQ!J7c zv529;;I`r<bfigPw7MO;m*>LFxTovFsTELHV+q9z)X8<`KDPDgTBvEBjCuh^_~Bzu zoVce8*7MgprNkVQ{O+*WjB(gF<qRuY6@?Gu2eTC&yeIlx1>IWbL;HOtcK`SRVd_0E zl0DGQoHYJHLcla^a}J@mC7N*kp)Bd~^W;vwi)_T{RMN_>Xa3<aIQEh!9;%S%pO1_1 zWFtRA;0bmhB7|gKIYHkqDJUyjKz-ERam4mEX=~LQFut9SSsh2j&oie>x4v@~TyI+7 z41>L(EPj<d>KunwL*wY>YtB`EltxJ#oM`X~d$F!~0Bw9$#~%Dj$3?v}!BT!C9a8H< ze=VFaN!5*1-&n%32N$94OmE!1#Ebcy7|S9HbD54x6a1KzgpRg7aQyjcXn$RSWK8zM zaN`2nJo-0lvc17ldR-E>r4fV${$Z<cOJQxQ8j~(ifk*8%Opr-ogAdx_>T$tj{wW-1 z#y^oPiyTOAR#<^$;cLmtoT2E|V>FE(6pf7~!B`(tfNADB^vuQuE-4SD@U>+uZVca5 zhKFHRnJu}vu4A8i^u(*%4>P^RZy-8+I=OY3QuGHs`l<O>n32^5Yb;$bDI$V)%1s5Y z`-;?@q)1+wkA>5V4cW$(8kj1Vg{m3@S<(D`;>p{Cv3qb6!~T)<kmo$o%j=j+d^pO* z29bH;7KVoaPaoML9oeG5?E1Rl-}cGq={rX(xz-F-qX2tyNA6A&{ux?5m(_SU;mNt7 z*jgJ-q>_)v-3JRH2PHJ-P&xarqLlS|9Z#C2MPi0_Bn(;714sH<V(IpL{GhE!QR;~- z$Uhss-fn>K{e9S{ID6FWKal6zU4&nk66iw1P}+H9JDWJMH-590p@osQ^zXh3UEXt= zP5s)791UWz|2u7b{;WY<dAS+h8#{16K?KS4lh92{r)!quY5V#av|-o;tW-V#x%sW) z+x%Mam}te0NpwgyavBcT=ttGnz4`mCnay8eiOHX*(~tM|^kAkbDa$#6<Z%`2c#(=i z?q0E#?`@(=3&}t76(reNV`S=iLA7rUlj_C8s0|8~w{)2B+o7G^8oygG$SGlGJ13$0 zJ1gd6zaH+sdCj&wHKO)=;oQqK2h&2r#L*tt#5VtfOjbuAg}V+gs3w?9wTI(5^H`{I zvc_o}Gs$I;8y&54<o<65uvh-WR;2A=qN@|OBwO?Etp||!Z7`NtB+!;NIlMe+EmQyc zp2hO}V@P@uoiP8!?sT7l#DAi6`IThI+U`P@4&T_A=gJgZn?m23`Tn_g0#zN^#hN-4 zXh)0$yPPy|SkOjR?*5kvBYwhYr9^zEzeyUNIf>YoAyT8Lvn=AUFDWLBhBGUisbP{g z%vLm@x#dCZz-MRL|8xi{p1&`Vcl4$CSNn0+kS-qC`j2;#GDxE|gN-@-ik*5i3cs8i zKr<%!(>?1z%-Jypn@ywWldcyIf0RfgPRilLxy4LwLpQw8n@i(L0CL|og2k#3c<ng} zGrLx>lMkIKW=Rfxy31#>PmSr6<^)>p6OPW`Qz)~(5C-_VGA+)!tl~3(izXLc_x;|@ zHr<>>uhQkv7rWT)^~qRxJQ#=8@_vFcurTxCqI215NY32D2EGE)Qi&v`1)k_hf%snC z4!3s3P^?itbu6=B%OdZvBfCOa&2xnAx^AJ=-3L=kvq>(z1I|pDgIVd3xYwbY&9$f# z0)B^++7UkEJ9mY>mhC~x7w)i++97!AeI>MAUkbjq8rb!wH=AVkNqp|;Lr0HV;P&(! 
zvP3&LS9Vvj+E0|E#UzSr&CA)*A<tNe%taQzs~id!8{wPNLn!F}A?TVmiY6`I!S)B~ z!CRj^a>yAAn_exI9C&;h;6*Cc90<j0hm+yTmU!AbZ#+wLZFAn_upcao55efkdzjJT zKzg7eVDd8`#0^hbcylUU(rFf_B<s@rO;gBfsveRb&jWIv!8V#p$|mt*{1cw*C^DwZ zZ-3d42N&Q+WG#$35DfGAF4XB;6plMRmlPb#FfDlqO?*YHz)G2YJ>U;-oX1n^jXd6o zIt?}U^VqeVJ>t8WytD7YJD;2Hvw8PZDAMmGoI0UFXD&wJ`%54>Qf9<vt#ZUWE}VmD zkVTtPu8FdXJ*e}1CPg;4i)Ie3g7yn{+D>_7=%Ob&V=k-q8;Iev$J1rI(b9~9-WXoX z`SMHBg&`Y!DC%Cl(Dum>WkSN3wdyolk=q}wZponTEmzvC_5sS9WBDvIpJXot!5}*k zJ|_2JGqPthMelqx95M*is7kto|4q4t&tPcfC04v98Q0uMCxssmq`RIh68aY<(7o1h zy6>t@Ud<obR#hXo95fdPJdeN+XUs@9jOVzAU6S%m5P1yng+-pS6mZuQ@Bbb_SFgJ9 z4zC}|dCRf|gZt9@-nXD`Q3JeFRl@V@XVLM?bMQ=$R>5z8JWi{uVD{cT2bFjPreD{= zbC(P7&V-?0-0+a)X#AAsUE0gO=DK0w^+-P7+XnhdR#0m(g#w)BP}t6CygTMXwt1(7 zmH`8B;=VL|DokdPSq>QW=?xs|$R(4hmstB0XZ${*C!YP)0kQqHD6MM@o95HVMtw^X z4z*Oko{urYAAWbqZkt90>=-ML4x+H%rYIYdfpU6k_~RSzE10bS*MZ-Kdjpb$kly`h z`Lz8^`SU_oz2jMwI`9PR>yF&tJb=Vc>HPOer$L2rbVsF2ROC+OE3O&%vMhz#$}S4N z<<n^U<akVIY!h{_U1M?`j-uCvTdbo(37@-#bJwdqt=++AWJ9~acd(pz{pBB+KPQ+z zrwj%8l!4+h#c629xd*RrTw`80j)CFnUm(w!dm~gC6MdY)L;eJ7ZL*{lO|F9c-j~wG z(V1eeaK0<?io<EvCE}|WcJ%k#EHXb=hy$)CQF*sD_pcjKxke3}*`pV_kG#)9?+vA) zihsl_jnOnwFPk90FBBEugI#xif#<g$AXUqx{acCzv$1!Dos(Wj%ze+pwd1iETD%Tu zR0>_WIFs&;>?4%PXpm}EJ{Vmo60>b&@u-J4yR|JD4Iz!hFLznDGv`h1Si>~GoM4Wr zDJbt4Pt(n`F#ozg{z!QXuaBH#C+6j2QnnZQ^Sz3n^-Z`KZ$?Rnx|r3ra8i{GN0T8c z^vA}8oDyzAoI)?WIk*O}d=(g`<YH_LF==HTo7Mb;&HrOSi^+-<3LmmeIwf@ATK3%H z4tp7`Lcg|JlKfY740$;hA*Q);#nCl_;)7`<yiLc-N4hwyx(P<+y%v_dIx4Cfhd`@O z1yeXaqj703!1fS#^j{T4-nA;+Av*$F{(924=i^-Fw<tni2Yo6ZdQWsKGZhQn_eox) zn^WMp_0lk>SoC>S#oa%{B#It?nf9hZXszEN<aiGx%b_pX@Xb6wCI5tX25K53EKiHD zbr3bEThf=m-&ki4WmcTFMVxXun=;1~P}S3a>_53+nsYr;tO*Lhq3kcKvPooD+B0y` zrnhiaDNvlXG9O2{HM9R3vPipd9G17(;+%IKY(m35IAg8{-OjVwiQaQaHGCJSb@JEk zdlq#cZ(>IqZnE+{uCV#hD9CeK&zkm)Wj&g9Fd0pEm>6L}Ba4r+jqirQkP8)3E&E_v z=d(%DyHb&YulK^fjt{_#y<_uoR||W);=pL=Vu^X*=Ym(@a;7wX6{u;NQy)`H3|PI3 zDZkF6ex0rCyj}>`n7L54OE}!_(GHpN9j?a?1d!I=0@R(61BOE<v(w*n@J$Nhr`k3_ zwp)!puIP_<73PUPQ+I(-XoO|E)#%;F6Yym1bo5k@WvWi{xFN_Ie^(8|(=D^8(#ju= zeI11zs#BTg@(Pf152NVLe&{sb45Uh(5Zo+__owHhqRJLn__8mXy>6U%HR&o;_YR?B zJ3Yueb~(&>U4V1#k&+e~qp8toTKCDGJl`u(;H4D&IHQ&6{ZtiJS8iZ^xO;eP(L`L} zbeS1H*T#(Nd|tg{H4Cgr!55Y7tm#)TYW#bace^hM3*Akqz(SO^Df1cXvy1G}v`f$y zoq|igbwH`69hn7((bHRi+dj|2xyMhiLq`kP<JfN4@bjZ+;{2S=e%6O}+$xnsj}}O~ zdITwjsnKTlccNnbBDST*i*_8{0Tx<^*eA|*F6bxEqL*)irU{~ur)!66zq|(Lf;f<N z#&W00M!|l#9|p~xC@Gic{OTox(dhCw_P(6Eq`ps~c}gQu<~+BlmCb?&9*@}q9cS!Z z>_*4#^1PtddC8*b7ulvCmgq3>ZR7i!(;>NVJiPBTVSIK4;hU7`pG_WibZr*zEDyzu z$9lBi?Y^Y@`%hu;0%wVhZ7a<7Ujg=uE$RO55aH;cC|1AhI{RdC6=DusqEUANtpCXQ zdDr~$r?n@V?KMCx4=0-TxEFsu4#Z}Iov_8!7vFcSXYaoLfLD3D*f0I#!a$v+!oj)$ z<mNY+Dz5gR)$gNe2G7h4e9$dEY3z-&R#Y)6wZnxjs(3o|4a1%`c;FFdR|as;roJY5 z*vRs%TNpJJg=0B?wp_YohT|`Vqg{m`Gakb47ToLAcvc-{qj}D5^H-MjTn5g`=HiFH z+hLdY1Qu$Og9(P);mcVQ_V)Y$EUL~T56dUeYkDP2ukTGB_eP>sbr05Ll*igbfQlo& z!+&zaaCxuMO!#sax;!mG`V;Zg_W&#%^N49i8PnnrS8^V+4N6s01X<2<{e5e{#QNV! 
zmh`MIpKtl1Qe`N8TWLbOLIQB?OMlua?uKI*jx&=n!>FRVH)+1xz}{+G(JGrX>i8K% z`+n==lJ})ji@H*Ft>-MN*W>&{mvq{6OP4Yu9ca(Va2h#rD9=(%rAg(1`1<E^HkLCR zhU^<fF2&PH(b^JwZj{1OZ_YA&8-n-mXyLueL^@tJ73a=tW-!B#R(Fh{3d6^c9ch8j zAN^xZ^Ut%^TTQUc%@#MAyQ0R2GU04(4BLIj6qjvI0dMY!oAC4mTy|f^@I|M1W5Npd zIJgP^#eHBK?i%u(_h39YNrTp^ZigHLDLlIAM27XBrGFMrp^AiyqN-B@_c@=1TY5`D z-|`_CZJJDby>`J3XWqAcSHkWgXYgGN!7Gg_G+tL8xhN7+y1c2!(mYY)U_O;^4Who* zX=pU20cvE{f!TE#%9(JVB@NXlgDLCT*mbgWaVApn#fM_4*?77(?W1twxi!5y*q1s# zq|+*mr|_u%9u~D_7EQjhT<T@Bl$D>qE%e@SPq?{hDuwVa>W6RX!k%6u@O$T6sJNd- zGHp66@WF3ZZJULQd^ksd_rw=wRe-vuGn&Y|pz`Wuc%$}`X`h_cpcQcpY>INQepVAJ zCZ73Hbi>es5c)e+33DC^%<GsYnv@Tu%a@fgUH&yFz2WDMsK1O-P3T~}8{2*TFH@WI zL6p9JD9qaUNBqrkZW*_qfZ?1xEZS?pQfr3dqPiYXbvzx1{j#F$ZQ0nT>MgrbETJVG zQ7AcioAp<5A_JN8(Do*Wrr+&@TZ2EaQ9Uwfo2MD=4;qVq=S{`d9@pXH+y2ahbI`76 zXrelwKN_6QW>LOVd7e$4cDtFgFuDHtdgn&L{pKfV%kD=v=S`>3rOKFiZ4E2$xC)cs zo5R)b=fr73JeKu4&wlR<zy&;W8vcC_-ik2BUk)78pXCV)>kDvUi#jFyso}h9Be9*X z!q2L5Hv7<Sp=gZ@E=>!<;HWTq?mrWECf7<GZ#b}g(VK241k%}iRczxSPkKAr2^Y0Q z!j8kdujJrEA6v{R?#v5O)gu>s)IMX`k2D})nL2Gq%EbrO9%L=k54|_q2$$lF@s?eN zSkOHHV_Wx#K}NEqSU-iz+^(@6-*2+`@HK4rf<PuMoQk`)t`J`z{?5ujC5Y#>^>EYS z_2O*x*KF@?87kZUMRF(V8yjUahRHr^YP|4i8|yc!FT1t5n(w<6`1^Gb7Q5$Q?Qd^t zeZNBzr?Qh--<?Zec&|#kK3GuSsYfgLobJlG1>8YmOsi|>;O>?tFgiJbu6*nvq@LX> zY+PUt_w1EG0pxMEo*tijjl$B3e&o00Agto7`$f&Rv{x>Tw65P}m4_CxIf^5o*@e3( z+M{T-s~_1<7)DK5e;VU9$YH>72ejL|0WwXDA^qqXI9+XoRi)>pB^_3jreDfz8fGKE z%#+hX&b3&OiYh*yczTE_RxftN*ONwI-WW?XJGB?|kK6+Fw+CUiXA2zIU=OXg<>B#& zM3k{k5%W3Ic(}(!*j`-1qK^-x=fS4b{=|-YHU`o-uV9$|_ZoPVe1&6a@pKw&aO#6N z%1uAP8sv4U$k!03J>Cm9yh>nbqz68>^v5{dBhktYz^5VSn8C+rKL0qvR*mmN(?{N7 zg<UbI8R3fFJ0ozF<6tW8o{rvp@0Rztl*#`H7i2Absby^g+hjIDTB~#b&V=*#o)Yh( z?l-2LHw$p2cwSt3Y!~yr89`!ADjuqR#F9;1@cOebO5sdQZ5ubdr65a(?QgN-9{bqq z@&vSfYfL`f!^y=Voqk;}pd~G-Bx?Ccd*twE_ks<A%hE6~-BXDBuf~#QjtM5#*D?Jw z>J)0d7Q7eBlW8^5lOIRK?IBjQdFg#=QRh=wW^s=NP2SIJGc_r*r5lXAdecA!ZD_KX zMvuQ+K*Kfer>Xr1mo4&8<IW7a`_P`A{wrizBSymKu_Cj!7>O0><wAM)R9bJoO?>$| zMqC%A1109lFt2$k*5tilx2uv+zeomtgr}l~w+%+Ro6*UTOJLe;kH#-}&qNY}KQw`@ z4Yz@R8VlKw1rI=Gxe@09<j@JXP$oCgnjViG1floG;Si$;(P((3Ak%S`jd{G8<=nI- zyL8S2ax4P3Gk)abKZyJ$+t3oBg*9%x2C|`=xcQnRSs4EWd(9|1raVeioqC*INL|D} z?(c=hxicYfvl%`LZV^&+ok;7iAB^3@d)N1mu*7-9dXEStgVr(J_t6J~68H}EwmhnF zN0F~<I8D&<$HCv!uz%uFRyoQD_57<r%c&U}haP4&HP@iwiVj9rrQ-98S0zeo+oAaA zTH&!_2KrhALC;625U?PTU5*+^tM|#VT@D0Z8wX%qcq;P?{={BAP@ri(5t3Wlwp62Y z3<9V81_isZ^v<|~734-x?+r#Mdg{R3e;1fRm#H{>STqj1yN{*a=C3i&YMrx9ri^#S zIK1K~YY1p$Nh;Zb)=qCwUZ#Mh2I@59eLB<3)27#-M$#1}b84xL!C`Y3!7r_~Lc*<q zr14Y_w_qrCG`QenIT>u!$tIINh2%6alU>Tv$Ij3!oO4=@HkCJneBfQtE!K;!FOGvg zuD4n0(>NTV)gK-Hv*$CBeZtL|PFS!d2Nj!7v6SW0rPCBwu|t>o)8FWoLQ9i8CVPe9 z)))N*^Q}F^%&98W^TPzNRM9{qVZUUIgBQ)yuVO#))tSK-KPa{B%llY1bh}htVqIRz zrb=oX&lkF2t!5&|j<6!t{(fX!JeiJZj$|h9V#sGsA<DEq63mnhC`vX$=(=nL&HVEh zH2;J+?{XGBs#KsMQygi%t~wS@2&H>Nq_FqD&Fn)TKkk~G3!f9z$@VJuD7}lMO}(M< z;HQanG;bda>WW~xA9jPW_*OJB3Z}E^qv>XaGCm8OLy>cn(4y`ej4@j%tamM-f$D#S z{4@vbzIUAI#1Fud_k&pK;n$#=poG6hKN8Dx<Um_V6^HQI#`AC=QtnXV-M-tdvL{5? 
z60?IfS_t^BS`$YPkV<F%9)^kX!I1R3l3hJC3kP)g(a<&z+BfhS>x`Cy>&aTl#KUdu z>l!sIS2lylG((bU{{y|GtDs`rR+jO#kVYT%pobDw`snUR{fZn>QMD(Ylw{HkyR|GS zZ#+F2??5y6>0;Oy0gY}<1?l77q+geh>n{4y+iHD0D;klM&6V~j>4nNU=9v2>k<2P$ znTcjTn$%}9yPY}o_Lo40Py<jr7iC|EvZi|{*y%&oplxYSOAg2)@PXIWW!WT={ll4F zt!&}GaP*&1$gFox7Zp~;(IY)u%3Nqp<9`pq&1WlN8J{Uwsl_m}zX_Pplmm`&(|MOm z7ef_>K;6wt@amZ&Zu0bmUD^KN`|b$nh#nNv^&U#k_af!<S};5{2|tXggGVR5C^>j8 z-QUvAhL=vlL-ND1r`%WgwnT^A{!D?|AkGtra)#-nvuUr)O8C1$1?Kv@(p0$~c)K)| zt_9VLiN)jTT}~+Z_9!6DXX%2iYHzAuuZiP^rQ@;v7vbBcQnA(LEL>%llq1ss^~(A5 zSJH=OnD{{dTt!+KT8NLj%`rvaoxaI?;r+qA>5onlP2joH_Qjq&SIqYvC#|vU(mSEr z;4)-H4P!>TX8xaLiQD`>!dzxW>(-gkn=ievtEU8y78{V7ss;|+uFkfc-Udl~66yCd zSy88THzd#QiE=@6Y59bY(tiEqv2=J6TJuaq+PiJAu=hCzgX=`Mk^96g<9UzpIAgQw zWodcfTbBKIIq1D|$EAyYv0CSmIBh~Ri*_3-R2UWDzLtaRtpVqpdCa2SCR4HC!b<3P zVS%-iCeXV3D(EgupnjW1Q&G+ev8LOd{**SdNz2NGy)GpzcpzsM#AM=^GM*h&@Ph}V z&x+=%hb5KIE-~%MIkX~hEsOf`Ta-Kg6*7Gmh`Z+-V#LUoFw5GWW)?)zd*@gzf2K;$ z6dT#5@Y$f5Wl5FQvCOgE1p8f|ijmR`N^6e8yEEp)gXKKexhfUix-T-eSeLX0-Gapm z4wSHI0&PDD^mF1^Y^vfp>7;sAqhAE!r9JR%!1Ko0PjXNzc{ZLMCS|H;k;N<L(f;q{ z%>PdosyEM~K?h63i$@$0=cS-dowKA26vW4o6REJ-nthUe!Bjnbgx!sUY1@D=>^;9b z#WYT%eS1={=jus#IB_ynz5M|fPmRM7nO4{ZykD>NR}9}cfobpBEi6}+#ZR{$fbym+ zj1BDx$7Z?H<ch11F-M2*AgV#FPz9pb4HT5L8QlIFLbvADu`34(uwXKi*y|(zKAP$p z_5B+w{iuNF=Fb*bxG&XQy#%d?RX7WipCL{>W0&8!;V)lPDrIf3TRIfKkG?0gjpG%| zkJm(gEyQCd?a+U9B3n|FivxZQf>|=jusNd_O}t!4cMb~lWp6In{tID`k{z)$j^BMW zZn5<4@345$aJ0E)jNyEi)`h{K6FVLI#8~3q!-Ltk!I|vOz$?PAk_ot?_&kJ;-3BKk z`qJ%(Y4}sI0df=%GlwouQSD|9mFt?LXWmNiwa^uxHx5L-r&q*^**gTE;t9Aru#C;h z*CD5C&J+sV`;oE@dK5eX&GTj`Ic-hflQswk&-t?B|E19jtpVt2ynr>IUCop??t)SJ zUfA~KEL>5)DR@tMAYLzTg~yjgSgGlb|MrZ9_qW~I$iQlFF<cH2H;Bn+uV<C};{?Z^ zfMI*@u+L>fg*A)gY37a>;8FJnSi2|0+OL3S<#PD)N|lCRc+6`51d)HW8jX*OZ#*Qg zN;{WZW5y;KVTRp)_Ijf-$}HBQ@jgGq(IcH{p?w+jGruRyOYaY7XLEOaAa^#rbj3H1 zZ?eGSd)Q|^Q~KHzPs{q<7T2XqsG~6yEv5{{!evTmP&Y~1nPf;Ml@oBgF=sG+>lPO8 z)};e?A9E+2HijlB<3rA#ZvF8LD&r)y>~1iI`$&YYe|@P<dl~f1kHuT3hhf*DcJaR> zjpDt1kKjVdDt2pLuJpi|^>F@FBK~kprL6f$qJ7L+Fh1>sf2y)cXuQS#7M~YubTYA6 zNtwdFy<jod8(6fC6E??<!}J^Nuo7~q&~+yDnsXAKDtV)JVLUkhkVkFKajZzZ$opoz z@B7gXmvJVKDgQpYtGt6f+O`q~oV*V&)c#4=$9vKDtv^}O6Bm;7dkXu0TF@q*efV-m zmF~{}2phhc;^($>`st8BcFE5K<EkaXzp>Zh=}c9W3pc{pfLK92E|+RYPGX}MtcEHr z5vtb3VBaC$yw~nWJ1Zot>&+OtF`+~3lV;0a9O{kF&u)?Ko^Fgj-N39o)`Ff(84G^) z5Qgi`;Y@@|(OlP@nIwJ^52ZMPcJWzOdDaSVAN<ai|J*2ixhNz0uB{Uveq6|u&iRlu zIsn(~wr3G7^=xfTH2!j30=ZXzLF9%BxMA}!@>Mu3)T=guoL{!^G3Y<YwE73i!NA#i zgQ?-dZ??#PoOJKcL6ml90Q^d^gNh4YcxPA=PA%z+{nw6z<fopEcJH&v=bb9n<%O|3 zd0r$9z9vjrUCAy-Zh}Xdb~x|P73u0h<8f-&RWVt`nU1(~&i2qj`0>;Y$<~M5-K|s3 zilgQUV-64J=dwg>T{j+E7ql{5nL+J+xjSZ;FB^XA0_%JIv2eEUW#K0GQV&b`Acl`A zL}@Q2P#rRz<b{<I(~%}PYWrTX=@(~7>u+XL^E%mvTRxO%?@NDl+XPB_$y9qLl1|Yb z@$6Ow>^A4Vowhsdd+kgD*&A$A%3)T-XZx0m?P#rE8oHb^p=LXGwEDD_b&p68A8{6_ zh5277&j`dTKa}aq_Hp>sFM<q%4bkwE9VI;QLzPKWsX?~~UEM81eNF{KPM8sGR2WZv zx}xdqZ6h*Bd&jbtXoKe2d{iA$53Xi0csD7TjEhF$v-VkNtJ#aDC{3ZjWk;CZ^i-;- za>3|47fJK4*>L#q8{ziVc6i|OP*mRj3D^)HJiO``vt7dPhxbkJd)h9zwc#qnDcxYs z>*Lwqp5b()?tlnMvUKp=ZKl6!8qZ=6V9H7r;Oihz$C{bc@#z73@U9mrtl=BlTeBcP zvx9y4Pm_7Y4abu~AJ|v3JS@*67-%JhXHEaurx|W+$L}TZ=R&*q=W-s-Q1m9A-lB2y zbj7KghqCMnA6)smO?uitlP>Pw%p#0F!JhYb*^_QFJT7N|6~P11o}blyS`V-+!5N$H z2e3uvfn;@NHeI&n*~zoF;K=$gvDbfBnNC3hJNV>_I8RR>=S57V;kH3!zr~K7^$QmI z%@4%D<RCsT89<*mY=qri4$NGu6J)y9NP^d!(_MWvw8@`C?x%-fp5ZliGRF~9R7$12 z{~F?voh~$Dry|dtc0pj%XF+;>0dxPrGoiodi(l=OQR(=8Nf&3PHrjC>jA0<{Q<K42 z)fMc`PI<KZxRf=>DB-QE0$p#M&8}{?#r_NR;ER$xhUsU44cpFps!eP})+ILZ;Xar2 zi8<((I~zYN>OrrkUgu0YJsc*tj#Z}Tp|mnd7~Y%DN2^!CxUY}cp{rk6VA4~Tml{F7 zr<kaGr&)-ysuC5~^u>#_2VvFx8N{P^FaW;54%-5Fvudk2>d_LYA7evre)y1Piy^Mm 
zb0hWAKy0qAgA8vkdN-#90^E#HKko~B`>Kfv3vF=Ejcyh=(hH;4wX=@{g0Rjx1#L<s zlv@7^yfF{olvOfe(FDp%GDLHmc&r^XhTiY<Vpkee=&-p8zMi(66&ydvEO#ql)`crV zI(&kJZjo)*NQPH_2gKT`OW2lmeaK<|Nv1v=sgFy9c!bU~U#I@OZ|x~MSZ-#?nclSI z5oateKql|n3$Mt`B9Z5p#tjdn%RBvr1!jfRJ$@9PuJ{Gd9A#-$as*W!c*DLGc~R?L z0jqRUaGv3P!LYnHW=}uIWL&%1wf{0n-i*7jE{>qQ9W&9WbTv%je&G;JQ(FGtc*<v- z5zn}jC$>)Pzodg5mpRO|c*cEl<`8-}H6GLQ2lK3k3FdibGsmd`c<k#USi<U={Y?)n z%GRK&K}Hz3t5N9Gkh0pE3rsO!4m}@Yg-zAnLY8VOj-32keDKi{^}l{$8vfDnV(JC< z@_h>0eLVr&qlQ6hi~`xL)_{H~_2Bm2TnPGQi!%;sk~`nYg!A6!ue(FB<G&Zu46O~) z{%>?qYegG7YNtVoYmZ3WXGY-mFh`cz7L7|Arhw`?ALi9hhMZ3fK+PsM+9%r^w|-H? zOHskpwjh8y_Y6bTX|{rPfhqN`o(xJlhoJcBf9%GcNu+*f6y@)lfB`T1piRS1aQKi< zZAT2r)-{Qw8c$e^TLElP?1vR{+SGE@3DvJI7EO82uV-!tn|e0{t(J!2)8$LW4^;se zdNml&wO?hbhB>U$>nocbzZ)JN)uqxqONG$Uy12e7nx5WwVWBHmL%O{q$_(p;Aq#E* z%gdq17yWR}MIBma{2E?WPR1XdAy^;a%ex~>A?c;NwD?yATYR8azy$+n{rx<s>wOev zTXu?X8|P4VOcLcj48-!T8cFW{P#pNF7q&)diO=p8;<s1_-1s&T$Nf4F6OP!CS4o|y z#WU~HYx(48Vvk|3MzR9CAoRyOEPDDVD$C#<UAwo;@_D>y(BBJV7rJ59MBb-ba0GTL z^u*~`X3@cy<Jsm_<8g}368N<7K1+)kLo=IXv1{TN-bwMG4J+5Mb)71L`lS&1pgEZ~ zR5rmG#gEWA%A8{5!)V-W2^+P2GUD<8HY3NH_VK+*<|7-S>ZBR@tkA%HeiACVI*?9s z4`Se-B#J*>!RCB-qdM=EaLVN~ENh(3j=uc~DjV%c473Hsa5GqbX$ozf-jhwDHde7# zS9;;Q2j|TCqB`#_^;h4`qI6a8p3^JQ%VH1vz9^5b>%C{Qo18FP^CygoO`+x0Z`tMQ zxy(y?o7Igt0!I43!MbTagvuzR%`$;xq?=$tx*SvSoJ<K*`k+<AX?A#I7Ah*Lz(&4f zX7jhf7mX}FhYn&}T>PME-E0zUN+3?fO7c;?NFqCJB)L{aicwRvq;Jl}iRt#ogmJ~@ zIK?QJzF7ZYPo{K8-tU=0^`VNSbX0+~BjaGjA{#twVS%yr(e!9s6m@94U@bP4O#GKe zfzM=^O<FzNe!Uvr*p>^Hhtt^3`yjfzAY?w!Vy{FyHtFOp_^`MZ+Qyhr^e#WjvRlX! zv<6do>?FJ#IEYksO32}B72pCbHfqcRRupRlL%784jae(GE#RH5`fU0m7`y5}J|)q# zH>0U3{%oPb8+MPMO&+Xt#d4PbS{^l*t<@CR7cVLE4OPW!GWIlk=XaJmr#E|UpGf-W zKSS{HK3FukAA7tq9oJZ7Q>C>x+O|)oJ+pG)%b`(Vq+*KfZ9O#98`7=;`3Rx2$oO8P zB*EStt7gsQe5z-{uY;PLWnPH#I^FEtv>b}koX2)$I3tJg;MFOqR6cwwcUSwc$s1?m z0`*8VsPf0&vs_rbr5^4ObZ}oUefsu*=gN|INasfWk?hQ0AdcsEhppxjbo8kce^xq^ zDQDluTpS||n8Mi?&b=`1!8lBQI9n`vGni<E0R?W##MZluVfFqfQr$5Ve$NOJ9D8v` zP)a6kQ(3_1c|R(6;Yw#G=aW~xhB!kp8hu_@LxPnes*N&X4PCL+Fg_OCJ1+=tW?JLS zOEa)${94dbZHM1W#^8l#S0M4y1~#~73Qn_K0M94)Lp?kJj?Yv$TP2B=7Q|3g%r>yF zn+caS^qJFNCpvg2h7Hf3jaQF`lf-2@-or%cy7^7eGWH<bJiwkjHosvrJ1@eGs{wdb zIe{!r_~Lf8R_5{gF04+7Bp*!|v2VqCmYrNJ-p;NRBRo$qtr;0C<*@^t@RY+Xg|Yme zlZ#Eg>X_qK70Bw`z?RJqfQqtDP`POc`)d~q3eVL@hw=HDsV*xy)x^L2%;5F43`uzU zW%1mSjncx!Ls<3w0Ybnw2fA~?RZRSlN76&taMV{3!~Kl$h1vvs8lKBSBDC3rfI2}T zi+8GIlfm`h88O55gfyg`ciq3eV-wXru@G5pdUCuc?E2u24)4scLH8OwUr@<bC+gF) zVSP~hlRg`EVH`!>&O=alpw<hc8pKb_!CG-F`m0RFB|$+fG%%CqTnhwE4MWWKGr^)m z-@ruv0wg>=1}l;xFhO-OY%w%uYfly6EA^2yYrG>`>{q8ilSz2|uLc!g<L+_J=n%#j zQvVkMWh=#?OJ7TA!)HHyS@4`y@(hLka(AqKt{@EkGL)hpwF+rY0}*{{SlOu`AinB> z^Q#rus<Ra*Pr3M#b-93F_o{JUumk0O7>n*-wg?kXd$9TDF0`~}rTC$L0{u2j!NvB; zn7;6UP#>Yrmj33Mjx$f7*wzQP9bW+BF5G3~)a>vL?=zmw?u8qT=FskE#`I*-M)qM} zJ0x4BqT^*LJRD_=Cx0JdE7G;7oqKnLK1%q4@6I(t&qIT=BYvFQk6xs2hESem_dl#e zPc5FXXRl*$wTwUg(vN5UCm)N7gS>F8piV(gWid8sF#bF@nMO_B&mQ#G$EQ(R)Nf!m zUQwuF2#VaXyO!N6^JJ2&X4b7Un!YT(BT2c^8_ri|LSU#R?a+6k9G+{tGyfNh<=>xq zi&fCV&tG(S6GjJLZ-lXb`eVmz57_x24LkCpaihB~CC~cCT$^8t-%=+D^Ukh>%LeJV zsdO?1@6I6632DJ%O)4rZVP$eMY}v)#aGH19T-1WFKi|0o9M8wgGFfQiJDjdgQpTV9 z9_-GspAz6biK5PHlG3&_VkqxHKEBY0<|_^r9uH9>moNQr%&sKtnc#xkl*du8z4_EQ z_8j{JJWtW4i<UVqY}2cG%<xHnbX0BOj=2b`wKKqbE}~ffb|xJ*9tXR>*Rm6q%K>MY zW6)VEYS=iLdK=6jMeb(5|7bpR8{1LjJp(e-zQppEctF_`&Iz|xMeU~{5Epa^djFh) zQ_>YNs&0dr(5z1LZpyJW_b1TXB}-YF>Rz@*;THRrmMUaW2(8}#736BtXi5r_ZfP;o z;LcJ(=^jKiDbggqpP$vBfxQyn!*o8QEGumnj$Q9!y=IKZrLh*+KgEc2-%HSS?-^F| zISMm7=i+zHSex?0RoEZV&b0dR-0$-Qw7C{XOKwbHuDly^Y(tB%nfC;*==N*;IBp{J zST+LRef`B&x607~9S7{2Tf+3-IFd<q2uf>YutxSa`}lb@xqnT?ujyl9Pv|5zWY`#T 
z*}Z@b{xOM8nAn5u^;ckYDh>0Cmb0CK3N%~pxe!0znmf$Z>BjpMW>xzeN;}^`=Ys(3 z&R+_POc%lRKLaRFM@h`cA594h!tmqKQrPZpN@KiIp<`?*jO4lVchL&?+CCUx?w=2B z;kl?7atI2?52cSYTExe{EMVRLb3yV$g`bI|v0zMxxVm91gX&56AkIzL=^lr-T~*1y z|70p~^`P87&S>l9MVgyhz`><UVtzavZO&=oh>A2!+mgc1+6MUM)f-m2CkOUqmcq1d zO?IG06g?*OgT}o>>G;SNc3#s8FD{!&>{cWjzRef=x7pFmhsDC~EY73n{*WW>6NNRq zYJ}(&!|A(N&Q_Ke;L3|0utJ}A6=Kxs7iSC=?h3>0Fc~)va)r0LY8cpHL0-ZvD*lu& zIC|Y-x}4(Oz_at-T~fAv(|hI`A4G#Ol<B3+!C~gUG$pmJv2$h=Jz6vy7nkHvMEqi= z^3so=dxqewn0}n`eG3{lnV{Ls{a`Uy2VYD#M?2{*rWibkj+tkYI3<$Q_qd?P#XInE z|2`()o<(_U^U(UO4zB#ECA>U!mwg_jg6dl)p;nh0{t163lwItJO3L0?HFq*r>2dec z&vDfA^)Tw@KVSbKkAAE}Os|?HYFv?{Gr=P;jL-7R4kh8cX(8evn_*O0lZ@T_xT9h{ z=dCFJg~dFNm~kxxt20)KmG+Kw@WTl5Sf)&018jNEC4};l3)nzAYr%h_B`vhG<umgE zSkU7b+p8*2=It`*laz!%zifv2^A(|}NL>iAR;RjXOP)Wm!pd=4bn@71F)qXar`G=# z=7+k_fhKEIm3bkteqI9^(>JgqUN*w$^}7W3cTwcTGZZl^Mhf>1N$BN97aZp2jmBzc z+4BQ6Y=>qL=fu8-QinZ)l=qRlx!?ZzCdTgz?(`(XfbET5!QFReB>8D4-2YvGzkVrV zOVSLqQRD2Bi>|b9i6wg9&ZNA<26%g{A9{@QqfIJ1q}MkrXJ=iWv*OwT<ZZeG6xNl) z%Cr6P?eV#^ZXwXTG5qY%ej2v%Jn5x|eZrk#hgjP7qilam0jaHs;vHpc(l(8usb>r& z-epPDvOff~<I`zA+R?}ZndEjTh!P*C;2_Qa*bc*CRKvL){ywAmnMIR+&m9g9y(^>! zTE|$A`7t!!)Eeu@UV&HMx0u~bf2!~-X9=_X=&EHNHp}NxO}8%GGu6fAH7+>v_hVtx zhtV|h)DXP=sG4Oh562@nC*boiPYPdSMf>ll!hf&x;qsX4Fu$M&CF_*HkKRu};m&vI zpwbWUU$ZVP>e?;*9il+;MvfSktwk2{vtj!0>u?~x7y6p`&_<p)yLT~}GvZ5N^v-Ib zq)nR&nJzrs{{s5X3d8!P9V|4*ocgucLiDa4D4P1AaXQaVk5|Px)3j*sq)626<aec9 zIhx{h8rG~drL_vz#I$v7?8b&%`n^M4{Mo8PzrT*7`hTm%Z$_Le?BC6F%zwetQ*$uo z48Qm7w#6E~>uk`A>nynUFo>vBV%iNi;qVhP92^=-v#lE8uci^bNJysM%z(CT+5qp* zxuBJ5I(4dNkbUVAaPGTX*xY7I_srk3J_mJhX4_h3KlGO{_oqG0O$Y+-GxuT8>tvJ` zpA<G&e1~nqHaM?W293W<gaf0DsLSXx>yD41Nr8O7sc*_=r!-6YOtE5hGhL|dhAs2% z;f8u+j(~}Pl-y?#tn9B!>#rK4vy~C8vNoa>s~H>&OD6yK0@?b7;Upb(Cb3x{^wo=` z&Mj`7*K0~uxyNCVQH=QDWH#t#7obWi!NsU7N-or+r-x3l^fQt8wMq@);b^)(TLboX zbwJO6F5#5X6fD?P!a5uON6~r5_1J!KyrrR}C8en$MSG9?97!ZpG*l`nw1;+5$u4^q zva_;frSE+mBfF68G0GOn%*g0>{r>gO^YVImzV~&``F!5*Z(^%dA`M%ViQW<AlK5Xk zv8{I&(nd|Z#Tl{d-pjCOqf>>#*!|+n+hxo-_#peQG7r?oXCP|}gy^q55LujpCj;#m zzhANJ0T;knSps+NuVN0zufxJlU0T31DhJk<qNNjx%S;^UQN9YxFPBH-)bDJwng?f2 zJF;IBhr*r_)97lU6=hfFqyE^D*wVC@b#EO&H?H<04}NR+b;+lt3({!po<SIUqkwKs zI4$nKc!u@WscKM<XckvGTH`Ory^wy~niO}r<I}ow7?Sz{-pOX8sPO<)f<}<iw8vi4 zcYbBc9eqeksg=zs?L~W(9qG)MN9@B5OSo=47$*)_k%Zq}#>%zjA^j|8!zn(2zrE^3 zi{uW`S`vv%uUV3nbs@@UabD?*`>=Y68FqHK;kmhWtT=qD(Dr3GC4Q~*@~Yp$>gWFw zFV);-6S|MH2ijURU7Ba{rEE~`(`Xt|R1UT?0n2u1(%!zZG*Wp7eC4y)Os%=%-vQ>R zHtiwzeqIvFW-4P)>u?HwKSw;##@X>b1L@VAKD<NViqDQHVz^->3;St`Dea0v=CdZK zx$R3KW85TR*T>SUXZ&8`w?}w2DwB=#o`qe1_Q0=+eeuBWWvowLt0a2CWwuPF9^PxX zK-lbbGB{nv`qb6Hw&+C9CpR>NHx_Ylwdo2t2M&i{uU10Lgpt_I`{2`yW}#dV@1q<Y zOs5iMaMU$rGG36y(zO-IJ9{zf_}dG=&q%{7)Bi{;sEV~bR-_Qv!J1W$vz@<9*ul&j zV%k6@Y-&%ZlIyy(=}`rH@03KF*7d^QKMt`*=>$|B)F6D;86);So=qd<`rxjs%V5Q= zVdz~@3SW24W#ay!*!ya4a``$1Pp%2(d~zSM=WZ6SnguNWy9(Y<x&t<P^Vo5RQuZ|` z0ZooL18wCD@bh0K);kD%;`gxD%ogr3&ZGSAapcsSb7%E?!NMRz`rIpq!lrzO6P@z7 z>)&qnKzRyFmQkgvAA#QLo6yGBlW;)YaxZP41W=iEjkEYF*_-LxMb)}->~x$mTe5d9 z{J5=v(wq-h=ir1XbJVf=P;Xp1%7QX2CeyTMemMSJ7X5d4wV<%?j+c7TCdk_NgGKDi zp-K8Fgr~m38ndC8&->SwZDnxKRhKRp*9gzD)6neaMfScjpU(Zv!FwC!u*gRf@1HBA z+C?(7=5!)^9(X~_?0-q}#X(if**kzPyNBYADO0c^KAx`r*CI(>d>4A2In$m@U#ghW zBCd2DONyUwg3QlYh?RK(XBX$;*JVx6R6iYq(sSud;4(Jqg#!BA6R>U#cNL7z!O>3? 
zalfo0{!uifguj{8_B4rH`MddVw+luX>(P!DAJ+WqAGn9jq?MBXct70&id0nK!1|q{ z=D<B(kypNpPY-aF%(6_TR1|=RmCUGEV<OB^(V|%=7c*y1Pt1Kip3Wo%vC;Oql=`Iv zb6R<?srk1kQzJ+Hm*lZce8!o`XYj+42$;F~7<;t#IlL`Z;<MBbmfo@kz8;*<lFu5^ zoTx8sd~k-)nCp*o^-qFJyfR(y*#uX5N8wdIQyUg!GkGsxYVK<$E_V&VK&gq8Au*<D z3DfyM5*z9)Ovcd*_*-`Se0Kh+41WJyNKs2lVAPyA)adVw9+PL{pdDM;%hEETJjxWm z<g~E1{IxJb>g{R!!glCV2O4T-N!#bAW9-q_!X~v^_Mk2w@2Q@L=X#Ye+^`(ZZjGnA z5z<t?c_6%%^~aI-Dp_P-6G3Mxf0ss<!QfF-@MBdxChk_pqH$9}W5-@0@5@CdEoV=! zwuhpf{%g=Jb;JepjWCDb*2B%GAPY<e%iaofYO6g)C+5<l8eeE^y2ARa`BDAw5%hCu z8un7UAw*l`K?WJ&XzwBL(&3!2`Nb=UmsE&}Q%8YfYXy^Rb{7vW@?j->2c+FA4uU3d zmR)rW+ZrB-Q<XuC+wy@44sD<ru+2+y(T6=*G!V}(?aeHDhtba-Yx*2l$$m_og7Hp$ z=|-p?-I#KLRi>$8(%TT;NwB~p(%L8=x&?;Mc@1y-eiVv>+;QNTH2mG-NBfTkk<V0l z8vSq_ygZtO?Vr9uSNH>#Wvoh_X&TU}oKFRhN6^rUD!4X$F{4OjI#N>(ANim4^YWne zZ94QjBbeIw8#Z=B9!qiL4tBj+xKcI7>&<GOtq;<r7e|Ze*9Keef*A*4Il2(HToV@> zBe@*zfrQr?EcWMc92V1`Wb8Vba(g#)){drWM@`w3RY#eFWv$?wuSW?rS~PB{J{DU@ z)AiB{=3S6Pj=mpZ;==vH(Hbe5c*md2;)cWi!dwcQHkqbxJHSTH9!#Sa91`BVzU67= zHVy-q1VdrtIW~Ep5>=mRWhX4gvlZHRICFU*XDKfQ>u*SL-BsZGem0wGI~6OwT2RH& zZ1TT08Wf}R#gpd0!DGjN(4+vA^UjdF<8o=*@LQ~O(Jyu+I*$hR-`Q}y&s1D6dOgz( z=}YY)G1QT6Kx@?$@aL+FUb_?Cu-)a!_?%~7R(a)-or5wJ=^Ih<Cug#B`vRA=cqZ~y zncMS_FkClP1uI`pW-4MRyEwaq)VCN4X*Ujd8QoFG)j|2>-<z|DBRE$lMwPBdABUYA zMw91rRhsKD81>UmvC>t#d|s2rHtX@sB6$qTe~X|=wc%JB9Ypv4R=|pzL-8NaMW?hW z;-iN>Y=!hF4E?$dN@Mdu+3yLf3)I4fhu$nUE(NyEJ_b5<k<=$D6vNt;@Vt41=%L^N z*N=Z;ud>HtYimDBcb@?3FL>kGHQXux*%mv^V)4|_U@BPuo=vl|!IM_CEO|;kD184c zI>Zb@A=U^?x}<R1jalqJ6QKE-8SLwN6LdS@1%V06*rDPHVt{B)CvEt9%&8vO?*>pz zn8hZ%9zv0y?eNV$d8|lEK=;2+V)VBilEnctggzU*X~l+-xNo2zjy1GL%{c*_-CiXu z+FgjY9WkO{s79x`1AIiG1!`~cN1cl!ag3Zanf3A@<v+<J?#!bxPu?>B6CYWAiZ-5k z`~rS9>9E7~LukdbOYGW5IjnzOM7{mji7xx1x$iTC66dAR^hf)^#Q8s=OFjaJIfSwL z<N>T@RW??>)fM_Y*eSYIS&)a{Snh&Nq_h1~C?I$RQ*Vo6s`dUzYx`2#?TIvEyEk>Y zS>nSh0i;wn3tc(8P<oUH-Ys;+vMf2gelvu9{dbtTx{qVF_Vr9I)rXFUJ!a6JKpDJ$ z{c_VVI=5{AMUU~JmRd_%8?ch!2>a0wM|b*jIsn$cvc($X@wigvHM?=vicaZ-(P>v3 z+OORk8($5?7w`PgKvNcEx)MoHjK!1cr<vg*M?B{*k6B5ZAZe2pEc-c%{(KA;;xx}O z;dqs}_f97(0Vj%^W8!tlT^@(07?Prv1<NpLXUCt6BJr*b>okof^>GH2r=^6t?hzOy zc0l)<ZulWL4aZ8GVARKI_9)VgMtd2-hM3imzV-%N9UKqSmxq$nKr@<bf0^_CSF$Tb zyI}B<0#<#qAC34ng4otC;y=|Ce3>DQeMVKn!@3A+l;ohvgmzeC-wI;i)y&kZpXBV9 z-(vjN7w|wnPW)anh@4)XVrGlV*ufkvT77H^-rjc5Yv*ANc*@_Prw>;$>zlt}v5yN~ zavcdaZZUY!wjWx@sMF4bxx$|%R+I{Tpr=-qia3iPaO*%alux9;+PB$3<4iHn+f+=+ zRmLmxcz0{wAi5f-fk}zK;c4+m$+F|yz-8uZRzG75H7*^)?goT_MvE04<1T^Ud`8|A zbBvw3GfxZ|zX-l@4&c4-hhW`>twOkc5bE-qk*tCO**eGJdAa9cb?~6q_E<$6_aTa& zJ~_q;pAMxyd+o`mU_5?t=WeEb0jN6nEITyVjNSz&)4t`1vOXrHWHOl6?X#k3p>7oO zuaN5G7P6N4>X>wT8*EIrB+WCuNP6N^7<VxPYqs&<u=kf)O;HQPZscBy<t1bzvBM78 zhp_EXD+}C`gntHvQA%NdjQn^4GNa__XI%z)k1eF9Tc^-W-!c4VT1@6=j6CLl@}*Og zQeoFhD|-2_1<vd(fQcW*V4uJf>>+oG)m~S`ivf!0Z63t@%C|$<?NsKe!KuUx;@OGq zHw6{$XlRLbp<evEbxuaWAIAFl>dPR!Gye{ZeQ1R`yLm?|aT+aO5Q)Dv@4}I^RGR<& z8LY0ThnX4CwCGbZw&{O{kHhZ3lHw$~e}4^QjTeMTMS+-m%MAZcbj0MBSK&MF{%q~Y z!cmqUcsg|qT)HX4{_3}j9{K9j+h_-qU0H&)>#nozq>->Jpa?g&9~4Jttzt2(7lo^9 zq5KhNT(T(#jnc!=@BAmQiS=dUFKg2vp3B_Ed#qi{dgJ|Tn<P)Km?Fy>&GuBf(fNOF z{1!G30*>}VsSizJSR>ziJa1ri2O~(&$A`j~%%C$n_#W;kfKq}VI;n=Dh1vj0emxXV z@NVG9X(Vba-q+w!G@PV5CkZ|qC!)D$46WbeNR5$s;_%XOq-&>ze>ZYZ<&9lzaODKF z|Bs2*dl$kq!|kX2c=ngRdGb7!6FXIHPLq5_)2K3ii1+J@Zv3VKoiotom^p?#vBh77 z#-tK77O#}86Jj<^MJuT^)bSa}ovZVNKeUDI_nXT+yH~PRPp$Dm9}i)9(0=y(>=Vzm zu5L{8-CmeGZ#{b=0BY{ep#giHNqTt}o`JsT;(1Wa2=T#rN7LwrRw>Q3GNFafOz?Wj zO~||y$9kC?qmvhR$KU=0@v+>m``#5pGeR&uRl>$yc@Gg9w_%g*3YZ*_Nb+{B;Or1a z^JkkwQ1>MEm(LW>wx`m`m(lcRMifdvNTRW$8B6=vBj(*s1<h!HN2w>hE)K~hhvUOA zX3Ifv>X%Mm>YeG%I=;(qUJN!{GAJx09X4NIa5`r&_hg>C3BflO2vutiKmtWzY&TMw 
z$u&qxJOm#GjiJ!Cf$Z=8F*xZ|8hi1(5-xd4(f%!l)NwV3dL0{v8s>_4r^<omkC4ap zI_>PYHD{7dTFpY9htZD#9@J!Jj<deS;B#r-C2iQmQeW~M4$n5#@b9>R#}76?&rYmR zoKD6wt=P_M;glk4jQ%HsCHvRav7INg!2Z=xG=4pdE@!5a^@uRgeHu#ZE~VpKr)*l` zrG?3V%V2HTD%fMcNl5iD!j5DelroGL4fiqjpYK}EhVlhG77UkU-w3j=-a*>5tE~5( zBTUZv6PvZ$n)Ug(iD@cYu~Fk?aHgz1%B3HMBNEOYPa26iFJw_?nH3G3Scuv-Hz7u5 zJv9F_U@co^*<bkqWWPL!Top6n$zErk13f1gYR-p1{u?)W81K=JJk9=%MSL_V1~pp} z$ui_PWUQOSGUx9WlLI_)jI<x#bzb7t?)Hi;;<Mj{Nh9gW9tZksri-;zi`bjphB*Ii z1za>gBpN%}P&U7(9~~f#wSk&^xBpArwnPaPlvawj+Kj~&XS(3H&t|sTY=vZwkt02O zIGqlbPZDaMrIM<hG%P;(fh~E-pfy__95Vl~iYLv&mVIH|y&aAD1%dR5b16nnilevc z8Mw6Yh}We>TbXPoXAnvG!G5KQLVH6LD^#~)hlj3!tWZ<zKkzTKILMQ#qYIR&U1av1 zJb&?74ewk{!b871q4>TPTRzj6zO@s?_2|<4KQ8o=cYbN+4?#KlAiGyK+e@0y5aTcU zvp-4d7^g7;r%R^^XW}Xt?bz?-_hKlHKYR~HWtvj&_U~-XZcWLWH4eCVWf1<&^roBx z3HaN_7gcwTrL!OZK>yfce0)j^vQ%^7q^&c>4VS`0!?;iSmJy`aX3*kuPg(yp$C-?t z3(fh<?_&8X*xe-&bNY6G(OEkx{oV;B-Y%$rTEbjixCduKDwcO|W@??YNOG$2^vxlY z*p0RtFnCmq36;E~`eLM5&y+ChT@!2EeFtWrvPPS~&G4@7rKEGe16tJ1qAD}S^eb+# zq_v#Y-h6;5q$EPOMjf*#N*1*re1=bg0!`k-e_QRj!+^6&!Uz3?C*co8&A)$ymIw2h zY4ku`=&r^F?%+J-Q6>1avH}cDwAg7KFB<gymzZ|w4_vI-$j)D|#BBx>u;9{6`c~e^ z0_-Qj#;pPPLuxAr#%hzEu{)hmwt?{Ic+$%K&blY$(n}luZ(OrCP2EsJQMuLNn~_c9 z`?s(M;zqVoli$Ij706vjk6cQJQsY)>s<gi<4!S#zj$$f~9i)x7&f8H!f-F49-vTdN z>)E<t*Bi28_#Woc2e_=n`yJhUj%)qQf{S?<+tCn(F9NOb`N`(-9$JFz5bWFEm#Lcs zduf3jTc-3BypC>R72+{=#q1MX+SHFTKaFr@?IEx{AB~agOfkoq@2pN+(D<Q@1*Q42 zx7JQ*=yndwcn@rw$9vIe(sec|X8`tp?oGx#kD(o&ietvkVdZasvwbIBpdx=cYnvPg zd)MA)vK^B-!`_RGckns!%4Ig?`cQoQsa}#mFWBzP2^hAkSu}pURWR$f0Dex5pp|z& zK!Q{VnqR!f68TO1U7EX~|F!^ruHGz0em4|+3sf=4s)k+Y8$iR4$m3H#fBX~j1`781 z;K|qHplaV@N$z(|Dp8EbqszPC)Z9Qka?=d1Pf!tJ2aLj)qEs5x6-WcdYtf4*>da#E z4KEkV%k0JDGoboom0){RmhJ5sgQ;Xe3BTLLuB+bExAqh`<%ZHX|6+D%s3jJ5T@sI* zyHZ_T2V9%yffJNAiwy${u&z%J8}4`w&QwR^>4RU`f@fu5&3TIT(@;$9aljo5V(`|v zb|&FY8S59Vtg}3j&R(z-njYp*MurjFn{AK2d(MDu*-e(<$@!iJ3M6yM2vr9h5zla^ z^p634Ah}t=K1vQiflC^h70c27@Bygi)gxRwG=i>$x3cFMwXEAF3LTnWi`r*g=z+33 z1mCp9d9U->O8EvanSQ(2tkb<H_Ph?nq=(R|GY`Snb^*NQOw$#1E|eM40Cn=eAnchH zuG>}%?QH|;Zc8cb$#}&I{yEb3NB<y9GR{kRy`NX{<+Y5;HiG%5Cv5sdJz8(r3ol&a zEKJROitZ?(>egXcEg1$2&cA1G71IQb*?%C5XI__t&R|M?-iaL#eIWFu1r1alNJ>8~ zs5O58Du*Vbh5sbj;mZ$o%S*9+sWhG)9#7-PACXi&?Zry2jHCJ6f@ocx8^WII1{Z^I z;PfmG4i@&pb5-9UX}2S8{wFK$zA}^!9aEv-rB&QNI$N+`txS6zKeLv(S`bxijf>{? zh0K%#l7gF#*qe_8DMm4PqOun@kKf9YJMySO;z|1#JY`~R5i8-}L#x3<Fd^FuuT@P! 
zt66=8y6(%&PTPR^k0X2RZ$>gt!g01<JXXvK^{R9Xq@SU6U>P|Ym)9w=yE$s;v41<$ z2pEI1-CLn=z-5S(vZu~yXG*%eNeE9vQXc%A8U8S%WMcz%_v}K*AL+%NkV))J;7C~2 zxJ6uCuR*1uJ7D%)E804AA}b3y02dVY3*PS8LQs1McE*O1Q!1cij}3FuzsoXfoM^g( z75z;=2TQ#hp{geteVVF;2U(^3UGlEM?b#FYZS`=>=g-;vDn1J<4aZGQA3(j*5TCW& zU@-yQZ6sqwKkli5v<7$9bl*E2$NMH(dBf<oZz?X*(;yb;jCo}eo;eREwZgkz;|=|A z(~^}?ocxHr@cbzL`}vU750#@OzoRg%<qCUHFhWR5`36PD@`P3W${_WbAv($}U}y6S znbGY$s*g3GvD>^TKeL{FtW&4o^VHCCax7{(8sN*OblN_41kM(;aNGNzZ1&q0vE{F# zaQ*2;R-FAryg1OFJym}$cn&MVB__)R*YIEB<wH7jd|Nx4sY2q}ftteTgVxy9sYe%- z%~_XoH<;<$;5HpQ-04;cx--tQuEm+$|8bTzNNtv+-m`^Deouev8zlxsR)F3vT{_<h zXr<&wb1EiN=$&b3d%}T2wkHXP_H2NF;cB?=RSGyR-wS0EMo?G38^XK@-dAuLj4=aq znc%t-PR`_gMefs{BW9A)p)_`FS}EO{4#JRRIsCZ7h+QmDK(!rHssB53c7ESfF-9?& zoSxd@nCFwxz5h5CD3^rRcplJro$yMoH_8|nq3PeFFmam-j2Xt+#5}VV9d3Z`og*mw zgC}l{D8=M&$Au@wEkYlexo|wHN|gS4hnbd*Cb_xUsQOuoO{xyyOx%lJ`4!FVK<*N# zbX)>+c_;ncxE9!XWHEcv;)-|r91;gz;PVG-1pCAC6#StWB!4c$@l9^1&?*IYw~wL9 z_((xIK$+Cqc>hk@nw0bFpngjSo4TQZHBAEAa;FGG%}sD>^*$K8IgVU6Rza%4Z&9MT zkB#cI#><QJ@Ic!pK~K{h53ZcV6snvkcJf`wTG<EvS9b_+<WGCOt{uwrFwu12F8{mc zaL!QeZ0HAVFzLlAmK~xA1N|>D+b3t(i5P9TK6@#ft0O{_juO3B8A5R-lbK|>3SRf@ z@>&#@LzzXZK+`RmEmXFr>wWv-m-!uF*DD@<cl*FJea_MxSRst5w4wR^Lx|P-GygaH z;aD`ES?50$CPsF#uc_%Y$jT7!nv9@K`#QF0$PcJr;D|mmZi#dIs*`J{4{j>7q`=Kb zVBMWRLbK^qa-85u{vC-h_sJEQn_r9#MS<jC`Hau0jbc~jPjIrxAu~SXOV1rZTfF6I z*j^Pp`&AZ>PR_^6<)4_#ZGA~udK#4<_z62d)q)UZk9*$O(J2crI&uFX3s4Qhe;(8E z?Jijy(rHdQnZ?*FJD6s-N`%;yZ^Cz(O|ZPnm>RbAp$U;AQD^y2*pXWzS}N<~2MD4; z>w{sNEq};ImBBpa9Fn9NVLAK~v}>&KpkXrgzpR6ea1?9;1Qs@DGbHy`!xFpcJcIvA z)Vb2mXE#;M-S{2MQZvMv6J6MbXm7TC+*i@|&OR8XXpFT%qv$~JNLq4EiHb+Z@qS$> z2)9P!pcq{&xmm>S^EdK(4|`JEtc{ljhu|-hsibedQ(RTZcS$S$NqqRcZPF_e*NnU; zeEaK5Z>_RX=K3y4faH;QYcFTauXDz|B>{Nqn?9npI=$YSM+r{SxKOi{DW6V7!)8|+ z)U%f@<L}J+hkwB#)}I`=kDv;@!(NGNLdf)#Cp|CgkQ~ytBiENraQ{O(HMnjRCj7DG z`};7y9~dEiADoE~dB?I=)df1;Ey+nk9*l&Mw9M3p&S$5PSIKU;uPrY$4Ev95)!54V zAL@_V3z|S;G#6&RzUh^lZbAA}C)4>VIa=)?C5CMqLb98~QNd9b?az#$rit=on$pFd z%4@R##kJ7yuqMyL^}wAGk@(f!0QalMlS9Q1!6GpSUnuyJZ||YFwc8C#tb*xm{y_4N zHNac*yz%vtt1Q*+Hd8V4zyU{wQmx7!kOXPcoBCL^xqL=k@mZ0|UrnX?!3q4$&;hb< z?u$uP_K?PT<L1G9j@o}!(zNJ4d(Yj9)2pw*`!-+P`8kz6>^}vw{ddBp)rF8)If}Y+ z;;6ww4|{yy2t#Bn(JDckV%ye=yR5Bf-DZ8LJ`{{QR%KC`e+s<ap@q%qJxsng(D>3& z8g+aUTV+y=d7Ar#OO7J%-QET3tBPdSRqpZ4a~Rey(ZeCW7PQu226T8|f`y9iG}8S# z<QlZ|&h9A5Lc>sOy!8e~oA)DSrySJ2u1pIS213jYMcPwtf~&eVLa@$Mv|Y)63pFiZ z%7jMP)&38v3VIj|`N^z#&UKUIndJ3#XFRXZ_lzxl>D1``Q1gB<+vA~(#xMJ_U)N8= zYNMMh!j|_fqtjT*x0URZUOcO8f6RK9?}ry^cHAp8lFnC@pn89S$~>)DsP+iTo2W#1 zZ3gXW{t0pK6)}I)C8%!XH<?*>G&^LS5HTv8t*HpdD>_YHxqE}?yY~aQ;GF|oI>*EF zihVG|D+52o$FecinzS~AvjA^RfWaqp@zPEY?DtbjSY|L14o`ds&6iqWTj>BQz84Sk z_Ju&$tm*Wu_a)|aHkj^rI^f>qcf$Lic<jE_=;>GKh!Wj+YHn|X7eB&q?loNsc&vzi zxzbc;H=8-Fb)%Ig=@_oML!9=tg{7HAqvi2>_W3-q4@Ohb`qMNv&vGj1b-GfGcn@a1 z45wJb>7>OwuLIIE$>>czJL)t8)qM@=-jk8^sBI0qGL>hf?XocI=XbB*i7}L>JQ*A1 zK0=vX2AO;ehs5(;Y*kbuO%2)3j@oxJZ#xybeJ%qp4nEF4TR#A!WF;&tIKaO7%wZdI z^`PEo9h5y!V0L0V&pX@}Dm!}7$@UFUv7!&uTz>}dyVkIk{iRv)Ykt?cX2i0iTG@2& za#?iEi_CdGd<i@O=hGx^+Pss6yB~%H4_1P_My2OXw-3yy?k8*xa725yh`Cf9V`=h7 znf(1D?DFw`G`Gs0y*N6O3^>M2rOyX3x?ew3%l{3ZuV}LS=T-4cz&A<ks9TWxqKvh@ z*vgbN`U1@M^&Ih|nvJ=tMu(5dVb?8ZmUJSR-RQ`qS_cJjc1Re>mu16k#a~bt#d(3U zX%xNq0<2#)9y=^`7}}J<rm8|VM7Its@_LxTiD|U`yEn>K$>Hm)I6M~MgI3l7wB?~I zy*Sy)c5B;H_b`52i(beY#lG}sWh$O_^FY1iU+nyU4Q!>$eX#S9WwYMY!Kte}Cz9Yr zZ)e${%Y;#Qc9pH<lU6P(SeQu9-TOdaODX23qfaV}yWzNt6-69(V;>{((Ii0?|LO&h z`4c1dZdp3%xR%oV+L<(Nq@U2SVY8&6JA%32nn6i#KL}&AzQJbK9g=fC&g|bk6<ik8 z3qNz7&tyK|C*LuoZyG9;rP>369Pi+6&%<BaIaB_m6I-wFkj>;?SLcH{l-xTB_sSH| zZl^x9@RJpJS2%(Hoe&(JWl8!=ZwWuLf~lXm6uHlHAs;JilHF!cWeI)I-Hm(HIH%h! 
zd^Rf{wI1HZjc4h<X7KK+3>}q<#)I*dkbN=;LixNjQ*#1*e>8|iOz*|&$4SxU+54DO zmlh3?_ajs10Vp0yXX8%E(fCQRIP1n=*!49T?LL=_$=ma>Do%?Iy(px~o%Wnpl!{mN zCefSSd%!w75;N}W@%~UUwQ4EA$+@3k+pYp=oBRyyPrBd*e=}5^aTA_fm9s?G7;^OJ zMN#9tM2(xa%<@+O<{z2{M?cGoW@Asl<YT22J0%0Vt6gyf@3{}!^`8B_aZxn2oJnKe zY171oA#gcqEvtU-O=pZ6*tQ*(7&dbNJ&|Nn?{&AKJWH2!A9>@$5edxIw36M}uZ^28 zX3~JEiL~v3K9vpN_d+*+d{e#xJ}OPdedimY!!v-iU(Td6uEEf_{5CB7IvIEKyF_ln zMCy*e>^10Be~4(|*_fjx6m)(G?~Of^bS~%=r-YfXr@=2otG*7r8#4`qXYFa|Hm`xk zWzSiei7E~<@}#KV2U%-|5h;9G2CF8|7o<|;NFKHWyzEQYE~(+5{0wsNPUF10&#Y8q zvjArnHS|sED_&YL4Ers)18<bADLDQ(3_P`ubrt^S6%y}?7Y3_v=e-@tPMb-tH{?NM zaI0vLIFB8kKAj3St`JrJg_2{EGX6JlA}gWMbn8lo;H@stobCFuS3M_~*7Y(tmOp}? zzt7>`?>uy2*Tv6+OsV3463JO?6dp$D<1Doy)Cj6@d%ZSQRAtg1JzdTpl*d3}5u`XZ zvh90&QN{2P7@Vty2Wk(nmp-<Xx^N7dd`ZL8BcF>GO-s;r`3`pdNDAh1C&-H_R#?^U zPdh=Nal!$3b)!qrQZ*&DKPj;4qb_-~vG^iY70ycLi*c=6LC7?N%?`7$yWEVbmiLla z^}fy4Zd9RvX??tMdZL-0)@WL!rH_Bxqsbsx9}6E$LCfAF*quwC0Oz}t<rNL^Z9XUo zS$|Yai1@^LmYYPC!6h^}@vyL}t)BI~tY@Q7=#Vf+n-0&t0cjov+=HwO(+37pljCA& z`>lynOIpPL4tt>PCpETioF(<!{!=)AYPt9z>4>CSX_r`kL7!ZjcY^za6XM7BEpX*> z0p6N6nb{`{!Ha7wNb-Ity6daqwJrRv&?gqH(z@W}#~|EqTgKFHMPg`}E1k;9p};_U z*s3i}PfN_%pQt4)>`*PJJRFQ;BzM`1WTg0YZzXrlgK+nJXQGF{IGZC0@2vFTnf5q* zbgcq5Nq53o7a7u&Ordu*qv^f+066})2S(1{DQ3*7hA6)+@Nn>WTwtX|*UYtWwwDr} zHlIeLxbybu96S0HVMR}qljx-p&lUu|79Sor$0usJxaGO7;H7#7?o=tzk;hLU_}E;2 z7vgTbSR>s0{UQX+vd0Lga4L`0#JZZ7Os6LtGube7PjM5q=jE}m-tx3)oPcpD3t_XI zHQq5)_j<$gejzDxUPrtOaX`!i_E4^f=50=dfBgTGVADc;(RUht`R@`Nd~9sPe-`|F z`q7W(yU!vclhJH*^elFLMFgA(sPwwWInCqehEu6VDIOR;iF!N|@Z4W@XuLdy)fMgm z??G+gSzrn$N3IahMjaNX8F-QMLJt;TsEU$v+2SgnDiA-8fS^S|JP((PXLe<>sC7DI z@{Y6m&#K_pYd7JVXh~Pqx}i>CC)3=!hx3nJNyasg*|trf;8E*X6*V%C0x2ecw@r9* z%m8%f>d?e@p3J{Mk9ODQ(~|Q?VY|^`(fZ4HW|&tGP4-S~soX$v*tQGO$64a*;0R2} zO~wU=i#=iYBH^=KD|5=v6J3W{(sUDP=rpqen$nwh6=QI-^eUklyV;|$s#IHf51w9- zp_1qmoCPhQjPH9kd51M-$@z+E(VUUL@3nYLCkgKh_H<~25xpKCjj=_V_;ko1nv_{0 zdATeeOEQ*=|CX82;HYFOa5KO!LlfcDXj`awH=I&x3Q*it4_fXy)UZ4V=RY<iO#>+$ z{?(4QZ;YlnUX$?ZDR-)Q{)Mfbx0mhcFcn+4D>FhfobE^rplf6@39}}NWs8SU_V`eY zy}W~MUlNb{Ym(rFg%>#741}kXt;uYbBA(WK!Y-uOGt=JlSxvYNnfY6zY?w9P^YF&J zu~KxfRSg52dFIPG3jcY@VWmYH*laGvoS2P*?uc%&^`y11WDoC##QC7kx`pg@cmOT= z<_l4#CaAOZw$Pxh4>QliK+xT2ocygxw48MjhNt(V9d>DK$k8~=={+4EY^fD?@mbNd zzX;N(P6f&f;jG3i`Y7t+O5ORAQB&L@*Hojy=0OsEQBe_g2MolpO@k<#2GNPo#q4hE zB)TwtI9j>*;rEB3sH0~j6y6y^(f3}khVA1?@++IAjUJEM7MsEK-#j+TWe>EPL{LQk ze)Rg_c}dIbSn*Qw1nM7D$A0$F#_aKb*d8lwlD?5d>C&OhV{RVASA1fwb`fIgw;!zh z^9{kXP@8I>OT*H3LyB_e`GC_7cqHqX5Z2;`z2Z&i$sEp-Qhf?%eiy^P-A_Ch?@Ynw z4-eqG+kJSidJx7AZxQxo$AjVkYtoR;XLD~GQOYN8rd)oB%{O(x8>T~XFtf*{eJ`_E ze(RmNUSJBxG|9*&9$RL=6$>W*Wd~=Ppyf<^bm#s@4WGU=f7L~{e!K^&EDOgr7k)S2 zmCg=6SpsuLrr?N*aj-W>Md+0mhe1)cY}(W?82VA4G*Txb*mz)Sv;y=*9TE3**os4k z>=K80_lT1tMl#3MapbZ$f%WiA%h?OHjCv)K+66z1)>FlHnGh69Guh6L9!OcXhmERT zCvknDCCudC@(w>cQoOE)oeeeY^UH0}y-pe}Tb!|fN*GptnFBdG?^xZCEzo3}Na_}2 z#OR?$<aYB2Q##!O%l#j*X#XN~IeHbcj;GT{uamGcw;V=e6VtgIKn)HySQ0ZGv)0ap z)h{Mt#H*9cf94`~;QUAEFeoL@#J=onN)0S(=bbA972Nru9(*>0i<^u+dADZ{dodxH z#kY)N#@_-^Cu}qOjl0;U=4ARdXfS4;{tI^-`_Ra(Q*r9XGWN6E09MSl2kUKXz-vMY z>)PiJl}%pg&8A|okv;uz5n=wiB$Q+q;Eb82Xp`}bnatY`;@F|M`0_q5{?wbkxbA4^ zX_uobt3N@%uj_<&9y?(0gk#KcgF80W`q0ZLH?&R}jr1>zJEi3)+jlxvJgH_Mx`Szp zwk1hhXA1eRCNjOFJfBn#wBY+N?u|L;WxAj9*=$RN(keI9<XzuyD?bR&HS6HNfMr7S zBNHlLs4hrv(IK0_8Emq4D0dyTia%D5rqde@nZ~_pxNI0q)7feGxSe~{>NwjbP7&=M zOVejJS*Ub2r)9y`R2-N{6E8l1?I)%1@xmYQBAsWPJr%^xc5}A;Uj``_STd>I<MHcu zJ;C9QGWzXnU@oc(WOgx_lvk<1ojZ-J`f4-0{?D8|CLCum-&09n-J0Sf9E1<Yw5ag> zUhuK66%7`e;AYyvV&&uUkm(s1xMDwK?Tn+|XRfjdb3!qCv>Z(=E=8R@Gkk4#g^jk% zME3{N*zODq(PahS9ZWkXY}LreoCogW*<A!~mxi<NF@3RZ<_vQFB7^f@<)W$53XofR 
zl|}7a&z>2qk(8Y`<9qI*X!Y<lvw=YBcw8m)FOa9MCl08)+!ZsVJmBTE56q!_7&-OK zms~2=CY|jsU_?q1)Eu6|uDR>r-;uWP=kOo!vslcGXK7Gq7O=rx7J}b{uV6S)k$QU$ zhrVCi80oJUx}J8j+llX3QD-Z=WAv9rDn-)LeNj}Mm5)y6p9_9+zD#wg1qf?5h_kLv zU|SoviTm?x>B>y*#HoD7Gw^+IT5+5ZGh~YAptE+UJa;-Qvp1vv-aTL{N__rrIsuOx zYsEszE_UEk161Dl*`U(BoUQ&W4NFeulib^J7_Xa&Q_N&w7yoYyKErVH*eHo^KN%YQ zE)ECv7|>DGYmlujPm2w=!M>hk3a+-nZ(BVn$4?6TdF^02IfF=V{|;Dt;u*yEnLw|c zxHm(~7uPJEj!Ku;ikYwDsBwJ{^c*(EjRx=G+S_@s<@jJWc#j$UGA?0p)|TiwBtabV zSBW(2GMLN7F@!O0q|y5csCe9mXVE9bZzF33!v~9Cqdd~d8IEM#_()LopG>8zW9jff zKiYl2AKm<#M^+BmxLK+mRAl6_14?mVd?HO>pNVfL|6?V7J0Wj)30_&h9v+nsph?Ci z*prn+^R|Lm<2s1Nr}o3@orU-)b~3HA_`w=;lt_;=s|&a2(U8Lh^v-61sPjz|le(so zcQ=UxWi)Brm{jU~-rLK2qy?q?NyZN^mx9U8d{Vs~#cHh*QD*9UIF?c(EKHU~-8ECl zd~qe%FI&Z)*2%F?SF&;9z*K6V8-b&a#j>bn%H;1*&O*+*v-RIU3B7_8@aP65oU&}I zXZ|NMly1(2Ngbh-GCQAknJeR>VV3y3Y7gt--_d1*&cJ=ugW@r>Kw7!{FhuIPVB7cp zB%i)tGF0E4J5oGR*Lk-%!~QSqPwGcuR#{{o*#>v{t$DcPNNn3!!IJ#sA$Wb2<bw78 z-I@`sIyIC!ERv~L<ylGBoM?(kmm-HOTfA_8Exh@yE8eOZPpf?=(y?FRROK~@q#CBP z>7$3x;_zE=D0eYjHO!=gxhZh`WGs7r)fC6duLmu@16q&8AP5^^pZ7&Cbps1wcx53; z$!%s^t5flJ#Z>{66}TVAgq<Ck43lKPz@yn3*ms{I_+R!x`?oc$>Fap$n!`OPv9_kD zZ};KE<4Dx$cUhct!j^aNFL_=FsD#XLSE$_=g8p-hM8l)!*zPmQ)HSCXWTIC=b^RsI zIeozbh98H)hnKRVJr-m#rvN>UaK6|2Od+AX1Pu!l=s4#~&TZ^34s;uVpN?wL)##Oi z-SpS6YwrrUHBN<{istOtDY?|K^Qq_OS6Mjh)M8=nCmW$CW<K=t8P9&mjltWEZWJ-r z7n=jRC6SzSd+x3ky<M)1tBZ4StXe;|E+-CmME-&mWnY-ayw_ffOn8@W#wgNTY>D4y zyb;sRxzm*amZ)(%g|<YvqQa?kaouP+{Q4t@^PPf8cYh;0c4#f@{%Qb6uV~V6m8tAc zin8R_q%d6aL5t$<`QpFQ8Te*Tt#Iu>Ps%-Q#KuK066>2(sAG5}EqXr;dw=wSE3d}T z9^JhX%M&7-{4rOM|HHs)fg#>&ZDSvbR=}r1HE=zCM0n%(03JNEW%2#2;A_S=mOd<w zO?I9NDSBTe2Y&Zw$A?>B)e$Mywzrnm?C)gXY6?;OT-9)4aEnmHvkWV^o90RPPB8|i zk!)p<czk!Jm^E=6_?1~=^Dt{tvo2wiBYwd9XM@?$!Zpmjs|u`k4W@!VErS1LJJ26? znH3#zWaYtWm>V`n$O|o{pBif#WR(M$hvx||51z~1lbwLQPsk!U9Tr+Q#-r9ebIgo$ zBKrm-{9VWAsr;L4U{H?mDJKc`JFjI2N*m$(_%^sV45>SKD~p_?ME5>_VpT7~$T@Ej z`{vEq;yPrBi^I_+zzSA;m%{g1CgRgE*CBhZB_<Eyq3ZSNxYtZUY|QYiKk!NgFNB&= zG{12SDC~`zYui~me{VP5;rF)-3go|1jhe3J;P>L)Y<uho>~nnp_CA=wM*mR8_8nWr zt7p=|>}C$NeC>zXH&3%sMeEqG)O=hw?m2WzYK5;oFIh@gGIneV!>g1_K38tBPu~+^ z)k(g`&l^QEwnd6FIj4`W&e7x|W8R)REa%uZVU5%jJfdX5PGtC#_A3(%iVkECs!qW> z65*@U3CPv;Bl&kD>-R1xC5I8cX!olSwsr3;^v|@woM)lzq?Iqd-{KCX2et8ybsn|f zo(S<@%rP}%6D#^YPjdbGA0VsA?1a$<w(;Qsa7)UiQ6qkX>oGvfMc>%2n#ZunAf2Y# z?Gpa_+yF~+n}!h1F0WAtB>8z!G-Guj$@#y4ElYN>iq}ST_J<Fxx!}Tn?U;<qIoC(F z_8Yso@DE#h`V*Xyx+>f(f6a!NbDp<S7i%`T$(DRE!E<jpZ|44IAuX|o{fG>J1K9`R z`<XIvc0d?X<VEqqv;myC?gvtt|3QP?W3jdLBN)8RA^lEST-((nn7-6u=k`y)Z-=w7 zq`F;Lal}e;YQ7sSS#3(AbOm;9evR1S)651gc_m(GvB1q~GbqS%tMK`^4NG6FB^>o# zEG*i(hkZQYCtMGWAX_^pW`0P(A{RAs4mkk=XMp#*!F#1PYuVmyQnc3Yv-n$nlc3@q zg6CAcv9QSk(`Of>@e=Ocow<(LIv;@K$El+0>G5<m#}PNgJd}L6a)!N6I>thLB<x1# zVC>!h6_fce9DlLC6kYxZ>|$?%-uJIy(pE@)<gde-eeyJMz;dC8`|F)uE{oZ}E$HAh zIWjHSEP0Sy%zbRPnZ~|U%#VJ_oOGt6DoWEr^|kOd&6*l+CSm?tDKYwrGP63}&3@k6 z%-W(;!O-J^U=dj&F4pwGx<#vmMQw?s@o54zO__-sms-${-acY=%}Q}eh!R~cDyEGe zCQ-YQJX;a7O_;*H2r*NeM2oTo!kBw55ZanZU+P40`36TJq`iwJ-fmzY_VC?u{05=3 zejqNBjG#4~nQ+o=Fsd%H!mgd1T@r1M%T;I5eTTs~;nzj>up<M~E1dYwuYhEAHQCZa z8w^yNf`4BXq2nn-%68oXE`7r3<#r<>=<-g;nHEKfIc?%N$5wXump!vwoP+P9XW&WW zoov2kuyBbp>j(Vi9^BETti16md_Bea#f#FsDgp(1{h>eSR4bz>#F4_?6e{BP()MY6 z@o)7CcCaFwX03h3_J;eS)@S~G=j_>#tcje#%o$}}NvK)<o;etlkozbZ633hs8ccIo z{1h8%RSjeFj;KR?!&LU3XYp@+eI&eJ6iHKU^ytH6b1Hf4iIxUk@cwQdh3@4{QNw8Z zX0HabQWAvK`|Z#_as=8vI}G;f!*OK>Qj)6)>AKA#lLy=}Uv!iGYew43`-vx0vbbOC zCJUTmkC*5B(cwYnVr8g0{C)8qR1%%(;iCYi@^u%K&kmuL|N5Z*dR^?3(*o<8&%nXD zNOp&&;@jDaVCU<;+@b17w$BFQu83}kHuRzHAHArk-3-Xg5AT1~qBX^{a6>Ebca$`n zG2si_80AijwW6_V_9xDw+`^I$*$Wqs8e!Nd1CSm$0Xq&z<L1@=qC$@hzMPFhxMHxd 
zV?Zetmd?alx!a!i;`&ou`D)l*ABU|9nd04vlga8wBr&a#?8X(ok0>mH(M<w&ISi)A z0h!n!SW?8J1SpemSN(!`a$f64{ja9c_8Q*fuj^orKbu+je-DIHTa57J(LxwnI~0PZ z8sLRrAK{MWKWI+d1f7{aSbd!DrWNg};mA2Y^GZYH&*^BKnMRi;29lO(IE|0{B+kCF ziH&_|NWaedAl7l7!dkwQIpRtsuM^3AvJc*?DM2&J2R$$Td-){^W-sCFTz>&imTF*+ z>npM2Sw6Vza3$3PzuDC`GkS0HPms+^Krg4uY_W3;YWORmY_$wiP8dw`gNE>q-E>@f z%nu6YJZ675-=wS79HJH|a6V5e9s4pFn@9bIo7bmeXq7&$*w6`;^d1hZI4hFTB!m-k z%+gYga-PWJfzC4M%qYbBl@_FN-W)5}y0Wy(kt9j4X3c#pDeA&SVF72h#1)vbCD)B` z)$?X>pIr>SPEA9<r8C%uo(JrThaJ|{ZxP1r?SNU``<XD}EL&kTkpe>v@S4*l*y)&# zpZ@eA)o3MJqRE{}dRIl4X~kIGI*{+NU1?pl1|~krqr2QeuxEB0c}e<^VfP<4`}#0c zZkdVlkFu!jgSJ@K$DDQk89<ut_UyB#7DoD%Qr{FG9O}HA?bB>$e~N=hv1J54d+*8W zqdo9|?O)O1hACb7whCU(_Qw5@=6K|JD42zQ5{$L1FktB5h7embJf{~9eP148BF_q) z$XqFYuYAH%woIdW10w0Znjb2@C};JL_p;)eK{U9pJbLOVvIo1>@KV?&QG@4E@(&N8 z4;dbCdtD%;=ZkD!VJUsHQ9;o@mfc@<STgRvD7=1U3LgG7k)meYW^rTM*!VF1t%qWq zKcG(h;;N44UB9qasZVV0#_g~wX&O0vNye>fyIC<?AbHl5!}~g!LeMLJ)^Ak`+OP4$ zE`@Cp?KQS|;7=&I#u{T}N+1p8dk0b1RMPxHo;4p*V&`RDX>xd;r1-Qm-n^Uww|~V! z({3O7t!P3GDiM^^kjjQW*1(QZFFX;agu}<GpvhVtJa(iXQ~#Yr3Q<-#kLRKNM+LJ< zYkQND{cPd4!bf&r-2?yWO=0KLqS^Jck05k=D~pL40{xR(!TPu}sb4ds;Ji7|Z8V5% z8XTyGJEJ{@Dv{xcVbpqd25tQrOApJhiDtd5@&BxW3AgK6lCd23q;$e2%3*{24m7xX z{b1`}sZv|*943=*fcj3}FnUxR6;`{lZ?j(tiw;bpWioR3!+f8Y@vZ&fU1lPT)1M2y zbuDPgfLSd3DCY?O*vft<u7@2?UTjwFR2p(UjJ(>Pv8NBJK>yr$+|^V8e--7xNT!;7 zU!8#FA)W}mw!q&h7a)7h50H7=<)u+4MUG!&NUmI)bn^;9d%y?gW$+3b^D5cufs;_> za01?UG{)1#{UKxDanBaXQkdl=g$sU_Q27IWwAfY!v-*_cqt$viH_V?+{o_xzH=nc9 z+E(JaW<4CHW{1Xu%EUpdOVN0xBMm%kPKsU=@IaI-`TOQ!^UWW^8_o$;?vNE;%{eIq z*$!tdCpQYVt?9UY(NV8izWvB(f+-qYn8bP)grlprH~p3_z(JRWV3@uh1*X=Bei8$8 zd9e^?EW6L%ZgpfIKW>3OtEKRk&L5$=|6fQn+6H6NHHFNXc{EJj5WjiMXHon+GTpF; z>3;+EVXro+jL4(@#Z~N0gq5&jO+FaNyp$Zfvr|}d#;QSKjvr0^n?#p(#Nf;>A2K-< zghf~SqRHUv;?YEb$-MHTug;lbJ^y_>c7HUL=*!^FxEIiUrw@&C4u#4)zH}ukjy*k; zM&488a7X<H&>R(lwDc8=__;uwl9tL2#P-94)<e+z^QTy2?oYwe_u0!w!So~S6|@bg zfLZebNxC$cBz?}X?7U=bI^jw0w2M){>9E-0)gPsH^Nv+=GR@m2#U|AFWBKp5te=tw z9v7Nf-{JFMN^>lAOPjIzmwf0!brCgowSeY>Y53=fGx}9zV4jBqU7FX#p7tAnPg@7! 
z^rUh8uCGg1n#RK&`wW1Vi(>YzdS-4@iqHQjKzVZsDL(UnSUpQ<I%UpYwI$Pvk^Ruz zTpOqTok3~GH@hn(ghEQ5Kx&qsy|TX;u-bD2@tf>(I5CVnjcT>Aaf|}x2m0a4>J#kU z0~>f85Q)RKYhdcW|AgSxOSzvRg;p1s;%U2nr1E+m{H&;CQp3yHftA^`Fw>CSq@vi3 zxCpxHR)nSMzO+wmH2yO6#((<#aKaq}aJdmlw{a)DOD(_$sjt{=?wK=YinRVq7LB!u zz`T!}!8>RdEWKlZq4Rn~&j%aDHX~<jf0WJERLzAC_y38#jt63*dms9D_ySzGHUxve zfAZ?vFOJ4^b-*PJ-mgFO6UsN3qC%h!`{A3;?r7@KUQJC5XyOdU9}aXTY&`59y9%VO z4LRSWKc#4#6BOUI!w}x@{yrv@yMw~$y>c_l@b<*gXY;_r6KTR@6K1wCkm=o=K|{tV zf$70BY{nCJyrS-ir@ZGf!>{9U#Ia$5hDr~6&p8t{zOP^b&-<H5cs9jak(F__``WYa z*fZ$5Xf@l7#*et}6|qsC`zn=DJC1Xr&6`<y_fdAswo&-~KZ?#ftjG6@<C?TIl=f6< zYHB~{G?kL3lG3JKDupzxuf57vvXYrisOLU*_TEJHj1U>&cmMwGy1K4-KA-2l&w0OJ zufKJW`8yH4@`lnc8i4QWcd`_P98%2Y-cAEGcs@V}LpFT@w;ReW2AgNXu_99(w=9LU z+j5xi(y16KA&bkV52N2I%b8K}eOTYVlTFMrq6K+V@IbE_Jo3K@tqxbcRz}LPA@$+l zQ5j1;@wO!Iokj^ov*GVK8L|m|;<b~-;VI7kHIY%kOAFtzMwKXb-a8pvB|i&tlb#F9 zj>@sMq5bi5Kp-P~1v*gN#D>@H7u3}BuzR=~tv~mVJ*rv&ou`ajzuz}uTZTV`$(*16 z^G_QK?ED8`T#TqBax_^uaaLcFGS$zDVr3b6G=gWBRL>OAlQ*Kc=jb;v>2Db>zi3MC z%Y#X!JdfTS<nADYS8PpS43_?AZ5=u3D`&S&CS}{>%&IL715dwaK8B)z@dc#qaE%@B z2%{W1d+dnetYy=D@r<~h5$`Cel=h?i=mL7CT+Ol%_oLy`QKWZ4k(^uJLWa*OP~56Q zHKR4?(ST5NtTN>s(@vJOaipjys-y5~hOlIh6DY>W(o^NlLjHqn*6id%c8e--BA>O~ zOH4%-6H~<R9&{{M(`(ZuEt2?EDKxyA!YUM>vSZF$g_uBZOqI`vwtbh_@dRFN<$b4+ zv_L`bmp0s5=?Qkf4ss84303-+Q(9mmCR#5P^T%-}vx5e@Nc!?UwYnJL*azd+7Sp+% z-t>I@J@Mi-FX~yk0L<2%1j&n5_*J%<y_@Ajp2^vGJvtpP2u<vqZx&sx^kw5YH|$0C z^48$f0y%t_h1_5}3Yfc{9ac>SlM_{(qism}uYE9eX*Jt4&X*3DJOMM#p;a7G2EAWL z;GrwJxMcerF({LX_xUbR?yV}2RtS%6t%2UxHuN~h6t>^;Bl#JRA;(3DzMjgZOY##* zu5=hv_vHJQuWf9f@IVwM_op=@d56aO2KzD6isn?+i?RR4(eto%g7ou3nztoNG~E$_ z#s)Gl@lPN8&$ACch1INDwGAvdtK{6LH2fAn77ORvQp~mOuyLt5X-CB3P>&Llg3l~_ zd^<E~PGXAJb<xt$8P8t33diOE<tawvg~U91^|q3J&+Y>|2b$p|<qfd=?gTWuYr&=j z3V1%kmGg#=!ImABIN^vcsK%6{?Vn0E@vJh2zEh&gti#NILIAsqn)qoa=RrG+rM&h^ zI6K2aRI=8g&D)~s?z@FB_l6Cn#rmM}E`A@F9)O*l(eU(@G}isAXRX}FJ2LAXi{BEA zUb+=<eN-%dES)AEt5LxnGlH>vmkik*Jl5JwZ$Vyh3)CJOjunsAf}Z`+*7@1x$5J@k zv1`Cgmb!b95cntsXG)K!`U-O<e?c0hAKBuFZ#t;ue2i%r#<KS99^3^|CrnX`!Frwn zJaS<K&%kJ5@||yj<mYfEads;-cRS&#8UE~kr3&3vETXCEGQuYDCcFFe0y}xihwdh3 z<I)00tiE+iJXotwRdb|JP3b!XjgF@QL!^YYio}-B(WSmi`(c*zBW5?<hbG9^iRE=h z?2@ky+?X%We(fmy>}1Nq#4K!4Pr}UX$(X{k^{dX`V%m4U2;*iffN72sFh6QKJMA(8 zHR2NS`n#o^4HH6V=JLE|Qkbyr9G`KyCBY4IU8p!7+!`=wGuvW^;2*4jU9$?&MsFa# zm>W)!S4~lY1>?XQDU|wEAM^f9!Y0`yJjBoPCRQ`y@6-@X3!X{^yN<A~uij*4?*a*P zbhuNb4RV@|FyV9unDEU1Gu~mD_56|;{vr^Y4^G6?aotcp;t4w%s6}W0TB7fUJzkxC z`M&g51f8|3M6K*n7FPRKJg4W)`c?;``H=*iz04JkAL9<~4OyH8)fXSvyV35B?}F-1 zJDMdM3+cD{4B=V<Zd@RVBVFS>Uwrq3D5osWpDrN7<&$aVJ^mb7>cedNS%U8C0t$QS zAVxb2^ruG;OPhHXG$w-fadxYJjvwiC{DpTTvMDY^6(xHI;lAgiX!|}ne6k~#78Ra` zRX<bU@bc~O)ZdRj>@ubeYt}JU?nl>o7(fSykHxo_cfsL`Nu)GUgBqpz*3-J0`G0u_ zDoX|kBm3*asiy_Pm}pPhUN8`}#ZdCyxdJXr^<$p`OmTTXzO#FrN!Kd(u`RV%Td%8Z zgBi0UaDr1jjy}~6t*#?*l&Ty?g?ht=sXDM-@S~8SX^@{$%d)hJS&L%|8|7#rJ_>3T zY)^Ix#y>a*zf^;*S2U*<opv}9rwewMbxAk+CA4(kWM%%l*cqqApx8f!!upJ%o{Rfi z({y-FE7$`KY_E!KXIenAgmV+Sx3g>m6|y}VPc9C<?D^ktnsLZUyx-I!TpsrcjI||D z@KmR%>tgZXy?k-P@F3y9sSI#aQNd*iM_Ek5arV!!0&5hb#F^vwGtc8+J-4m<#ujF$ z3D%3|!77jb>{JM!??0<$X6X)ec1}BUYwKWV_`dO7PCeAe4ky_k{b+7nwRnGuAsa2# zpV`_fP>pFW>>4SFseWOw`ThXB-e)Mdp8dhDCP&k*`cxRpd;Z;=J+#v(0MfOFK>nJ! 
zu<E}EX0f4>8F(i$o%8yblKg?0IHptD1D^R?*$Co{r{e3HLvZ@$0BUZRqKB81skrbH zvtB#`6LQ^9UQ3eQ+uS86nflYb++pmnpEWv3l)?GNWL!O6n%&kIgb7{ELfyGU*mCK- z*!&`k3iqvN7Czfy={H+4pK2~>KJg&q(*r5togaQSbwH0JMr7>8Kks1~bn;dOHEMCr zQoaY9Juj7T_kB2MltP2+)-W#>S9E?jjMg^B;PEetEY0;2?9SUR3_7<?$otoyOiXUE z8qUF#N-D&XKj*;bXBuVudt-9747y(n66T&g!oHbY6?=BPg5p>KlO3H%dtw|58Zecj zCtA`cD+_F_QS>SZz1Py5Q_6dU>jX=EdA#7xzoRe7U`JgI)5@F9p7q-ye!MdU?=Kq5 z{|*h>>3vtIHl9e=I{k5E>m!yQGKTuE7|fz4y3;!+a}W+rhK)J1gn|1CFrvc=x1EV( zF*~!U)o&76^mvi`8hsk}K$&%X+y*zgmV)Q5bV!})gjQEiu${NPsb<*}I-L}T?k~nu z_-=PP=;_C1yL1U-vaD&DlN$UDS7p(4A>uMs89MLlL^l_w)ABi=y^`85vn(4elub-v zRs3D+#6n5^(^#-H@CAuAi2K!dumM3b=s)qAaKU}Hppli1O3$>JcXl`2k_knGoh(Xi zEN;2C5Pr_G6UK&l^S<yOP*=z&->U)m>C6uZ+I)n$&bTZL47w^-P0tbT9Q9@{jp~&5 zyc`|Zzi0nW)`Dh0JdJAp%Q`HB@wXTEvHQ5u>X%kxaH<>}5(DViC=ZhC*a2TRH}Q-_ zIKEkKOlKGDWLwTmMexc1W8>|dOXPs(&C;l#<tMyQbb|4(HbWi$U=8xBw5}|L9`f(3 z1*Z-{hMgl*Z%$!q%RJG`a07go<^9^&LHybDqJq1%P*^HYFL#7ef8G)5xalBx7M~EU z&J^(ZO(<S5N@q`QzB;xuIt25Ne}*f~mZGM<J<+{a@Kj3&SEPsHfX$D^x8ajfCp1j> z#M%4@gA9<Gc7gfFVW6;i8PoIUZl1x9gv7iNu-)PU6yDTk2ZP@;^ITJSFwK|r3{>%h zb^wJ8*Th4^O>p*i1Ln1KC_N63ppEG#AW~@@q-HD!jiW^r{B{U^*x1mzbiX+)p010! zL9VnoQ60wF@>xh_GrYY!feO6^!L`+cT-z-vT1APXs`b!*zXU{nx5u?t9x#8WSgKrP zN#%z>z?t}V&@@rP+hfYG<^39%GkG(dl8Hj8T{bLt&SE~Be<XBoQm4-s1oY2yqIq&g zboKT@(fel<URm47+?K|$BXQZZ`;ik(aQ-L6kAKPj1Qo#dOTIMUDVuD*hm+#N0#>+U zl`xinw_jUb!+IaqGh>=c6VEu(If$U<D+fhA^Izh@FOzZ9#SC=uO{LG~CHSLw8_VGA z>-4VS=(^xKI~STo3%C2=8ih@;ZX9>aR6S#N!{3SFPmE~7={W9fv&KJj+u7+EeZ)D= z4&<`c0pkyQlS0o{cH?({^lgiU2A^MS`L|FkchRNG@&l<iauK__a1h?hzt8TCQpXRe zRcJLn0PRoz1f9LkxHRQCjQEv9Tjr(l{kSYu6-v>v@u_rjX)u*Kcf;6T3;e_L*qXc2 zF*ZV#TKn*Pa6~0L7U51Vjm5O+%33jfx;5nmG{UWz8_c0(DhfJktfL?U&V3pqMi-Ui zno)*0CRvX2(MF@1{{m>fqmQWp{df(>g6O+8*?E*;&xs&hw9Sx)4T|M#e+g`yw_9*c zki|dyD{#J)1dG<Ih7Qw6iq>|7iZx$hUBc>PN$N3VR;@;F8p~<_hocaXmX0rf4#Nv~ zf3i0}>|5>nXu_l?hICSIg-5rbj9=z9!R-49xXrkhsXrMemX!F@M83!Wa2>I2L9Ng_ zLknY*$Kw_CU@8jtrcvrn^kCs!uht_*0>A6>tWgwQ&%3||K0E;Tc1L0AP<!hCcnaqJ zu)u;H6Y-&84}0W4pZzT2`??o#v^?<@sE!K7Z-v|`mfywID{bQT<P2I?AB_{`F2ed{ z9T4?j6v<U?WsTDW^sddMvxQ6HO<_5{aVccE6V#c{l~~H)ow?-C^-%2d0+PpDWBS;Y z?D7g#`j;PsGp+0}*|P<r|Fa`zIvB&_m9RqbJF6L;LG)D}&i!1;N>Z+{qlXTGOOQMj zbeUnyr+-3e{ABJ0p2O14_QmhU5|mNLJ6F+V?EA}fJlm=yW?CG8M1Bt$!#~H5(^o=> z%0FRnjV85R-4CU|{6&X7OQ3XkH=O2d(8EO|vG2Sx@_L|7g;AWpyzCRa*<naUok!V< zzfYMV=S(arn*_!e!`Y%;8`;c$JfC^sldyJpKYF(kp!>SJm+SYfY%h10Dh9uY!E>Z& zv`;@=GN%vCH5-pEd@j94$D6)=)FX#y{M~<PAm;ySV^YR()Z%Q97H`z3w$C#*$XS=| zu$jlIs`SvN@eg~s$_4YJs>tQHG4`HyW{z7#Hg^nAZuWJM%bY}Ay^3tGZY-98DkUG` z&+o%vyzSH%b6)z<WYI{NFgc5ok|xpV$Ri*-@07U3*@rFPp@hx-<niN(6>KNZ|2z^j zvAFZ9@O6Pa6y51dbg~i)Y;OzGGv^AfD;nAD0o@>LKa|?5<Ef_Z6B}f53RWK3!VC(g z(l)DHboi-4?;|AXtE?k?AKS*_&l#i7alq};{prw4dA3k-aBJQQUCwdLWeU;_OpQ%r zWeRZ^dHyt8tIT~}c^335+MB7)vS%}EYuTzbyV)n>aCFlS#l=6O@L`q;>kc?8u)JSP z(#8Z1Oqqi9E1fWO%Q#3&Eg|MvMbC$7iE4lBaaUJ4oT^^NWF)7z-c{cYp0%&p44XdK zaR#YzUmkoop@7G|n%M2e;n-C0m3tX-Q0~kqP~N(Vb<VrhT0XWC<_sB5_lGQGDd9gM z|5P9@-fD@Jl_S~i%|0aaRhDX&+weX07q*3;IbAW7ZfELIpE^yp=V?0DDoWDt8^(C1 zg6HY$Kf=#HZuH}P6%FhVL3%UaKWs?CBTKKtz2-a?8)4askMr31y3dfgNr2X<OUx>% zfaDb&*gB&M(mj#HTDJUUdit(do%fB!IX`3?v8ohlNz4mI(UNdO;YQ2|GR!v+4h=BD zyw%0jzUc}q;2FgYRo*N)&lj&b-e-qhG}-FaHh3{TUL4EaZ{_!;@b8m1EcIOy#^ns6 z&422_c)10Ybh_dCo<X#=ZY3KO<iPw2mGIPNP4cPffF|9Mg5(AjYUkY~zbo-{Y%KRU zeQRSq^Q@VJ`Dm&bug=oT53-)RA#`)>V4%H@_-J=u%=8^fTK&{;t70`A9+gKo0|!vY z+~sWTxu4>MwL|D(P9%BW*v*V}vzYa$bh=;}h~LjVfi>|TSZGQOll4iV2TN7(^GQwe zI@AK+-bdr)!BcUaiUB_PHHxg$D{+cmJk2<HkTbtkv1`&M?xF91t07;!)MdZ3?!fi1 zc9s-98D~j@HS3vvV=fIF{~m0XAjTeDC`4CC!;7*cG<f(%?8AHU1CkVIzz9E{FB^&h zyk9f+d?zShtw3?(eOUJ5KbXFM4RZ)jq}v0W+3GP?<lI$^X5v`EU{MOZRw-pq)gq~L 
z@qO5OlQUHXd88W}{2V6W>fH&bQTrS8{g*JAeUe_j6MsTQU^HFcaY|@*{Kh7@JYd}y zp0^f<szB8CCZ?eOvvpX1NwT?B%hqm8C6n6&D9*;6M!!j-J&uWVYE}}(&*+ba`az`U z&HLmb+Nk$!9As{sDgN`$hrw%avjCG3G%11yG~$dwQ`V4DR$PQt+R3>4<Z9Mt`Uvzi z>zGYD?`?^^yL^|=#+;_o)InZ&p<oQ$HX4VYq^)7&jdQGNR4Lx>J;FXj6yqzOR#v}a z7F%4bM!6>Lc(%y`*Y7HX=mk}jd+HyH{$~weH>`oX$A{5@xszy{q96M(>@)~GznZxB zJUd-qLj8vnliflGoR%2`h6a7;<7YiQyVe62jjV@~-TiUR%a^T{ds5iaoqJ%`r*bO) z@em|VDuWh2XjNEeg||29gRI+1)@M;$Yh5GY&9Um38*>-(eBG(Q)t9cc#8H{lht?ho z2Xg$I!M<4x#C@EJXP4}ZYc5xy_Phk_emGbB-ZzMRr-gH0*G92+KwsK;X9(UJ(Eu72 z*SR}ESG*LPM_s#L!-JaBY-Oz(9qyirN;BU;tb_~8;kz#l?$=v><_(LCb!W%IxbNbH z4$ZlL1`c8jy;HYCndCvJJmWXqFOkDN?|N9HK{W1Ds=(X_1F`8-B+JpZVbI`=(|CWh zqdkYtZ_~hc|C~vBg)G~prA9Vg=IG)5m-kO%LGG;@oj=vX<O@<!Q=4aavINw8?n!}< zE#ddG5*(_z30#Kyv-4AQ$sy$xEZ*!$53Tnz*A*9;{sMK{msLf~{}+2TZv^VKdZI%5 zMV7Zpm%c77r6EIl;NfyPy16-=a!dorBYX=xtf<Y(_}oO-r3l|A6;gw0J{S!MCOT|{ zE#;b&H&&J`9{ga|b^h?*QY~;@a)R|VuLSder9$P+mF!BuV3e6WkNJ27V$i%|*7y7h zmN*|liqA=NaU$)JC?RRvI?*yA3A0`X<G9CZ=ss1R{<)=LAKOe?=x$HtvfJ6ZWLs!# zjbbi$?Y&C$i#_Wm#>3a(WY)f6G7e1s!9Lf$7w??Zp;e=^KvH@ZQ%T7t73l`y<sT*5 zSX@G{k62;E!!R7kyC_Gy9nfpfM0D?5Af#^$L^lsDlz3Q7ou$;Op;OQLN1cb2&GKyG zF-NL8-pN+Bm$P!0<*-)hi`@?YSmp(OhU~Q=ZJtXVnzj(UQ(v&{UDlZUxltHxUO*Ra zj>7NVQdrXRMfmysq?o<5iY>Zehsq3SpotlcmzHR;9|S_nm0K_>W*XD3xdZdpn6rOB zE()nf?CI#9XiEED;ALj%NzxJhsdDN~k2#s$UPrA`Q0|!>wD8{hjFMECu>J_#Q>|lH zrFO9~u0dpWI1=7jnc&+=j>4JwM$`~Cf^J-FI@VES$`=3ZONOsnn5u^|tr#PP<wjp% z-fQ0H`#l>XQa^)K_ZP4~Igm-&N3!i%k*!gwJ}54-rZ+~aXguQ*>vcZDKFJGSg$}B? zXuq<k<~$t3yGG(~%Y0P%m5D?2Hi%XCs_^?H4V*H6KFCKq;Fv)YxQR*N*OE|Dn06Ur zuWI6bIXxO*-jA+c?P3M~g;??QD-4&eAjj;BEO?ABhCD3C%Q-*T1yjy!>{A6>#~aaD ziBxpjaSm+jlmrdh-QvC1R1i%3upwYHp1f%ZHJf>c$-9U>T*(hb{hdKU18~S<PkQgg zcLg?w;ngK8kT_h4yAK(Vuw)zS{N#zPzx(2I{b$U!23VgU2K(B$x2O0y$QVo(Tu;Q( zob^*6g?EJ29M-gYKMx|4L25W{>Nxr%RY|eGb;-VN0=RuN<laYJdh2|~YudjX&{APS z*XtbwLx)&ev1AyoDZLL{*6f30`(h~OBx7d|L{koTHD2W2k6+x=y21Ap47mJ_bxc#o zk^4jNdgV@b^Td7@sd!No&(^Zy)u!ZiCzH<Kuw$A2YF-n~htsHUZJsuV%qcxV6R+t1 zgv75Caqy|}_(Oh+;9szf$u3Rfj1+17*cuO2dq?4|`aCx5KoE@_GLaT3RFG_z6|Ffi zj!fIa>C{zsTKdqL?AIF8jF)rSoR3<#KT?+FneyzDUopMzSHm6*tOxJHa4cRMhCSa@ zP+xT=3%D98&b3uV=V^b~*R7ji=|?$~S-YC8ZQc%Z&4N9~&Ey>Z;CQNg`5r#*@<F9N zE1`PHI(U0@Gt3wV%x~%vSlBlbbMm$bcaMysg7<t@oqB-%m6yYbBQx=eOBCCbGYX#W zm!hF^t5|7SD!%2Jfk9oFv_VOYl~q}?ma1CFJ2Z(R-=Ajg`$|~&nov3@1^A46-ZnXe zw%(6)!_!;yct8F!Q(tk8`Bj~VclL%f@aaITvogi6NBW}fn?Y3SREjG7)KQ|UkqvMd zO;eEH;~WRz&i7?xKHrc2nE2Du-whyWa!=>++q|>3g3W!~4HgfEp;ec_pUp+k_W1)F ze4rK--(T>YZT^qh@tOM=-dm6`>4LZEgW08qm*Ulep=3JKhwkSbWXu0}QQkL4`fah7 z_3qIYo_$k9Vw3RXX-Q$B&L`o>Pgl0~$Wx|dI)&c#B;(=Uc`WGC1@VJZEPl?5WB1%O z;mCnE@M@|aHU;r#;pa|fWpG&-rE!DJQhCo74_CzLo)Xl0Fo2ADNL<!gP8ZfDu%Bgi z<Tg{rtBlWv<KmAqC+=Dvl6VF3hLkXgd25BK7O~tt9l)Y;PKaN#;)M4zT=AL93O3k% zGzsp5NM`&5YCn2`l`T9cEF7;49nZ6Hcf?~@9~wdIi!pALAB>HB4nE~_G5Gn8#r>W- z)bxBX+feOJAr@71J^Kf<QmGc7qy^El)%Ivse~2lnNrI2nJ<uEc2Ci9G;TO&s)(g-h ztCKu8H#36`64J@zc^*lWxKRFaT~X!9s@6$Q#<Fv1p4bpNlr{wy(8l9i*}kAce06pb z?v^T~6pd4Gm1o}b2PuM4buW|d<0RmEAKbW)^SVb*BIl3sWH)OVDf#Kqy8Kjp3CSq* z-eiHMlSr%P8msF68FI9zi9t`K*r36VtimS;Rz4|#o&Hs<&hIT8^{9rYSC_K7%q7q^ zu#~BB*G(_y$XY&}z+P#2(5#OUICgUZGyY=%(r0$TA5zCtyS30l_Xq29HyC}q9NE{e ztHn2VSr|0l2mB$OSzlPk4od%HzYiGDkq3QH^RzE!mYG50$y?0kydl0j&G}LLMCKdK zpH(+0OjI(az}M&5TkpPj`n)A)v8l3iQT1$-sv%Xm#o(`mOJYc57fdNBr$br>s9_RF z@|uSDiL)16S4ol4qx0-q&O@fQ?htGWjfR`Q-on_ADpd5%fHupVfE8zs11ZQ++lX2g zzDS-16j?&<t1sYw))Lm<_oJCbbx`}d5q|Qlug!{JRJ<WUb(do~(_{jTH*?0it_pgQ z<%;enjc~BopY!4D@NrZgDR^t+8%^%H>~Nz}zh==))0Uk*1(<u)iAwjpXHOheX<K+0 zE^RhMS#4#yWRs7ZhvXm}y2jMIAF#K_53{VfPBchg3BKj`r<@sqbi?*6=os6H%ICcB zT16b{_)mdn5u=%m3ukB#<j*)x7k7NO#>B0!VTVc#clphQfSC^XzJ96TQXP!>*ZPRB 
z_zoe&R2h}0c%uHXPUd$Z3BTG-gYK(S$p1z-{M5|AZ-e);yAB>CKU4`if4HFf_Tius zs6)*eaS%8}pLwQzhNGDdw95Jr<NsS!tJ;U;%N?1$_77&-WP?$vlW5?NGUykO4bL>% zp&*d&0`K&)(-y;Vwp$=Y^&P@{8LQZ2-MOssZ8_4HSbQxvh<@F%qe~VSSX+Ywp5RQQ zfRiCK=cK9EYx^Or>DL(g-G2$Nl^J-T{~(;vBTv8iN#&%fGR?T)1bx)4yiUB2rJ~$k z_G@(&ccE#}bK+cyDKmrvRoQ6zR2KRsJz&R%q>$;R0d(f;12*?dATGH52DTa~;;==N zP&!qY-*5H{2MzZ!9W!&xx%>i_zc9ja6VfP2R#r$`KM)tI&x9!D_fS&Do!oQkVe--# zoVL1Juo<03>2(vSaB?Vqc(w$N596F*{X5_*|BaoWHkL8#GpygKrOf|%1zrCo&*B^- znEeK2%A7WyTzizr;nEMWUPl%8&dkJ834h#F5CleVZnIl<O1STp6o2>SuwRm8)Rx!` za=Z4xu<OIIH9!NDX77Xd|8=+8P1A?!X_L|AW-;xMTnuVrC!G2A$Lm<U6JBi35eq6h zATBo_f7%AqQ~h4%w<{bEPUtUaW{vP#)Rcy0y$U!b-baXepn)jUhphj0vkNKZG<xd* zDv}?Iy^D5ud8xdIMTKKo>HE2GC+*V_$LXio$aOI=;6^cv4Lu;lm}TLWXPWT*>nJcg zU_mBNktNq<qKsAo7~GQ(oiFK=_P|z{*A|QZ2Lc7<VJqO|`v$OR%%gp$E3iFpDh^zf zO;#n^L<TFF_16Ma*OI_Uy5}`9J(yPg&I8F1b*ew^K++pOLHTTLb}V5y_N|_Z-?ij$ z*av-@Hb8^ju<^p!$5vQr9Rp+X{Fqy3y4X}9&n~JXU0S0<3&I14gTo*?IiD8n{Rrwk zTLs7eu0Vg~v3UEXiunG8K4mi<k~*GD_0tsa%cM}K=~AG#eFl-W%T2NR`4AFQp9-^5 z!eAvRpz2~3eA+yK%&gt$=L<u8UK)Y@&$WrQr>n)+e71UUradZ_4JPGh-&xqJBsMMB z8#_%BXytNc5XQW1-DA`$9*d0;njgG?!Wr81;Jp!@<~*{<%|?Q;t0ftqREKZb7FgpI z4Ts|$>0_h=#al_z;yLF9N2|f4Ki7e}{SxSg_GF?v{%G_d3;%n}dBI6u<n>_^Ox7=l zStDF%<h^UmXO9D0zQ-Lk{+RRb=5F}!y%MdRUWOt1?X1?xkYYx7G1c|e?0{uATkjB! zh555s$?S_@pY}w^Q+vyl#_Q5`bjO9eHo+W6Ej)2ohf@EuWxAfVyk8rP$4|@#bvNE4 zC=U^=J)f{^*OJk-^s7)L=hm9CW+-at{9{9(jANTLy>O*COo))og(+cnB-1yXgd>+> z;`!|?Y~6So`f4&tPw~dsR6}a`J&{gT%!lmR#d!A46*hR7GMzQ9WYOITI5ThpSSSyt z$uD#8!TLSICB-G|_OA&juRI-Q#~F~gA&AtRZn2^yYc#OykKbg%@$D^dDhTkU7mjMo zw$sT=F=jt}NaCztwJCfKVTsp9KNo-J8_+_>m!NaBk_MV*;+fZd+1YEenR3k^_&V(v zJCfYN@}`;66>E3gyL&48@m`tij>plkw;C|ysu_*`-owrx(4?ZiipY%psVGYcOm{wq zq2|MJR(lL?Xe?sBzq9GxgG!VuRpA-WKdt{#+?b@nI>9;Q6ljD8;;%M0{A=id)A_#B zop)0&DYe1f=EvgX??ISXd<|}2iWS=xKEvhJ!^My$TU3)AEJQ>GVaEj%?u9cV8P~D2 zqi_iBd|-}C%zSb6og{kj$N}{?RZ&9)&@-Q3P#!6dO8th@sYr8t95xaT%;H=S+y3~o zAxbRm_m;Ic>fzYgpT$R=*0?n}jb3y%fmLrkM0k|2n*pcUu<1o~b8EA><^F0IVA`L) zo^60f3wJ|WrwTRg_eH1qkC>!$7-}5o0G$U~aJ=6j?rt;0$+kz>i80N>2RVOCZ3x0v z_8kJ}*7MAm1Qu(#K)81Tj*!a2V<T;F%Y*`oaLN$=jrX8kE+uH3V8-)wS$OqoGW|_V zWCyAf*-YnNR$BN+oRlL)o;&UFWx+<a>y8vk*EB%WRw<g#BGG(KBzyI)Kun*li+-h% z=wTQ`bqn{xf9d0?@whRm4gqrcAVryW)8TCKQ`UWP6>D?e4_lVpV!hiHut9vncb^Z% z6NBw>cz2U<ZF>$bzL1Z-*RQeY5Cz)hQH1M;Z)BH#1+e&s+&zP3_|<A6EHl5wW}GR( zXytFvJ41p7OE{69VGvqH_h%~JnPjTH5n`T<$3LsG@tCFu^cm6)4=awc3EQK&yZeLi z*snhwuNh1G_jj-oGZkz;N3hIROGrD-{fF%?!s{WPls9q*$Q+iTq!|I!wxxhR<pt4X z!H4Y4+u^C)MJDl)?*QJ6pt%9d#hovYx2!hgesjZV%)D4pXfET9+<zxoV&*wEtlgWY zUwzHuEqsMD2@9ZcbS~S&Ig$c*zwJ-@%u3hga{r|Swn_hj$?{UT;aCJUZQ?8<p6L|c zPsX}}!7SrVtJh_tII6Tw#}BUkx!>Lk{U&T?=hi2a(amONzbIC`F)N>y9d$;R1TVVw z;1ekLajqoP!0$g}anXM%FimD1EFHi-t}5LBp;slkHWYAHy$1d;*TazMQskSzMhM~k z{4;n!9CA1wcHUpke$36o4*NZzE*FjdXIC<HX&HR5Spz2CyTuf~EP!1e!*K4rL^3Gb z2F1tt4rRz{VO`7}HY;L+u#7EW^)p?WWkfoLT-C;&PJwJTT2lDPY<%$D2D+R|g}n8z z#mhsUvS{sn(4jh-bA1NUhul%<vhq5+^)?<Ilk>1ANCF3cvm`H1H>i!2q<xKHG}P-R z>%ApF-fzxskMX6kbF(q2$Ak=jI-!(&1_|#w*$?k1OjLd^-0;81U<aVf+dsl{zQ--d zkVd5&5-9&?D4D<HJiMOU;xdB)v@c{7XRaJ(?hR$=v?PWmZ%!t)vofu>uKb1wi=kNZ z(+<aa%Wz(-AIp4pT*yqCA?%-`Ou>Ba_J{8fS~We%+awIPtSCf3y*@xE?9lmgv!H&} z0S>;H!=frn@knVBJucTEvGWpiwQgq9$A1%sXYFQ#*V@ymwD0VMaURL)7~;q0A0b`i z4_n)510619^vLk9u;T3w@blQh68l7;r9=Q7_4>dT%`~O?*_ODo!Ilkr{fDWZ|11{p zp2FAuC88sD4q&Aj8H{(Oxli=?J$?g3TX9c|0p}ChkEL6oUbMV0gtUH~g)4{6aAj-+ z+ugUACDr-iSt$?FUN?>`ee>zoKtR{KAK~7OVsS_T_ZR8EZ#~hTk9EG%{Jgsqyft+& zW4-~7-Wkh17xL6KQ<ok5_?cz!&&|s7>a?@a4y0C1#OQ^i(InIj7Ss%3r+<~w{pDF0 zW4eQR_jv}F_zXAmWDi@wcV1J}!tv~Y4-osToxfXevwghxb$xX`yYE@Sei>a9?mbP$ z4Qu*g)Ado<+V4Glxi<iB9aX}i0}n#`%On`~VFuhu3PGb;@uK<x18h{b6aU+!Lx)PH 
zvbpAniDzPIt|ssD8;_xbJmXgr${n3Yk1{LCY*fFw9n|lSp(k*RJwGCfi;96_wPnfg zLl(=(He&TSnf-8eMei+<l(KUed>xX^9$Z}w+DEp*JsnH9zH=bjyz+q;+#x-0NEk@Z zV-OnWPkYAuQhew|2)Y_ieJ3@G55E{4GuoR$;RmH@CwHNg7x**hPa0&aU?&zuY=-NV zAy}YXK^rtyvawS&#j{^B@ug8IbZcpn>D3PLc3dd4S!9I=Z@gpCoMCrW&k;0wV{xa$ z6uNM#mftmAGP`@;P#(^`r;U^Fm4PjeMrEA1h_jW7m$qJAvWE>TFT-bB!|;=Jf7Gr^ zM*H3FIPqo{J5w7hG%WIE!IFI`fB#l6-mD83zG#X)YwR&k&6*mzu7G6REyxcm!#|I6 z&_`_}lgYfq4tyuD`&34SGnMFHTNsla6;1zrPC&WVK#YH^OwD|MDOsP%vXu*H_7f=} zhb>U$`bQY-AxT;B+`UoCe?DzA&o%hd%LNgXu;B`fNOZwH=e(h{ypx4k)I-n@Z+uug zimBO-6k5V<(Q~^u(<yQUx13Vm8%slHsh1FyU&H#GE~e?>uGn%RjcSG!)2fBhf^_e4 z<|rA7j?@fB|A}y8t|G?$&O%w8BAVUfOPgIB1?@x+oLuK7^!N0naiz{UFVUJN+{wnt zzxv}`DS5nhaueHn+Lkg`adzmFc#``y2JWj4!k3RTXu`IWAmbj6))tYZ9~gosMy!J| zb4JpkP2+^E`rRP?>mHaK<(*UJPe09Fs5Chf?yfrm-Log)wi<ml&9D%+G?~*uc;>b3 z&t-O^+@9=Hwy_7l14;3Y3NeilAj3a1Gj7Y^AgigE{pNa0rjj>?ZT`w8L>8k4-+T9Y zQcM|<dZ;qh8bikCF{kQCTGv^_OnUurJaM07^CFn=Y93g$O~L*LE3v%A3j^N&<aY`a z%<Y~;d!DX>y5oz5HGP}d(`^Ab?|Ueo&_B+)5(d*0o|EdHJVWTc=)t{9QTWBlnS8G0 zK}diCtw=w|zUQlYZ9Nqt-ZMB2vaORzI%FfuGOiQOtRH}9c`q<McL`hGWrM{Z8(EBv z6<xo_`%&6C_;gkWJJGL<;{H~%t()V;_phdq&tO}cm^++Q7utz(dxM#K@mYw>P-Dw9 zf+=;bD#-?n!W9>Xu@zD=6!|8VJoSq49@{KfB$!a1%t}x+oFbS^l4a|hlWD{Z&gWZa zidt$elpS!C{io`RYpZ@hwY4P8OA=YHdKLDW6AC$FOi1f^G7jBj4Z(wNfmil?(RPE# zK02&xO%8er_VfDUarII<w%8AoW+u?g$ap&BqzN-t-)GMin?O%JpE@0;vzLd{X~8Zh zIN54OdC#`8cb8?!#NLD~cV$qO^?EiuJOTGuXuzLo=Y+eb$KvJ-dSFrb9URQ+Mc4RF zX0lG8!IzC7B*mA?I+QU$FN}NiYCMA{6;o(l4rOr9z^VyTn6Zg8mTINYjr>nAjkELf z(-hdq1$9s~J{+At|Ax+bdpsu}ix!`H*|#NOUgwS`h}BbfvfljT;=AE-V)GXTF!?te zugLFYZRZ<ddB!oe?%Pi>>t`dxe{5%mzio$2YWg^YyX^dCpJunNUSlrYZ<F4tiZQk3 zn4fZs{afxqYOXJ#-K0PC`_K==4>!TrvkGKTt3b>5m15I~0pcY03MyT{n0<Ua2Gx7b zaIVgI*bu&z>3=SQ3u)$HyWNTAtahjFfM<|g*q^`iV@PwQ4|}Dzf_H?IIJ##zYps}w zi7`{j`_o4#*;t4_-)w{vin8Q<^P;%I?j<v?7)NVo`mzl!3Ane~f$l8)$RbV*#VP&! z;mF=sFxs$)CJsoU?a!tO5g(M<2t`Zu&g_FPm+-&KDF~GyhxK$ylfl_Z^l~=OvHUfo zDsMeH<vI;s7KP#$S<Wo_CrS0o6={4&2Xnh7kmUEnY^+N*9Z`J1CfER3EXfs4HYMYz zt7W({c`8o-T}qE$w~0^X2*O_nV72E$xToC@{cg`=FZ!&5gz<Zss-!zTzv4@KT==X~ z-VR6YHzC))%+sk!9k%o<z}l+KVynF^b-r@KQmYs`+eZ=pv{&NAS;p+$_bc#qlL?MJ z;fQzZtC(u~OyN;Q9@hFyq6^QTu@LUQjUS<bt&_r8!I10VvGW<Ulr4dG{*&SUCqK_^ z*Ya7!Y<_0sjInJxEo^hOBF0|!#4yQ7`1hhgIG>`8@++s3Ru})BoF%w!`yyC6KAFxu zzX+B)+{tOtOISH>24szNghi{%Y23M;EoYbBWSZ`Z^q-jx$@yJmJ6qgww3!?Z4>aLE zLNC@w-H`52iloE-w_yJF58~Yo1vq4I95at42$1Cr_TKTdTDs`)o7i&twakLnJga7A zh7z=Yc{~<)tHHg7UEpRGO|K4ih!^JmhTtA4f|rx<pQ<F^pCseEZRyz4UWJYQH+uHW ztfczhLUjIfMI3$5n%ym|1t&gl4we2To_?r}2bJ}yck5wsLai*V65Z&=NGZ&AoJjry zv{>&ldrFJ+p_x55S(RoNn|Nq683%e&s#XIW?+VAJd4BkKO(`}cjDy`bn_<22O0Ns! 
z{e|SfG~Br|mX>e$EVT7hvY?@tSojfrytRC^P#HS~5<DibmjSbcb4h!Il>@i3k>kv0 z`>stwy3t1%62-Y8F1Dzk7l{`3QDRwZ27Y%4z#$8UP|1jLrZ+zTK2MIoGu!WZ4LjF| zd|Efay%nY~qSJ+hkY={5`ydRQ+5mb9a~b)2QFa+;1kG6`X1aut%9AB*AwMgqneJs5 z+a^-`^uuhrwh5Cmst1d2lbHQY0Wl?r@0YA8@?$m2D0<0CpAMxA14r2YsmCi9{phrn z8;&rJqFE0&LvGJx)&muEahMay)%Rf!H#t!I;zIm=DG+~aNb|0I6)Mh}17knA;o|=c z$;3h$Tm}XRv7DD+6O_QbJ`JL*DJNS!<svaZW)nC`45vO5bx1#?M?5eym~GXV*&2L5 zngWkYK+vsBxH?szuEzz@uW}#s>*SetD}L9VHx38FK6p{+jn*v(m`;cs&i~ZPPW@io z(!F*c$X8^Ff!%Iow)y}Yw?CaaI@{p?U85+z!wEa>O^V`BM#hg>#Y+iN*`z^zltz(h z)o)hR#dnuS;;?eT7#O^rGnzTq5WWY~tBDs{Lb|8W;2ERnw&?(9KH&;hJimH4JfG$_ z9A^sLOWHg76CB!WB~*<1#!}synbF+SEXiR!9h;zwnlDDvmvU#Uemxs*_XN^0w_ogl zgCs6HFGIsloMaN8G%0X*JM1&6Vl}<a6r~)5TCenJ==U0SEH;$#1}4#7#ZWx!JP_kQ z$Wpn6Ee*MnK&=leDY|Pmn<7kunYw1M%Q+HNs{z6i&oZ}%i1X%IlaF2rHvg`Jt}|im zL4+pOj+4W^T31;6-Ars4cTSWI>W9-7w2HSwENQWm6>9%yOdh@)1)Epd=pA#69XF1p zkJnN$;8rg?f4PF@@64yx-vi<C%|v><H;L>e<0(IK2K>GJ8?xqv(gw@@@X9Ei=6i@< zKb1}~zNaI_d)j2<_*PVEP=JPtGi=Ei?mQZ}3;YTip~Y!7*rv=8ayneFEKdVp$MLz~ zL?;%R_#0-oylze3x`Z=?{V1oF^Fcaq!T@s<?0M@zTgE%Gao09_jWEcA%Z;9(?RtTQ zdd-Dtt{$NHU;<i<Y+w>y1JF8bFB}?;Y;k=&p7&Isb@khrvdwNmtTu&1EtNQL&tq2A za1WeaT*b-t<8fC0I4Wt=V>;nF+$la9lq43haD_GEHGaPj_ZyD!7Yf*MpEYdw2|aTE zID(F8pJsPgDN?e^Qg-82B<&rPL6b-Lq4!npf?Kvk{MbHC2#)bU`l(D~4yLliJ$`gz z-3mAx;erQVmO)pnCVLc}OO1V(F}tcFs7`lh8PTrTeqa}Ojas3M#r)Q^^G?X(N22^h z2TI%*h6jI4pxo06SW#7hR4|1W?7Ze>oiqm9_dH<h;+vu3KSR`MDy8Vq87%CDGMSbA zWve?ln{VY%3cnkRna#c^J8P9FyQ4;kE3#%U^LtsDw-(;%6fnp4J^RiX%#OFYKWU;b zn%^|Q?77=ubFvfrptV@IQ#6LA>aT@<>W_rwPTV1L<0Cu2<qpJLnG406XF=DT6HL|7 zj->O=s43MEeSaxaqLK}MT_;T*nVMw$BN`WsOC_Pd6JF`fz{vSLTQ0K)md|hTx~Zms zjhipAl<e1HAG3ILUy_Flo@P<kzzJACyaGmcsnG1GCr~^dDPhuE2sx@xE>^c#-+g1y zitotR@s51xp+7L-;s^H3C)F$M>w0z}con;NS%iCET+ng~py{0Z;<uXtIO698HtvK3 zDn5+E&ysw%`aBYkNsNPYkcD<fB5+#MRJt@aoK!AVF;nf~xOd8XagpH2=YGdQ^pL|_ zJJoUD-6p0tR-2j>OHiwABxgYw(U{j?*&FvnZ1cJ7r8n;#)b<vj#1`(Ya9q{u`0gvj zf3+a%8+oW^Jx-YO!2=fuMWOv{cQ2J~GUA$JGFW-aftuY~VBsM}Y(C~fzk?G=yW0UQ z9HnUa^-*;Cahv#7_J?q2&o|IcFvm%8BbaVSG`9UyhfVEX+;L&SBA&%i?fEpCR#Ydp z=_bRg@;+kQjULDh{stH2AB(w`pIB8j=dvExq>RQaAv07TVpbH=S)QG*eKZS(bPvPq zTz$NK(FC>mziDDyH-jru7&N>urW(4!$8-7kw#63;Vsofy%_z(Q9qzn}!r8T1D9IVI zPMs%PPnCs{=_sBn*LI>;*9YRYCMz~0_mVh`?_X~5d7)2u9Pd+(Wpjnq?4_m&S?q8C zyG^c`mB{C@2c)pfOM&vk8^ruEZrIW%n+@hW3WMXsUgWyqMZagv=5Svu+4BdA(twmD zT~I4p1FJ_O{@0;Ei=_9kd0xGsZ)A-Dg_3wa=sg%W$J2jFt?bLzV8OyYlRb)Xf&L8{ z>}zNcnT{+#v(`L1S@V(YFg*=Yc`w9#UrW+ys~6`kNx=c=gRkq|a9C~*e*2`yQl9b* z?zO?RrPms_?HNNmqjw8YhaBjY(IvRJaT5fLe#&&M6|l&uln%NppbgIj>{%%z9BrHp zj~YVBa9KOVr-ZWRhI$x1wO6dw*$dm14sw=GwQyyX1AbHe2<_A3=&kW<Hi2ovBZ&dj z888}j4|j`lO>4Z?`Z<&TzDRs=PY#1+vT(lS29Q(8fMGpIx{sCdT5}3@-v?pnwRs@6 z{}I^If3SYULfCd7k+M7k@!!AYaNDAUuBiFb<C;oVlH4D=4qk<Im;VW~EmD#7#Nn~V z680kJppd%H5}ju;fwR>?B7)D`=g3gk_h>4=S%DfqRncd4GyFN3O$~}JxF*_$GyM$7 zbw?Z-c7%aU{$w0vxR|qW{m`Sp8eQ**puoLSXD0D;qT~;u;-)(ci;BdBe~VGSAp;ew z^{MH*r`S=noZWXT!`>k^FmvQk%FB4yYEm<k-TWi6sTUWri+lcp$%ez?g*<`hfZN%K z{A4f79q-w)(c6XB{IjNg;s9$hpDpGa%w{pN8km^0l_~Q(PSp38;+G$4bjqlUr8OJF zvflxuutN_AWlV-kvbI!yWhrN%Xwl#FffyP75b(}Qh*9olgY%Pxt*(h^m!F2)EdA-s z!59crccAgZ2hx2qfb+vA;5fVQ@YSa;ZP1D)*&8NkJuMe^JRF0!?rewopHpdVUlp9+ z>w#SxQ^@gnAw<MIViy_$g>J1u%z~evjH9$sTJjydIB}GjtMVBGb7bwyC*ZUfeQD~_ zLa1DskDdS2czz<4jy%?-+VGpOXqPq(^OnaAnO7n4ekk>sX%BucL$PM)B>KB-6@-kQ zBbKd`#fYN?bpKf~9#gvw8GiSfkJd_fx2cMi9<E|Julr(Il|41J-4r&O$z!e84v341 z61QF{CpXPc!kqO96mz8%M}Kr89rNFU#X38B_Pm5b=4jKxqC@cWkpq}7vBW^XVKhbk zD2SaVWOrDKuAR}K0a@Dk@xfilu8n~MHe1-<Lp#|Ww@`R}N`*7aBry8-I9j+j2X}1= zgXrS3OgT1@UgwX4K*0g3{_BBJTP`u#+J5-yo;hBb!k?MU_e|C!ijGSqQT^s>p}%eo zQ_f4KC(SXm)b#;KE}BY4x7Lcks%|i29o_|7OHkIYhz`#C=GCDk$3ixYpntno2)>&* 
zYxmX<kSp@TZ=QeQjI9S#+CPv|7rz1cVaJ_Hsbml-L!TZMqOfifU5V%U?=KN#)f7gX zPwj1Ku+7777S0rZ;|q*`Y1-=eDGUb{oCDQ0<+N5K1kI|Y>Ff2e?BB}Yt!4H<VQuvm zAx?T9<mp8TQ@$0j6PbI(4G+pVPgaS>*^b7LFwR_^-p#W2UH3BAO$6L=6BNyUL0zId zJ2xi^UB<40H8U#cz-`ugDeayRbY~K^e;7=43fy5ppW$cW3s}AD7A%@E7^B`6vXme1 znN`Ajv3>gvvEZI6l=K;btL;3=?cOg|biPV7;OsBYcxg7~?`qhtlZCgNU9su(0(h7; zg${21EI_9WJl)KDK)GX4sX`MM=ZzzY2a5dc5rF;9rqGXiC+=1IC~msk0lQ4zGd<01 z5U{saFq2WHxkd8y?)nktp*IR&4xd6bnj>)BvklD6wg=9bZ(*I`X~OQVbnIt57?dXT zWi@|g>2&B&j7vQLpSj<)_)h|+>5ZhlcIJFu&;Wb#bJ28CIL%rdM5*(e*orG=XpoXb zMaH^pwP}B>y#5HnqlV#YV|TWFf&%6GZ4|Fu_Jeod8`;&KFp@bPkDCW5(~j#2Y<rR? zez`G;bb>PJ$FdX5%+eQ62Z~<t^QzEo#V8ydunR8Pa6b2$_iW^M1L~R9BMe0kes8!1 zzr>Tk;*;>!*ki)trvpi24tMQTDGCoydf={Q)~#per&Ce22kv;wnZG5Lpg3R(cS#Py z7y38Yns+0}bJIex(KwVE`-M|m#xGd&cr0p3k7BRyXVJZS4>a`NDQI*|AVWt(*mB|p z*lXkgq<n$UO`N%{qC$gozrx#~RGfV-7iRFxtIkb-p5N0W|J}1%dNp#{4yTD|amtAb z&MDE7k&$Gu$_EP^Y)NsyJxT`e0m1IMpgf7QG*77E?+6D<-sB)^1&^ZD{g*>v%e>a5 zg^pOG8bOr?8`%z5FPI}UfSxQm3F|$%gU04Gv{m}yMw7mDtwEDEe|!R8G~96J{S#0g z`kQ?@IvgN(4E3wZ$3tZ`Fg#`j`!-C3k&BMNO=CT3`CLU;V#ZTi+9tT<o#FL8ZX&cx zDX=~@!^v!+A3Hxp4fAzmsq%3aMTr%(|9T`WyikN1{x?{_>SE^9(*j%Nbi}S<nW!Ez zSyVX`20kez?D(l*x}Bv<lht*O9W6*eQNtB=zx)JwWsm<+bl&k?zFi#8&PZftwFp^7 zMSSn;5F#N(MK;+DGBewyG-yw4rJ_Uw_c@K9v}kE-(@+|kw4Uqvzy7G#_jTRZb<XGW ze$%*+bxtf(3i4Lz;4aBWe0?fQymUO7O83};|BQjODB~t;IC_xv<$1u7dU3G4W-54f zjfC3U)1br00ji7n{duf8Oj)^_jk3vt7fYtWJJ}{SXr&^BU+Tkbhe?BrT>!Mj#?xUV z9aMW1NxAhBIC|@#Fzr$R``qIVwptC$^Jy>0TboU$mS5P@EGc#|Q47BPxWMFoI)Iw= z9Z|CRs<=?vnD&P*5EpY9vAIq!_|-ie?beS%&xsLqV<P8uNQyAlxm?uJoJ{i+P70Yy z%22wxFKpc!2_w6x-dH7;^-DLU8mFyn_s<D9KK}=sE@j2se>k(ZgXYohE&7~N-pv{x z?!jTlMzPJhnRuk@KCZc2DArX;*sEg)(T(RS`u%+`VlsC>?U+{o@yI}Ej?=0=+o=xQ zo6TTikO~c0!+F4Wx`YAw?<7`=W-zbFhIhGi(aW5H)J`?p${C`%Q$CAJHs-*swVPPS zhi<0tXGi(&p%Ato@oLvlyr#!pDf8Z-|7tgSckT}^t?5s*jQWy4=TIJb@SAN-&Sd3x z++e}LEUNqd7WWSFrU!Cf&~&g{I5kv_O$!ky$=4O47A<8uLo?XX{A}i06AvYe`v|MM zUZKnSIZ*I&6SEqmMd8`cS!G5jQ|L~Bo?$ZZfqRkfS@PLI?G!kz!v{|hvq53+G+fL3 zdaBxy;5xyMBzK-lzW=oaZ~l(|#Al!MB^qb<tHC#)@8XWh$?U=5Y<MpyviR0Xu<^PU z<Qr$vW$Os0dUm0xtRG9ua(|+o`gnR;%6*?a`xCMEKm5E<4y@El#j=KKj7!dA@{WTk z_1{jWWf1^o8>3-LQwR3xcME^V=hD6<Ic(3~6xPSGm3?sN4S#r-;Ckmav2Ob!{Q6WE zK89_>1qY&GYe}uZR=4r|L>qpKE&%h&Hk^dKy?fV>>byef(a-fvNy=XgANiUs-|h?- zHm}0Ao_p-tU=7Ouvkmu0zGks@uW-YwBzR+FLvQXIQM1BV{2b&aKEC2XTNb?#_XU4p z26I(Ny-tP&)J#WobS0+&8T7h!pLpfY9J;;5k#0TBqQcz??0u0oEx!H)-%Or}wvz`y zMt&I8d@97z-Cb;XiVr1i&7~O+oM_;e-^fe{gHn+*c)tu|J4?gpienP=|4$F}_N>Iv zn;!f=rvlxwi!d<npm6naHfb%M%d?iw5Zpe;sd7#So|C;MUe8mfDGBZDop~>MdUF{4 z`g&4a`Xd>%co%DTT`Z*r?_~4OKM|{c`qLQMBr@yYCb-Di;^x{!y41Is&wp|y_La-g z*W(|1U0}-ROS1IUxtbl|EYt5nuf$E?xsNoug<V@@2oGk-!-JN1c*35rk;Ww~pkJ$? 
zYh?-iPKFBu%OwAm3-oi19bC>)f}oAQcx*&4#4iu0fXHDGseFemwC@m-%&*`}&oek} z(OA;L2DWeJOz3SpOt9L&fF(~ECUi|WiJc}RS%p*&?y)kUp63;eE#TkU)yvqa(mJ77 zeS=_cxmU1w?m=;9zN3EHF$~@C((%UlC2ZZ%bz)IuFi2GfL-ZCqNO<f3YxTY9ppPt6 z9hyvYO*--4zAz9{3`yZnKmN0?LdUIj_1}73Vc8ecSoW@PiZz#}v--NU2CQg^Y<InG z&j7k=vr;m8<tok;bff^KE3ABLDi{T8z>54`Oe#5-R+w6_!>jrNJsnEk_50Y(HSbyE zi@w-wAq9GlD{;))#oYff9^UOS!MtdDvOFitit1!&^TJo`%_Mb5mog-Kod%W`=m7^4 zZOHHLIyPBSCyLRNa9r0!e6rcM{#msX1h_sEC1cB&#>CIKQ>m3b<erEfv+BepTk|1u z-)wsQ>8bF@=@#bC8A*c+q^U8SGeKQn)egQrj%_QO4d%zY&`rA)_jA5kc)PCTTEA1c zc$))R`HY1ZYYK&hIbE!HZZ3V>Q^Uq5tii9JJRsG7b$#KEy;$?qjm;YMi>)~p4XtmT zVA0{epl;&^FYz{`fsEV_Jz-0p1XHd>t+051oDh5d64Ocy0At*M@lzFO<h~T1AGd(Z zve((sm{{s_#+#K6P6U?^@0pa@VUCkNi+>059^Otvp38d122>TJ-@7T4VKWk(ZVrav zJV)vbdV|wvt`y#Ey3U^K_~NS9(X@Vh3ybt^#~0H7SWj3Az53C>+6<4f{{AWAb!Tt5 zvV9Ky_DR9=>G|ACFq6cEi8Ng`4pisOBJWIf$bX*$ZZrF_M4#F4bk}nc4pfPg?9=GL zr$M+<S%wOwqd=qTH@l`h3DZ}OB1NOmxL$iW)t2`s!;l=RKWhdRYKHXT?_=f?xQZPr z=H5VGQ?jiyf}1una3XmPi%n_9q8EC2*S8;i>M@6^ec9Z(H=SPn`iZp_i<pU`DXiu9 zj2*QzB%RS-^k%^s_G8az+!wxBI2P5$UOF_3!P_UY27AuGE94B1pb|F1EtspjjcEBV z1xl1!!5r(=K+X2KAYbMQMlyD+?`#_ud$$cY(;K$s=q2p-?gvkrmBFx3iMj_2rg2q) z@cr8;rYE0BCwSs;7Uxks;%uaQ{p)eq1RW-~S(oLtu3%mAZlJlbKdk!oR&;vmKq<4O z*cs)C5Olbm>AtXMqi6b~&z3In{VNqV<Uk;dIP)7l?QLM$DkG?0UV>iB>d?MJokB-y z)<rII0=qgJSXH7(=YM3@m;AA!pY^=h^ZpY%STK~-e><|strD0wGl{fM9k174wv|Ol z%i!J(e)Ko54^(t(N@}NsfqUX4@)DCcTN81`1t}bRO#vE%d7dgJ8@^tuW-={Z*h|_K zN-XqXT4yesP|Q1%{{~Ryks*}1a5A)h_M`S7Su(#cj`9^<K==CrVeR2R^+hF_<of$N zUR_rJuT_S?)8;?I7(U}G+#5%2{q(?M(-KjgyCMc{wuT>bW|8&mjY7c{Iq0-Kjt|fA zJ3-xEKIi=|cn`>B19|rGbqwEKyf>l?jr&nSi#ygc{t0)U8>8#hC<-262+oJB!O+5! z^!Ys=zU;+^Q~uzURqNPd&ZN1~7)t9~EJ-Rkff{7Bc;7Jw{8DzYE8m8R+1)y{byGU~ z)s+tqc7#LU=s0SB7>N~@(j>|Gj<3>mgqZREa96b)pYtro{3+vTy4y&I?&iJlC|{^q zp#kIB6c+P(uV~DBt_sPkYty97aOFi+D&*Oiu&)X1k%1q`axOrtXw2?p1i+YL?g!|W zp;T!{cJ9bVR(6Na`-;-=@n$V{F5Ht+FAk=A1!gcaj?bQlmxzO$(}a(PUi2wFhLXHx zQR#Rvr8aCqBgaIDn$%a6T5EvoPdU+={^N1$jmzS?3IU$B$-!NXQ&@S(0?sW1cx{o2 zx9#87ADqf4XPF9>HZ5XCx@m05v}tT}c`i)s{exYx{~?&?>CuOpS<q<Boe6DrbnQ<8 zoy|KeG5Ju)ER}7kyS@c)JPzTW_-Tx#O$5z~=eVXJjYd9;qqpn+GNXg9*q|AK&=S!h z7WY%93kN$SpExt_;E5@mp*RrSY<1{3XOWD!vqLPtJ{w~fsKFJ-F2@ePF)(ez?OLCn zRHiKPp-<biNv-%Vo?Ryc4uedn{hKQM<t%(p&m(xi@)(w12_r2&JM^LToM)_xCENF~ zC|AT;Lr0;7bR^B9O}OR6TGm`Q4sAYFvQKl)h`QI-Vd~R<Lf%-;?C_6=jUj(ou1fO% z`whabrrYAE`~Gw|tr9Db2eLus9@v#yS06nr7)^Lb+ZQ7_f2J2?{<a5S+j;Plzk`g7 zyM;%3AuxQFH?7anC(XCcDA#8Oj9-=p>t-9k=hF@7|04m)b@QkfXHw~xCNj0p6)5GW zN^#RdVbi9U*mtfVG5s(Lj2(}P>Q{^LO#|=HY)GiPux&qcT6J9vDEyAkP4c;K)`^9E zKZHld`wP1pbU>%@kzi(_P3w(TGs{mrQ?Flv``-+Ngsm+Yc+?x5YeLEPnj_`a=8Nld zuW&}kFuo63%H9X(!eZBTxWIQmzL*_GE214}Lsb}59qk1REi~zDZ)bWu*<H}ux0fv_ z9LUz`e#GI6hV#t9Dr{IiiCs}sr$44~r0-UU!)YVx&V9i4ZW_tf_bg?3IwyrSl1|n$ zvx$AK9S*YbA6Y=v5W1sdNkh^`!rgu&XdM4NZ6B5iBZl}fEm;HFdOlO&=?MCAM4%j< zAy}303rly3Xs=x)gvQ)r&iehyM5Px0Cgi|CIK^&W)`!3VU+#Y#3l=Vsv?lz#Xx0-9 zw=ey~`s?!K&iUu|c4vh}D-CGL9Y?$<EeMr2`M1&693IA~z_O<qwEt-t>oZ@#n^scv zx0f<Rh;v}$RUazadb)mfR2g$kxsOxb&EZJ59`>qGqGK;=(D+S%;mcnwYCGSJqbGi2 ztA=jCUr!CdsV$Cnt<0iR7y97}-w0YTKNvoy`BF*cSaJ#LO(k+GguyB;=nycPihgXv zrlK%3|0d58ItIYOigzr4?~6^c-{Kz5sx!H3%v3*EunRnQ)6-9tdR1sZ%D^P{+tCAT zo0f{>Z_TDzlT%pxrA+vmn}#FiZ^2uNj#O2b0#jrhXc&LLt~=(7`)4}Pp7SP@Gr^6L z{N$*`Ih#gUm_UfZP5e?LgNbhq#CDTg;=IkHDZy+s)n1SxOU){=!J!$IH$D^{Z*0Jy z7Ne==KrcF=VncdkeJLh|!K~8Rw7y~;lf>T?M#Z~fx4M$>Y<Z99|00+EbWfr`3yN6( z(OG!cQdR)fGpxzhna1q|=w9STUzC+F=5qr5aLtD6`!vylmt60v4}g&>dO~tUBwF3G zfYvFA<Z7f3rVR#=aC{n82ldqEyXeEYa~YK7oD9z->#*0M{b%%IL+MqPDf7)71AVsW zQQCj$La*>pn%;jVUGWZ~bN@|3&qN2XSs|h8uT#M0Q31_ub0+-7*pvBga8Ya#Vn=X4 zQ!jJcWN=)fXf;>(Sgr<fJDwtS@{IcdCu~^c2zHz7sQkhmHn#g_eMYARz3iO_vWpvp 
z9;w+7r@d1$Yu6xp$T_et;&<YC_sKwe%-F{6gCZNB37^_0!a(I=bZmnnObky0Vg4(2 zzdc?sp8HDj;$}0Gsg?z|wGzpvni+KWeGENR3dS)yS<pGMQ~aj!fw}w`L?5_g)7{e& z4GI>b*uGc{+s@B^vnTAw&{TN1u`d)oNn{^YW2xWnSh_NAwU~JOGMoMC7k=Hvxfy;n z7&#&j8eB|Z+(8#eI6Re|^4`m{J2~`Bbq0)*7{h{(c1+1E9pp`Gal@Jjy3KdrvL&kG zP`=Ymv^&K{PBX`-+A#JaJp~3fXQBMBF8o-xO}tR8Ef{?tPZouo51qv`%Qjlnzx5yc zd}y8cv+W7{J~@C=v>)J8i#XU~F$n%Po?}5HN79?MHZa$tKS)XC(x+X|Sw%$vsHv9; zVc98c=&4+qxzz<<m+8Vn{YbcF5lrDfXVATS#e&A$I^nvb9b-~*IP2Lv@$6YYxX16m zNv*c<>wJ^I5v90D{T}W*ohDl4L_n<U2zo824NhK5uut(9R`G?;80Czhp>a8SPw*4^ z#6`pA6A7%NSCr`TIgRGreOPa;5d@xXe9yb&m}ppb5C8BStX<Dpy!g2U+n453R^1#L zHbRkp7rN7U&REr*q(J9qF|7Brq_E46SzY5K`giXrTPLT?I;VYLtG%5_-o=qUsD6Y! zB|Gr;G))|{xenJ$xlrlV7s7>+n%pzo3zKdsh?T#zq2-ngR4?v8;oU^?O1FXIT1D(k zpHP5x!>H`ADn!<2gWTF-6u7Mq9BNU5!bkp)8cTfsqeKzo0rq)+k=#=YW9^fCV5a(A zarMBXY?<9bR%nz9@0B<Q|C|x^tYXX~WGbn~ZDjAgKH$?^S5bec0VUmvU>}S4_xboJ zlrj4#y6+03g}zO7Xl+mKi+*8M%~e5R41?c&a;P=7Ksa)qGpElSVT<Yt2+H-TYTokt z>Y8+{+o1zTF2+Nz1+T=REh_xa>ervNynz-suCOCc3t4cK0j2GpKv(abWvZRi;cJu= zq~{sagUJ?X6{aDH`k_NFw(EeyE(amD=r@yDIt@HcP73SghoU&H7{`Cg<=HOopo!|> zUW@`9Y~n+UI|ob3ho?jCfBV?|v0=2fJd}c>A|%U>nbWW)3A<hJ7CqyvX<VQh>mM5o zbt4bqjFU=~c0v*AMN87=-Q1oXy~*QDHEXMS$_9IwkcHDq!LEG>^SN&fO&1c``Ogc{ zC#6!-$J17{vhjh)ib7U9EuGFQJ+6CFC?R))AR0A&EFHIa#5#D+t$^>tp3WNpUlc>= z(ER&?)}uc3<Jd-a_Ye2g9_vKQaq@K6YckxDvZBS$UrY8V#*y@1ck%qfKr-GJ4GN!* zi``4B@YSpdaIw*zddaO}N#h(KOZ9Jkza6&dd|w+htuLT7zhnN>-Xgj#lyNE<ZUjdI zCX$CwAc340{m7p|Uo<AeLQ50QnH^5=s`BZg&t?{Kxt8Sy@)_L^TRd#}7Pqh0rUPZF z)T%oMbhyXZ;#Yqfwy{=R@#L>C>(m9jv~elhkf|vuWj~gz%@_pI{+9Il#yPxm$Btf? z4kI1+tE_8M5Ij4?xgzrX?{kM|V%|9V{-=g#L$u-NtZC%)-~wJxe}u0WPlDEGsx;^3 zJSt7g#-YLGc-UQ6ys;>SradTSAyLC9pq+Q)b)K=<w0@G^cl{(TSj!%H`q0hd9<={- z0T-*bFw5~vgoUGwh?=)?*T)Fh-%EpxYXI72>cI*JZ&(uXjy)Ws3QA|Vlj0M1n`q60 zK8o&i&d-OMYiwx4gooJobd1yXQqEIdrh(3(3gV@%?M_*Drs1^N+{G}?4J;GPY3>>y zAk|DNXu8cae(F?w;*mJ#U<M8P8%-6S&QM@ph?;SURM@OSVtq8O`*u=@2)$BY(XC1o zazBgBZo23Y{s3=#2Gu{(XS7LLU)(!rCa5*Zv6p?5K+-Y=m&+p_eV)OBZgbbs`#Zu^ zJKiPyk}N8v4u|}@!>sq+7V)<(?^Qg|0sreslwP_8rT?a|H43TZUC<wlOGlETOCYto zmx%2<HlpP^1$fPK%c)-Rtm}0dTiaqxYYWRAn{6yeD6B*|dEP949|SX6RUu|V4O2pS zK0i6iyA?B;TfPK5PV8kZlcM0<*5~-jxkj|xsZP$VqiJ{14K`WsEi*Fc4|f#}p?{Pj z%@`ZXC@>qcUn${%kyB~!*wa{=JqGp<7)LRc;ZU-C1pM?;pm1w-627#vK>sb0cdoAm zmk-M9*yMo(1Eew2Mj7+NpECKLk<bwEmAk!Ou%f=#>z=Pu2bKRkX@Pb>xOd2e4!rPV zm-jH(Cu;_`OM`Iz0(S^}$NA3dG^mJYEBAZ-M46Anxl{9=B&~NM1x{(eKWRQNHfAu5 z)rbLyOMO7uSpv^CO`@y>1~w10$kccu6$~!HJ+Cz&?bHMODwQQHfwO3>vJ&Og-N?Wq z29mZ6ft&rLgpZHr(U+GRVD`&_u3lco!Us07#-kz3;qNH?wJL!o9~uEWq{c&nX0e#u z$lV!^Tk+~s16IAm7Mcb>MiuQWvfchdOmA`!qNZt4>Zn>&YpSjL<$Z%a4?Bvz)V~OS zhVVYpk94r%&Qa@)d35Gt6ZRW>4Zp3GW#=5{LCzyXm^}R&F4>(y5u^3!;|LjOEejz# zgTZj%!UTFSEspnHW}(mUDB5SheRVFzq#H34b-!xUGT*aU%{#B_H`%h>N8j0uUaDgF zRL+{xuH)GX7g`W9l3GlM!TUNXirsI(=L0k0lXpD$2F_=u#=iJta2n)#*x{?cuJn1F zKLjdk!`Tb+<dbg9XAIm|B2PSPFbgK04~P3d|6`et%-P@2NigM!rtsggKy2D&K@+3R z=;faNsC6X)&G?>ixc?CLV(kbrcrg@;4-O&UwTs28GBNbCzYl7b4TLp;z2QjX2NrI^ zojPXx4%^$2#Re8o*bZ$-KS}uHnKo^S=neN?$_c&Z?5a0&;P-)}FNDXBBgw$U3|9Vi zCP%Z$uw37ks#k;yI~|9?!JX^yes&2yPaZ`pPYk66DGIP;y)i2~F$K~VdcjzEA3l?o zX1j(Cf&FU@=;B)i!lN;eBJ4#el~8)}=Q+EzayNT>Y&zIQwy_tNl4#-M0L*^;1pn#( ztY^18>L2~~zyUqCS?5ee>XbQ(MN$h{*au&zei%gG-zU=VbAc2X6wDUol{oonufaLa z+Vsb)8XaZ_;`7&z;=vVtq1Z|W3h%uaaw|P($-AABu646${YnYEpI0dS+Z@mLQeRR3 zi2|su`6~|maTX_(O$N6oZRi;8$@dvrRMNn`jbrU`;<8Udnp-H8wrKMVmJF$`k(HFq zf6R=_D>3)k7c3^;Bhns^W%J)N_Z<On?rJt!E%;flS>Op~X36v<Z!qky;7qe85pY%M z6Wja8OPp-l!#p}?^DI^_?SG!YMvqy_{x*8CYWIyo%A9m~{#A?4Hyp+F{Hzlas#*T% zAFQTYfDN(fu(iG$|6ET3=fD;0=AKVX$47^`u-P=|o-Ca`83itE7S&F*6o%ylLVKz= zzRqZ5SCg`!AbJ5yjLe}8I{Vm(?e^jpb$w8{?!~`(rtshuzw>Q8#~dyvV?d(|1*(n% 
z!^smNMc16-kAA^h%X&egu^PyXokYzUs!-vmOQ&QkV1?d1c>P6yE5WY9`Oe<LiqCUk z%~uzARsM#Baz^$k$^zrK5uj1R=g|wA#l3bD;b!J^L8(tVmEHNy_8&VX_ADPqHNkID zb?PJLnieTW$c<!4{2o`Pz@4(In%L$y228QW2@Y+}0{{Fzu(l%>P3FG9RA&WxXZTKt z(y^xcD<i;Dx=dhKXR`m&62#RdGIZd3I&|Of3m-M-2n$x{u@U(LFk~Ee8-ITwE*ZRv zY0A76PaRqy#K^0GooxuFTwTY$O^78;@0q059YEGUT*0q?Fx{BFgzd16A`c;+k}~+2 z9FqXI4OFRpU_D;fE=7Bt6y5=1uua1ge5ZKQ^pQsNE~XOYms-*_c_}RR{>~o#38Tp$ zfvv5$#rD_xgWk3=u${XaaPV5Je{2OC<$JJW<4=q@S|WDzQpEV1nXtfbE>tcXP45SM zmtcAzbnH%|(TN3Y;OHM%VW2=tpKmb-k3HC_6c65^+>`OxNAjobD$ak6!d)kkDeP)s zzQ?~YO`eI>;f&3l$$^lV@LN>yH)8fz@71e)o({phn_u_n2qs*&#{Cx`;u_COj9oIO z0EOY48$SU)fBntk5B7un%E7cV(GPaFrPGNK^5A(_onFY-2(AAdp(QyU&2mea@&;Mj z{r;p_A$v}k|8Wb-EnY6ho{H!Dlpa)5X~d}|uFO)-kPQDGX01PZgM+Lldo#`l{<^(I zu_7F<+7x2ZnsefhR9%`FGgCNS`krmmy2wu5`X$_$`h=Z}w1E%PFH3rcx>DJPS+x9L zCRJb9fg`KeVeOMJ`uliMy?kRg8_~sg8ELU_zAYM|vl`EjHlu=5BT>cs5i1(L7Crj! z#p8*wVA^azeOtRxDn|`EesYh`cVnS`+zxg*CJI;o97DnHp0Iv~hw%Z=nJX>ypuPOf zG$>pRbVVtY3OA&EO4;P&>rKWsR`gGAFTQmt<UYoSZ0ltMcHeOWJMeG>E7^HP+^RMP z_iJ8YyKX&0#b6KWXzdLev7xYR`%T6yin;$MQ7qj(5-vRn1`CS=OpDo*PlE}p&b2`u ze)l}RG>^?HiHF9KAH~a>OW4o?WWVZ+sZ+KAhhEE|s>6R-GrwDmx-ge^@Qh;L=1Tnc z-)r&Bn>MCvJsm8MXj1K(_3Zl3tM%X4-*T$p8AI#C8^spc+n9P=1!f$2DQ@|x4-P8f zbaz$<n?Gn5+rHD6;;LTao*&tCi@)LSC@*3+)c3J}wSytAI-Fi=JrK)Y%hQhci-e#c zeVCo<PSL(?IDULF+u*0jetema-S%6BWNQhnIA{j)ZBnp&KnUbycCZf@edz1J&&=&` z5QrLsvF6rKJaER4{`pH&)v+<Cvyn3*XP!f?p|j~iyE!R&DpT49Ejl}EJa**Fph;zX z{;({Y1}sY?>z@K`vc8N?>dV<5#iQb(F^93Z{1%g#Ug0cx6Vk{xr@-|ksNT|uciRm? z_TwhW<2pqMNSOiBYx7~&G+m|{v5A%M&!<!jg|^98aof@uXxW+tcLO+Q_KhV?YR$lm zi5*NOXb3d-9|QLdOz4!kgiO5xaOCY;tlH&H+N<=%G3t@fA<x;ZF>dtt?t!}1%LZX+ z?F~E>B2SG~<!nQ-KTY80%;kQEaML)>m3Z>fq5HKl(IEiy$1`YD3*nuC6)bV<TFg7M z8y}^OV{6U(LDsfzQCuL_MlK4%@0r8#yW&JrTBgRH9j{}Gr7!Dl8xLhS^OUhp@eH;W zxUqvHeqg>q6b;-Q4Z@qD6gFQD9R4YQ$FX6s@VFwKJv)c8cT})9@hfn2MkYPE!O(7i z7o>ihjsq&4@pjc@IB~9wEmyXuwJMxNSeO7qCnPX$sZxA-btm(QmSC!lEfgkaL8gkF zxS|E{Z;=^U4OoM@nF_GCJ%vUGmkE-lEc9-k0y&!h&~3Q~Ikk7zKc1RKJrB1C)7oZ2 zo@+8}9heRm6DOm;%0XeJ_Gvc3tsmKDr4!u#fURD^a8Km`lUGp%{R!UC!S6HgK5s;E z>Tqm-8pHDQTycKqhx%=Ht0lFPDHvbH9TEf9vFb7(ntVSH>sP9S+NS}auzCztE@w2! 
zr2}KV6<|o392swEV;fs7F?doETq<|Q;s_~nSr!G^$p&;cOa%;$d}-P6c~sCdiam={ zq$4B`Rl`TJW}ff)`gH?KQ|SZViG%2FLoRA2zjYcq=qTG5o(NsB*--YikV!9SXWDsP z=+kTr`ok2UpVJ?9wC%ni?ihr}8Usn|!xcO>P=R7%xR-UmDhy8D!BQvPV}{d|>8N=) zd!AH)Em+6~JQHZ-QlN*`GVo=|9nr!ikH#8UvzH!0;JVd|-I>Q76%JEjw1q9~86E62 zMXr<mcDupE!~bEjDd(#3`C-!)#QMlqcG~?6v-)UEWzAufH;7U1a~*>8j3Z1hBOayR zMMH9O6ipduMHWxr;mQZ!*_3Tdgo0Hkn126FtaZu-Y+5^<8T(Ga8NUwVEm;*<bK@i% z6=noFrJ4A_J&8u-FRM2WnJpUmatCl`E_c3<r@~c@xVq|9y|n%%EVC`cZU437$im&2 zGI%6Bd3jeHuUlC!c-lY({;FHjc$F2|HnIUL|6@hzjugLFj_1_9(KIfeKATP<JB56B zr8OItnGJ#8tB26V%8P=fh70F^He<%qt@T$npAzOxoX+z2IoolenmKj!!*jJ2`02MP zJ}6s@U!ynRrqRR6dC(v{a!du(qI21qoNyX=Jc3CVj)V<|=F+NRM)bue6TSrB#X6_4 z^ih8nyvp?z%5N*e$wq7T2#jf{P9S|-t_;0D4~NQT{@Z@94to>>Xs48{I7t{!Z@ce{ zj#WQVe#kwR9B0CQ)ax_dDRFeLiaSFO2#^sM1`E~Zvq<MCFzPQ2^!6hAC*6um71cRU zke`uJA!NGG33aDv!1TKh@%Z4G;K_H-Jq{<tgyiwy@2mp(ol{U56Y;d~J7(DRRp^u( zL3Grerno%9hc%pAc5S266`ev!o6%*K__IlTIMy4MYK#WA^(X7+O8v$^^Y^ebWvzI; z))Otd<zXM66~V-8=0EZ}YuM}whWZ6?wzN0>n52Mj+%5}hoA?>NOa{iB@WNEtC%7x> zCrX}$Q)IwUNZc(Ux$39v-Q0Xwu>KtzcJa5UXJrhwee6JUa}B<3eZ!=6-->hd{;~xh zexvD!8Sp(q8H{ZLL29ZpOgz0)G#p&Pa^|Sf=2pf^^ao)#pLaAJyo#U|1usr6WHuML zcM45t#1d(U>6}6{&6TJyK36=_xK)_;tQl8zrBZXwFvb$qL1&i<o)zWM;8H)7_3O>< zV6>z4swgVB&iiUNWvS_J8tq!vgSB0YQQfi{|D~<OO_}xF)w7DttGbU(GoyufHGVM6 zE1SMLUS;j4>X_CkLuftOfist0$FOo=Va#k_TKv}<QtnyO7b|P>RViXF=DY**T7`vL zO=f%4l5xL`2j6Y?6SudHh7D(V-uhf43pUrn)kWjz!=#P4^YlXDzyK9eJuw)629JS_ zdtM6rc3CoQDP#6w!ch2OHU@fI<VZ%%mceix1F)@Hg{5z@sn0hNi)<I-A7KXF{1qa2 zvHJR@UY&R|rA8?Da+Q_t^Mm3|k@eSJ1%dt4sqi2)lD4l@qs1k=oqT1&$VGQ5blSzj z(SwQ1eepP&Dcv6uzu80cvu-?WkjOJLpV;x8(XexBC>Y*Lr1<U;;9)rt?%aM{KX1%r z8gudv3rTgM8v|xy!E?@<<*f4@oJE)FCbJvQQE)ZUr;BY~VoP8on7#gu!^Q)>b7{f7 zDan{L-3fO8)u*vMN3Zxf5DL%eL;0Cq7&)+yD0BF(sJqAr6tjY9GiPSZF-ZsOpotLO zWk<HcD00wjX1C;H@p4EAXHrdLrP>LYQF}!k^4FN`S0=N~Dw$+bd!#<X*&O1NT)^_Z z8&tpYgL4;7iViJmm}@?tsfBz*OND-L^@<m{?0qNvD4h(4Ol?^Gm#O^wbqeo=3!tv7 z1)@PNq~7mCp^9h4ZnH<?u5M=#Gpu1lcOrM9KF6x_ymRu*jz0N}hH<V(nV{##X9zP` zN(}E=Y|Vo&8T~Ngj4fF_P=_IZ)2YFHDBu@+c5M*<p6;11p69H_^gC&EqUjIIFxSLO z>w3eW!ycp?HV(B^73fz=D3~`p!rqbcbZFOnG@57&7U}yi->yN}Q_J)GyS>r&@<0f> zd>$2=_X=f`R9ISQ2Bo$4#l20ur*gPmn6G>oKiV17r31rhw!uxdYP&7%*LMLgl|0as zJuR+V7)$M?BChzShZ}SIlWOW!mK44eFBwPRcZ==p+ZkPGs<_MJWgUXOD$vBAZlses zf~@54GsV3cH1OVF%;Bt(q>vfnZf{rIxBHl6t-%2NCbXhyW-Yr_WX+l!idcAofCD6Y zP&~;LjHeBQp)Gx3lWQr<(X3^AUcD19HO_+KDJR&mZDPHkFNps6mBJk98IXwlTl8Zr z40~_}{~a`kcD{H&{auYsG~!O}oKf{RTxwYLZa=V@w~eVr{blbh9O>TT6k)WfJKQ?u zg3hb<3yob25=w@V%gRJJy<Qi$WPiZU49;hK$=QYq9`y0^6o~o0kH!A_CcM?}k6+*T zz}>n3a9Lg+nrzI1J;Nhm%yI6!vkHR8>wD7zRag4*ek?T_@_k5^5Bz&<NyT^%4HgPy zm_MH0xCPSwT)s=}WkL5t|L=0{tREauEgpKJLxw(o@sI0aG4Nh0SRIarYcsN-dVL^h zRwlE$-5Z#On>@`h=5Ald9qhR`&-^@l#46ZvoD*qJC&YMKw|ERJ-f2eRcG{FN!<o`; z&anr53P}4uZHY<QY<8(a0<WL;60b&2fzHfZ;{ML-c;%-K^HEF%9nQwwZ;{Iy`izB> zkNx1;^bR&M@+~e@EFit;kr43WHwzC}XJ1Et!yk8)!9RM9AeZ1k3k%+`)@#!R|D2iB z<JN)dAA3-<)sHGGzl#YY9^+bvr7U-gC)q0NvL}HO7IN#Bux<1MW|YNUmM^;5>7Koq zuLA5>k2}S!;|D=JvGR8xghe@V(5J!|G?bqR=d0^6#&w0*d^Ul-TWbw(%1>gXQGYo6 z%Mv?nvZyuX5NkT<%q-LbX(7I3iv1SiK@EAhp)>(Za~z>)1Lrp6_|UOWx}Xrl=i{N% zsJ-z4O21Vnljr65W!F@={%8=bH7XP9)7|OLof~X%{9NdYDxfKcBWe1Ep%mlTh!>iZ zAuvt~G>!em52M{->p2O?^!A{$Go8U^`(4q>WIDx8a-&Uk(v)Ig#Q)7hD2Mkk%eFe= ziOk+KpgN6OSFdC0<F>I-<@0P<h#ne5T9eZd2`gV62i_;Rzaz1mo$*@EPHoYFGg5s8 z-JZ*0-Q^OzDB458F)fh27lPX(L+SR?nRLYZ5Xu~@!PE!obmfKy-Ltj;lSX|oS^Cl` zca1+R7(9gjY*gkRI9<?uH;%NA@4@lgY{>kT1FblAftfb?)7K^Gu<gkQR=LrZ-ELTi zIkBnG>()`U9@K?<CeMKtgQMx5mM(0!d?3^wILS=bCD3QyCw0}fh0HlXrFKVzyU?G` z6wDS29*&`ze187KBZJb5r_<X4b$V;=OXrsDV&?Ddas9t-EIoPzR69A7&!9zO!GA?e z)&D%U$QQCR;o*|?+_&&7j{8TxHL(v3MJUhnq|!g@9M{fp=Kbm=xb@8rF)h9xk6h=w 
z&nxHg0H22*w=NQ5hI5u(H_sM>2FzM-L2D+y7CUXHkjuL^)_MCnKAL$|$c-vspLUnl zn|@M)1I&%4HE%)dNgi}z^I4o9*e*n<zhiyBonRC6j$`poee%0y3MaeYva~k|bWmRt zw5m_Dq=+Z%UVIWYXg^|6i?!%;%M|+j;tOKmdWq$1Pv$pAg-lnc5X%UK<-F5dz<Iq3 z&S|pClew2_cN=F0E8=}^zN_YcZu5$0y5^SvSJu2o_r?P1j7_D*TC=Ew&kr0^mtcBl zFsX)IV+W3?K<nHCf}wE+gy<X4Q@>b<Z0<u1yZB67T7{K6bu;&RH(F9zh_!umDJ*Ui zp4_n<Z$24FJ09*6?!9Oc=PuxT+(ca}_CAjn5}HuMBpOcJEt4p6*5sG-GidCvxiEND z1n`e0-aBxE4HT^~=NSoC-!N*rJpp_?2jMm^FZ!CgLzwI-&2~%*fY8<MpucJ-3#tu+ zFTZ#O?lqtD)jh>cf7i1`0p8R!EEF13U7_}sF+DnDfxpKn!OP>D+2OG{u&vIMypIKN z&VvWUA8=tePF6AGti4M=Qs}FhA*{Gy&xX$(4-5aPfKJ39@)|voT5roxW_>RxJ`+do zn-1crhFI8S6UJExs$d=*$9}#_pfZC{P*qwkSgP)0GW*<^`q9Vi+S&uCb5H|#szf|- zVHxXn+LHe0^um(W=fsF@?d+Cgf7lu%!%Q;<(1f}kj4>KYHFr*8$+!D#cYAMm`Y4`e zChTEPC(kA853_KSI)6+3NTTt^P7pOejn;WP)1k#ubUCAz{qc^bHEZl?vD;kQ@LLg_ znyS#?sX8<7G9jC&&FC33A8U5~!8t(!sT7`e`s}?My(?d_y=qn@rM3^nuU??g{Eb;e zjH6FyX49dQ84%m&tXS-&E&5(dWVg##p}w6hW~Lq#X3XFjsY_R}MN18OB2!4%_z<6| z3ZQ*=bX`>c>ugs46sVj(f!$oRT%clgsLu-{zcIl=)fsnTbeIFaqAmERsHA?PP9}`u zjIH~%#jKANf6v55v+jEhsG}kcvAtE9oWUlhToF%8_eap1kV)deD{ryHyhviS>NV4c zY%<_}`rsNH7=J$#yZF7x`NVRG<{b&|@V5yI#>*2NNT%q9M<^P5Q`zlYdb)l-_O$tl zvAqXE<~wat`XGwR!#A@u_a^pjWiCY5PNnh6oXNNA6I+~{0o^{*q_MKRUaV4w$n74W z@oEF}+02=kUZ!-{HkmeSje<Y2oFQS_fbyN1P?PCJsdJ*>>|0rM&&~$lWxllHS|oL+ z_>tkY$)vDhI+#!D4fap$sY6E#QqMV4Th%PuI;e!jULFpbgAd^9#%dP1I+Xf<cA@a7 zAQ;f)1V4B#b%%>L-Z{njJef-LUPqs)YW0QTu~SJo;{X=rTcEH0E5UQoCG;922d_)4 z>o$hRlSX_YIzHUXV*ZVX$k766J5Oh7McEK`4QRNk7lhz!xWovPuldj_i|cHEvJ9WI zXHnbJbU41<6ArJ|WVL@9#7A473g;G22fd0&{vHZ~H+f||2cZYI*isgr>rP|Oj^eba z_ssXU9axS`qKRj=qqBr(PNRoF;`FJ^GRPF<&7@%x&j+ZMUqp|Dn?k{;KWxTtJv=mi z0PW$wxjp-5QBY+eyL+aHZ6Ey--Jk2zwAUkqO=baf<9R%p?4FN`{BA#B-ev6Tkis6i z{i<&-wiN9<YjMBP7#h4_3S}>{1+($SFuZ6z_di$Sv*<3iu*L%x-Q0{5za`;Py{(vf zL4~BpT)>#aBSC9GBlZ|;L*a`5zYV3~u~a#}RE`p@?<GLw9#hnu<tX|ed&Ht2{$(Eq zB+?X{Sv0j!j$&y#41YHhc8!x2qZjoP<vzPJwNHD6-u_ov%wR`axTui5+&mlRzS0Eu zXNu&m7>3t$qsh6W7w&KvO$A+n;tdC$bJ(<o`3EV`j(vR3Up9f}<Vo;zuNWA3aRRBZ z4*XHIMzV976`WN54}a}RB#WVb6gtlv($9~Bz_a;~_B8@mtkZ*#b}Lw;r%H~MoJBi) z7lug*?EK^(cyY%A_D$Op0*f+e`q8=2ov|C6d%4n>QPbeDnHE|bW^uk{Dei9C$Ex-V zpwS)<dG;&Vg}rNW?R;DA5c<IOYo0|P%d056Mxb_GIm&rgK!*N(pv=<;-0u7lRy7|( z|HF&eKJ`<Wt)VDfSlW)`{ns$Z<~P`VSwfwE-m-s9obUT?62)D$1%1gOrrt9NWO|(x zW1jVcm-eA>YLOhQ>Oy>X`6GraHnZ7#%P{yv1UOzyr(N$YU~8fdRnZq=nY0Tlv%Sn} zhnUg!!I3m%$pkv#D2jbrb1C3r9g~bV1?PYaIHniKzC^`Cyl)5_R;vs9*lqlL_qy=a zMOXO#r$3GV7Y4&u+Jmls7A%TQ!ju2$lew-VyiSV(Cw^Z(&%XuN@`lp9Vot3ri-DT_ zfmHK^GXZ5~DYB0y#H2Zb<rZ(MPG+!jZzWdn9$=oADK<LHW_MdmSvmKE$DfYGMy*0- zZJmMERXTWiQ3xFl<h?m{CtP&Um-X_?V&V!7$s3QEFuTufaf+87g`V&ylSk*+-pF+< z_`+W{F6tPon_4XT%yFPsL6LOoWHq*=v<bO<kF0FQ^Kw>Na7uSCo@{(1EV-ot#itFp z>qZs5R(xe~J$$de%$8mqn!@yQ&WfhfoMD%F98H&&BL4$F*^mzzq#L3@vE04ue&z*x z5Z;0pgI2TMmFL*^q0?!<?h&z%Ai(#oRcO|a;q}Gb8PYL_;+>Uf>ic=j#n6EM@^9nn zEpM5B*+fj$u4Q7L9X)aNhk;L`;Pewsyr!W-$CR#NX9edp=E~Kd{If?~9i7Vl{PTss zDlv4w_Ez1%soZ02$bD0P?chz|EUI{t3{!pA;{f$Z?C(rF68V1g%CijGbkYstri(bs z$BWOt&fu}Tx3TCeXPM4Sr~g(OvHNvXc{e_jmIQc`&5D6^C15jq<mCvtc$r<Ou3|^d zdeXc>vXIX+0_;==y6x-`c=Cgep4^8jc^^^dOdDS591AaH{3&Ji4$L`~0>^h=th>ya zZHv}tvnu96x1aO({kkS>?;D9@gD=#}WUXZv`*^bRO75iR=1cQ^6Ui+ui7GtuVe4@{ zR;`=|H)GAAyEKTx8~@^zBZ~#4;!@1m_fB&181Hr*TFTDnr_(9VRPf&J2fva%Xx+mP zLg)-{h~Mx_d_8&sINZJ_j>@fJGK<#==S;t0_!`cYa`%7+e!p8j;3;cYw}K7HLEyY0 zPso2D5AOY>*nk|yY!=x-RCo+}Uq8WAU7J|Dk~66-0Z9}LgQrnhV0+FAmRkH~TdqdK z?(a@;;%TFBZ|qA+gY*Gm*C9q$z4GXds})?oBMn+J73&qRYO{_ZBk5gFHmImMKx&>1 zxm|XE$Z4{0ZNv}=zYq$+w<g1;IacsHAerV!r0IH}RV>7~7k&ET4UWsy;O>9{;BZI< zo8~RRrB-n;{$e1Ohw!Y)mn3@Kb(pzU?i0o8061Cj2W8eM@|?*VVZ!b*@r2hmLGjyc zx|*-V9hjz2Ie1(BrA>aU&Uq;G@{6F=W4*|0@=N^boke4_zKM;u&#+#zIYTtMSR5kv 
z0LMNs5PBl=z+!n2&Hj~6wtNbZQtl4&>Q1cm=tOKgXhhGq#S`vRq{j#K>E*!ltoH~t z8uhF<T#8DiRw+Zc^4yua-*QL7z12AQ=v(fan*)9PTEwih5=6gQq+#I7>H`?P^iUPF z4r^lbhMU5?%^~<ox}E)qPvpMRUqWk{6CB)IA-YZVg(;UpV97!YJnCe{RLX0F6OQsw zUN;-$_$+0?Hz~@o{)Orz%;@W&<*d8g0uJabWdGJX(~ZIQq!NE#IAZcobY1hDWvR=6 z#vB9OC?AdETsPp(wq497ax`t9YCxramXLU9EZuJ&g++JNA?%(9y)$lS<^S{{@4O*A zGyI1y%PnCq)QLk9H0fqmnHXo03)VP^lKqBJ<FiYcy1!DgYFQ8@SZcFFX+iKbxgSLR z4x#Y{d2C1bY~Innf<sAx<$sVT^{!`Z)|K-@>NYd**l?6x3T$U1eFs6ThcexqphHm< z1f?INSpT(=w57(Alsly0_d_YNPxPcia|h5{|J`B-ONKp;_HaknRdhI)M;~X;gD%ws zI8vO?^wSiCDR;UtXWuw>-jF-Qub#)){SGj}@)KTKk_z_Q8_>H{2L|g0!jt76*z&w& z@cDWLuLi|X{^lgAJNFSKw<D-&T_ErK9c9*NLv1TkaL20<c4)*sMklo~vegaVEV{;0 zudS}%@@*!?eUk>0d^d^GtZ?x2^JL){4T0m2VA@n8vMbYuBX<XY;fp|gy;cD}XT-B_ z+qdG2G5qcRc%rZ?cOIF%C=hQxYhs5p7O{wiTH$EUaxv!kX8f8e1+9Svu(!|}54L>4 zz}q7zAU_Y-$5D8AUNwG^yTFFd8BSi~b_sWWWWuyPwv_eNnO3i?6uua9R^9h1q2o~v zd;Ib&yY6Vl{4%(6e)D{G=ppa(Z%Cln+R=>cCxQH5Z@9pjrAKNGU~01sg`Dw&!q+R9 zUP~hWQklv;ZyLhSPtP&$U;r(gu$grf>r+O^G#I+q0s=nn;d`k6>PpPaMY)CJp_hR> z$W|Iq({xj8-j&Vw+%q7=Z!qQO_lK3f7f}3e4H47+FpCmhXg~WOE3Rn8PPGKu5g-NA zt~6m##uV^4yp5I4OM)?V`LyYKKhXEcq<6!=;rxIEkh>gD5p!3IYs;5m`LL~QLADo^ z><neYwfjTZ(C6%3;w4t<91eQL*Mud9&*1QGd6X^-rv=oGzn)Kk!w!pC>Rc&EHnxLK z6MaZmR|IuWE8N;L2TDKaQ*z&@V%m<2Z2F!^TI3Z>3Eg{HNRTs(SJ5Q@eeOa_z!R}P zJD1Hpyo6PHsPV3}t>kOkA@)TvjBR$<$W-smrMsW|(`!;=NAf4a-T5QvKUY&2TfGGB zm8|H_NgF!xT%8W6r^4hLIaIEqk7m0A;Aln@+gG}s9pF$1i;84YcmKuiwPcWkZ3Qlt zyUh+;htka(zeGc`H+Yk~o_cpYV+UW#QSzMGFlUcF4Swtd{)>a5r)voPTNgnuLZ3>+ zdrR=<uY;0DVRk@eNno-{ou(cL6Fv!2aJk+UcE0@&N63e=B$daabnzxsydBJY-)B%u zScFq;EO2pIGoFn!!_0whG<nB7*m$&_y@{O)(}qt4x#!5fIJe@d{_*umkplHwD2<O6 z@^kUPdu-il1SJ!LpzEl}E?qj01wlR#-Js36(*NOtix)77?{3V3y{TvO5}dX%2qrD@ zq_+_>*pTr<AzKlcOB$;$9Wao}4cD>cg>|fWqcrvUXAdn3yO7!j(ySZRtiMrZ{nIsi z@SL9oi57EV>^y(A{<s`Hej7;62M9kebHN#_&eZ3f9}H!S`qG!tF(mmkljNGSNo7JJ zoITo!-WPKysn;GnnH$e;<W|-BcxIB&=_I-@w5QmY{7o6_OKu@5l5rnm$lBGD66{q- zMbC>ReGDPhncN*!9}V)GE{hdHER8GWY<IJPpw@pmd*3yj(r2i$^|xiwX=X9ocE1$i z-f7kuS%Y$#_i=Y5=R?61y2iOnEz8pB`LcPSb#pSQmn4$evQbdKY7||H(4*bgUJDsV zTT$lqQemd8r_-%>izH*FD?*l6F#9a?P8>Exhqduel*5HAR@r|U8Y;K4Tq#>>UtWh@ zOU!Ai_9&RTJRhAs^Vq6=E7<UTUTm9DCY*Ve4^iXfAZ@=gB+XkQe0b##**%Tex4nQW ztla7MqYx&&Y$BD$o57l;;b5cjl>LiWq&(RGoNzc4(gugXoh&`UghgPR>n+r1m<%6= zaKG&;B~X4j0Q_B*sn;kO`sySBjZbIs{>8p5G<_&(_v#T&HN6t%jS9wW&Mnrvp(K1A zHw|=?vS|kYeXie;g0}lSpmMx24T?B{u)II*ly1Z#i_@(6nmar@e@--5H<16|wPL!C z4k(<o0#$v1jXnC3J#z)H&KM3)x&NVU`VL(4Vic(wIa1a>KUz86A3}4bL6RB<|8~T| zT<13Sx^6Tp{*x})u1{poeUgO5GiB&-Ntdu`x;J<%Gl4S|31s|4k#f9MC{9s<q%wK$ zt9Jl+P9X8xdgQsb2lcN;F>;-333e7!DfQ=K7WHlot9cwvFGg3hODmON^L2OjkQw6} z;5k4qV~YFx1~u<z(y=0Cc<4C}8tj)agXOaHA?+`Qx5d(cj5)A4CWb~^Z>dX<$p^m^ z8-=;k*KvMo3^-5wig#9z<vYgH!rYd0I=xK;CWh+4CU-5Goy>XqR~O*PO<khHq-U5@ z9?lljxq{TVSlC}xKt9Faa0Z_<W(WU}?3mrn-Zw<StmXM|rfXTfv&|7<#QhEET9F0! zpV`Ca(AmtbeS_rl`k}bk+KicZn2K3E*Q&<+SbbP5)ZE|CUM*^2l~Dld&pL7K*hcn7 z{*sd_XBNoL{mfQA>JR(*nU}Sx54BiZ(DqGZ$Y;wiGDwM|OsVN$@jr^r!!N|Y3&TmN zC?YK?m6Qq%G}LoWC=I1TC2d7oWVAI%Bzwy$BO`=RsOKEJY#FKW6B4o`BkO(M|G<ax ze9t-eeO*dHeEZ&rDkD1SSof_E4#(V}(ocrc9KHso_Aub<GV}Rae+x7-HNnj-x4_Tx z1Qk|$@(!mIG~H~?4^phq^otn{kT6QcZtWyLN*C|VR^!@%!^tk@p75tsAKvUOukL-# z9hWvrtP*s^3)*Ap<?lH5y`zW&9A#;^G_zg}OvNF;Q!&$GEFNm>fcMoyFvm)h&mS~G z!x(#ZUZusK7JBi`ZwlD&zn<u|x3A>p3BuDA&*0|o0*LuAQk*0^4_$waLhqs1z<o>? 
zG!65|W1X%XkmSt;ALQ9GM(T`Qwt}}3Q*eQ-)OF`klDl;>p6GKDdXz4OF{@U>_P54t zc>ga9Y?R}T)rC~#KLBeaevpjJDtgnU5RSMGX1(<S+isr)d$nv)e#lWeGpo0FZ-^DY zy)_<B`YUouYdhH*y9$TP&O-ja2r_iC;zb9tX~&+Y)Tex@!?`nU)w@rXK&hpv@I;!E zOD3q{OF0j2$O8;f-$yHB=i>c;gURFi3=~c5(R<Jz(kQqD&E^G;mu5)Wv92ZJ#hBGq zUi(VOs<{Ws%No<4gYLY}BN#Oe>M2>rm`$gr;rzLCY2L3W9HoB%Otbn?jzV`B`0XP^ zZ85+cwL8#v;1}9D+C@ysh^I=)*VnqEp7N#-=2Xjh(w*}p)Z7@t3cW_~<fgeebk`ig z{m2r^KC}|DlPs|OsYK9|@(A*!{V?v&B=p}sk}W=T!|t9A7!dG|!i{gxag#t?SLO>h zmbmkX?Y`o~Tq$oqP4cVs?M8(~pJ<ZDHiy>_d~vD-ck|brhuTV_bf4*mhZcIX+-hIm zW0uHwbM!GbMcO<6*1?vJNL*4f6o$mOaqBE|Jk{Gknr|JTtY|b|pP|iieWGZcnjFej zG*rVXIqcssn7*~Vr<C`xcx0dg+b3P1c@e4bqs<K0f0Md}Z6<uBuOYk(tfhZva$wHr zv9x;VZR$2ImUY~1Y4u-2%Ic$rU-u`&(m}N(WBH=`$hp0Mv8y1)CYbwwdM3CXut(jB z?r5o>Puuf*3Nvf9`G~tJC&y>A{&^z~Sv*R*2L|%0V^UtgZ9Z;yiKBxis*=+ngunE< zP2+-6&@1K@3=_k7Wqk!5&FBfK5+}dRVFdVlTovtK_2;hj3!qra672|Tte$bhfc7@@ z=Ei#y`H1W|(we6UCx1EN&Dyc>J5v>p+zDZeMcJ%6cbAA$r@?^ZGg#cvTVj7cCPk%_ zg55Q5xM=l|0={mQa>*pXe{r1JJORUII?)z?AHG{M1gF=AVcYplu(`hsN8DQ^R95`~ zqemVrSFx6?6^F9Q=6zzL=UDi*Y%81y499yJ{c+!lAWq3Y2%s2+_AhN|*yAyr+$$E; zrA*k804bl|C=15hZ$Qa$J@l&FA?$e>ipAGF>B5);qXzEG19f{PzOl~&&rdl+{j#UB zV|y$YjvIh~+&+jqr<B6G%E9pM$V?bEA(Bq@&c-gIlUOFRr?|pvAa+O&)dx2Nam0?X zJYZ}s+3lQ&{sjjq+u$F0?UZ`|I&s|CG6?K1&t%PS+BoB5V|9>=z2i1bc@C^pWy3=^ z;Y)fjEvRmV?cE>InqX<)a>x(QCw>DA{tT<&s~C7F0Gevtd2NB&*$aB#Kr1n{I#XMh zjCU$>LyDBU6)!p7l6JtQ`^}_%raCuS=KxIg6Y^H8pwZ2IVO{5VT-F-GEz}HaY(7xq zv0Ws)GL)Z5@91kiQZVD}Q2aH!fU?|FI6cY|&)l}dV$Wk$AIb_Kq5d9NS871|b`2h$ zt_AjQXR(co2*)I*)Ojh(xb~L<1oYR0{^CK%b9e;p9tnKtK^|!CF-8+}Wt>&=pJ>k> z{G)X>*m`V+IZrkUkv5wh-by$4A`b`L)6$QxJRT!t=*<^)ii_acTNym^{-v-o@DFGv z`|$Q^ZC*3v2kqZ{m1cJMkx{~JVP~Q-ZwfcZ^<tv9@AWjiyR?aZ*qbwFyb$({OMtm! z=Wv|5E&mIOh23M+K<?vzLSL)JG|aj?M!IjN%sdmvV}En$i<&K_DC`ms{YhlY0htuE zQWGmJd()K=FId=gi=G>~vS->ZY7CmeU8Y+@zjigYzjIN#Z}#V&e_vAL8g<qk(2chL z%*3)|Hz{Fu2o63f&B+^|le***(AsW-iPJv_H}A!B#_zQd*6VEb4aZt>Z(nWR5cVHw zHhA&(w{6v)7iz1wlpEp3y*j8gwn=n3^c7<39r20u|G*xT_cZREAIIcLKeKx^af)mi zL?2rTty^A+lV}0W`)I(=;>=m)q#YM2M4;7E11!QO`uS;;Xdw03-V~*ZawDS1ZO&o3 zD}NIbzgxqoNzyJR1jXB9FVRl}eayGNK>6B(*<|{6(wB?DKLu@IRmBwb$O|5Zo53Fc zU*vUhGXz}N1d8VzDfGfSO0Ma_gN3^4sv~mfS@V_(M&1`5ea}KSdmo-DV<jqAn@El% zf2ev<B5pgssCrM4z2v-;`uJr5{3-P*G_Q8ztEOgf$NL4{Q7#nMfGnl%{6K~8htsvv z4MMp|IF?I&<<v@Fk;*Q^t&81&A8mtolMV=$wsAbfbDg7tUUwewHJp#G4&fE6!>J8s zuy#cn4ty=Whqe~N_Iqn7z`_Ss&8eo9JCkwDcQ?Mf`~>XQUVyTG4`FRjcjhi0Jc>NI z=~yV6Y21dsQir9+dMaMOrNqVizSFUG9o)Fb2Zu=6nvL5fx8}b^@ZIwTUAelHW|+IP ztF{Xpp0(g9qojPF=2FK#s!~_`;#+tWzeP;)o58wv^6W50ftPPlL%W;GqVWL>q2V>M zX+DAMlK@mM?*g6MM)2i8LyqgQoz_X;fnx*b@Y3@VE2k=z@2;B0i@JJZ*-L+37g9v) zCb>Xw%@nv^Rw||^2IGO>>R=Mo8x3OxwkaEem&0a&OQ$s%d%cDVsRKAe+RHramdz=S zp%~p6g*T4;CTFuitd;I%&HF#ogb-f}+|nX(=#%-ht33Lp6oIU60EV|KiY{xKsPA)E z>=CzxcJ&QKnURub?%&Jm^^2|e_bq+yQ>6^&EnK-{R1R#}(FMcTof0+F90acU1Do!< z;)mm3XiVE6@ZuNZ(Vjju_xJ*yaZrOPUJ;{ITtPkiV3q2;!DR8*3wP~01F65dU`Sn0 z3Tv#Pm!%(|w{{BGo_Y+2TQh0I4H=&QXp<08J(FgBw#6mu=khFhcg|au!n3_Lf<fVZ zYS0=-qf+$HrMN5hu+3!o!;wsGN~~l%0oQ*{#m_MgLdsjo9TFvV<3C@8LDT*Czs6%^ zKei9GxBKCTxV=KZ5OZFi`5dNnoyF6_4Y9V$feT80(5~?Vx#nR8*o$r$wZ#&Gl_zo1 z%_u%L?g1^^q+6Xn^f1Nul{_lRR-B`siOc&ca-v#U^_V&j)YVkx%5Wdfol@_(eVG;b zlzM_r-ELZRAp_pY24eY+C$Rgj4C=R_7-OJKcMOeC_FM|T+BJgGuAhaY@3x9VU%Lxb z>V|*qt6<mJSM;^#cVVYt98OX6MuVe4oWH~!P8eCiz_Ceu<DbM9-g*;83`@c$DI5OP zZaSVmmxa$S*h0HYCNGqoj45>iIC<(q7_YfSaNlEy**+rW+|?VEspd<Mx&~47q~Q?m zGLE;zbXM<;Yk~#+EOG8+b3P@_%o7G=vU<W-z&F9{@#!0^h@Xqaj&Z#3sw^K&*hfQt zh4Z45=gB<V5cM`md$I%4H^h1YA1@SmQhRqO@Hi;G+dZBq@)~$>xf65`^<WKqdzAe; zLo!OuqGE}0?~^isFMa+)0h8yjcVS;F2^@#REtJ_{_*7oEF$@1~mV;HtVmY9t5A|Q# z7cCSfb4lbd*m}#49nE`iHyKSFks`72y(7@<M!91_Ssy&LPM5YknZW&|Ug)7GB9xXE 
zP>G2rK9za{7arMxsn1OOF+G=}`+tHDYgR$z@GfGv`4;%>-ZZYCG66q2ETRd$XR`NF zi8)!Vz|)?YON`Brf_1yDbfc48-+{j5VUs7WYLGalL)@fZXCS(s_27*c(>YydyU<n9 z4~ufzh4CKde0Q-AuhjS}?AyNp%~QToKc^m07pM*%*5=r9_!M|I1=7U}cKo#9B~?vx z2CYZ_=zY<TyEx3F{@?QG>jGOY)Y=AnCs<<T#zyem`b`+GC9yklJH&`fN^q$?%rRAY z6rXb&j^(3DAiPbwwMf4kUw)-xOX41gUXaB>^Y4)BzM<smWGu*;JD}3^QJm}_!;hCu z<)fdr(bMhHzm2U6=Dn{M)JmpvpPe(&f6o^oKF=4{PmYDYo9tolIbDAJqE=iy15l-0 zTj+hvg~n!GgouOYRpYOX;`x(i!^Gq-@O$SV{*XVPtvO%NFpzfn3ui&)0WEAVoQYNI z-B?Ta7_HL!N%=pb$mHk^P<E3%#OdmEG-3~=KP?sv{HO4yvOHmGvLlVkE)d&V`=d>Z zldz(@y>MSE3(cM<b4ytTRK_%jL;r^Jk9B&e{9v9qxNISP`ras*bxLzpk_s9tT0%nl zQ~12HMc6a)71UMCVkgfN;`}vpp?>!WEOiyYx!RJcWD^NaM+B|Z2(0u>gWJdAF?`-l zx|lcy^<Tu}(A6<mY-kUqZhfiy>Alr`HfUkss-93i{0C|Oi$K#8ZrCOA2N-wJguhOv zkdh|PkB>bPUP|xL<Ju<lC9}A?B$c7;={8uVZ^*9S;y5cnt$L%xvAkAtpB|?!1)pP) zqWJF*$iLYI3Mqs6j&lSjx^05z!{-t2Q<LrsmN-*-W2xg#*a6j~*YFb_Sq1an!<*s! z!xgkkTl!g(xV&@D`~cMj^1Q4AFyl)E_S`7-GK(j&ix$J1JyRuTXabZxY!bmM9^;R{ z6ic?bQ~ko(_;9cu2gFDmI}>M~KIs9i8)AkFlUGvOXlqmoNny{;wv-cN#ycJca9Q<1 zVcEKU;+0V@m~<taR%&Tc<26@&EzPTTm$SLo$~o+^>lWx^4_uJk4?N_bf~~Cr=H_@{ zq|^^99NGu-_I`z2?-q&KuPbJC)1-;*ZXD-f$r`^;fw9U!z9kkzPxC)wy>cY~kyC(* zPYV1{j>sfj4dVKf=y&=zP3*`5t%Tm3>YW2|{u-e1Er%8vOrZ;+4G+4!lS*$V;4Wu7 z%-Q`>G&@lTmUWJJLJm0M@hP#~L!rvOLXG$Q=Z|+=PJ+K*EY+``&%gUh3?iQdblDt_ zziZ}$=kl+lW$DBXLlCvn7Ya{$^rxGB@4#fw3An~(3V%EK7V_%j_|WJ|TC4wxwBDZ= zhR+@YD_7^ib!TUER`%g-tvexIKMt?9No)tn^KpCoK%5xnh~{P!P;ZjN-+9{uvu`Kk zqo7yxy~U7jKrXbpO0M$eK#B?cN=K#aS*eXKY#FVOnq9kb_QPxz3YQDo4Nkm6;^5@J zo`cy#8bN)*Z{gh&IkeiM$U&2@3T5(9*#111=Fa!w;zu^@Idd)*_NfwFyDi}Kf4;nQ z@_5Jp`YW>e?BBwhrNKPcTbBcqV{y>?Ko0&`1x`2p#K}AV(R^n&mK~l4Q5AhS>Vh?I zDY{Q1bX#b^f<gG~x*dK|^k;opFMNDz9(UZh2JkfnOO8$9#_#>P#D1ChFLxk*yd95D zwxzT(OUiWo9*TDZnxUs1^4#B%d^+ZCb=B(y>|c-$@!KZy+V`pW`@m(mawd_N72Kyb z$yIyru{FosTJ7kySq4+L&qt-6zvxA4f4;1IAA0TW1INPh>9DvO{_rTwGMYw<T=t3? 
z!Lu-P*+F=4YXPj4ZaAhrGVtm#>8<}g1nm@qIi~fE@Ui&~6?UnI3;i<r&{sux=jVfS zq`T>}p?&%F)u&V$s|mw?jzjCEz1e%?eA-{#F1R-I;FU{$I?gq_MosUmP}5>Pohh1t zt0mTcfV?kWD70fEhq<)$`eVT`q6%*3^aPttzIZD`fgZPaAxW>n3rv=f!<=E<QFEHk zN|~{`9La@pWl`0zD{W$ZL^OvSi4#3qT<K=r0HLP7!f}<Zti;xf;t!**!QUl1@aTM5 zm4DF;HVd=HqRk=v>X#no%1mJWKtHT?h~dTFs&vUQ9iJ?^26#gTy{ykdu&%&$CmzG3 zx&%(Xep@Uu8Gx&5)!?XyHjc^*rKDO-?kDjtkLPuQpNBJcS?kKL?<!&R*6FnE=yX=q z{7;<Q+=b>jbmtX|z3H07DB7j(fGgJq<NMu%@b#D|@SUTMzvY(G5@UZBIxbO!VlNzb z|E@Ue>=;V>?*u?|1pT_G$LWtHcGk*PNcJg(;s@sinYGcPe?ka)%*%xzjUi~Q<jz4u z(s5Fc@8EBbs6MwHx_zHbI%`7U(YHy|<(>kpv3mjcBM!ptJ%P||*F&oQAaNhhN*>Fu z>uG&>D%1Y{IP$42HYVBQ*j!Uo-%=0RRs=u(C6oG-acEhp$6L=;3p-{_#Ovh(>MfDu zf~30?%1<4eC62#p+G1FIL50(_wYVrZ3su*Qz-?zFw$#^Dhj-Iwi^pbI;@E%JC5Lth z{qVj+x-ucC)RvCJS0=J~qCcnKmwb*^I=t@YTwIdv!nG5u*<`Uk%eYPDxeqszf5Zw1 zj8PG;>1ap}?5TLQTn3%NN>~=VO8ls%g}>WV@%oD{bT7^Xy}LT&o{({HaGE+k9x{n- z(yl;I$4^=oU`y40fml|V!Da7&Q;s|Fy%KZwT{4sImk!3!i%!vj(;EDrRTPf6)f=x# zoSm5IGr?Iw@;NV(Zh8;o!J);UH&toDg>^IV@+BvbxC`uSX9`uvS3<AriyXUWOT3%N z2AZTYnbES9EIj8?@>(NSJvJAUW3#wCW;2c0>W!*1!-cWs3ou*y*iF=!hDvv}x%Iaz z-c-=Q=+&wGV8s`hsI-Mb-{|qgTLC;{La11Ow;a9(tOuLEI(RWqo>OGpSf+Rt^z@lZ zMIoypMJA2ihfZS6!P<D`X%aYe$f1nlcz&1@K+jI6V`G2d0&P2#eU^;2CiX%_mtZN^ z-www<_u}Li(%*keD5-ps+=i!H;KMN&Zg=*EhV!#9GHE@0FHgp*k9Ih^bOMVSuf>g@ zgDLr_Exya#PmAuv!@@tnB|g*XW5*zLFOzbpk>)HXu>muShVhRseQ3$G3UHTxc781l zmG(%>#nZ7}@vl}l{@YecqWmu~o41};*6t8O`lryt01=)lmc#duYDf)<$K!tHXz=b2 zSf%f!^`<VUbkvXQWozNH^j<aWJVD3rdvfN;98l@ugZgz{IWKz#|Czj-mR1y!W~VkA zC`z1$r#|T1KU73dYb@TX%I3o!iQsz?c9u$>zH@aj&b$B==o)p@kL8<h{L$2`3hr2k zi5Yh!$8y|Op>ts$G~Dz=ti2Qf&Iya5_25Su>JSaH-^ugnk1f#j?>qF`S|C<M%;VEV zsZyU{EbqA*gDc<8hMeeJIQRM{)Y*I{$GLl{<BXKmh#d(wO<Lln#!qmhY6Q#mTng*2 z8Kc~f7P1(p#6!y`(dWm$WIstjvsh<*ZkoUcucpA13r}g!xq*Cmh{Uy$ve(f`@1Q`+ z#P_c6fpP~1^S#R;-n#-^{AQ7`>HHnoxX+VQXAD8h5Nlz@$yvO|=OL`hv4Vg0rkJ+H zo$G2gfM)1)ygB>@T)AIG0hb6KZfp=&9ap9{n;GQOkcJoTj^LFCzd9<u3gY(M%~CgP zGEEEGOA}RHaMQv_o?79KRc5a2yut`)OqVD54ZYbyx;IQy-4B0XgyZErP3p8vW3gip zeCs`5oOwV6>*7?f<d7C$RN4TeHumFBs`JHTlU%ta+MWxyOTF_Yjg)I>17FXT!@z-M zpmXGwu+!g0+!JBUHVdV1&f2YXclsR~-fa$wUtKw?!5S|vHKfh;x;VFY1RE;5^7OF- zsb|F%IPG-DF=F);3i|1Ys?whP<Mn6I+}a^z1=`^zX*ZQ+a+?ku?8{;87lpRta$q^J z7?PY-dCca6WcfT1qbfIx7P%uNM?^3`{+-UF<5l^;Z4vmub~GE_D5Ii;3@L-U8xnOt z39p~XK~wD>Fb;L&<1r)IY+gsT;rD6WP$Y}_--h5lv#oUgqyoK|x(9Z=*TtPz7jT%X z<n~!)gPl$%=yEf1*QRXj47n%v%eBROxhgd3up_LMW`Uv~kA({x^!P^66N(OY;1e~1 z;FP&Y@K^W?{(lm2;O#uR`cIyH>x}W4<_yfpHNplZYp4t@r`5B9@s9gBO6%c>S6j2V z%eiPyiwwpF>Pc6nnJoNA03Y&J#3oBG%vzy^;)XZye1|bQHhmIKE5sAeT0;rPufv$I zAv{9#rhC&Q#@WGfc%|9^OVqXsIwf;4ZTJjsD;`WXAABLEU;&nncR{!59;|o!v3Pi> z95z1><QGXAZ1Yf$RSP@B)N08|m*gna_6vh>ts2L47kS~fO#=V&>m)`CzN2`BD)#hd z#fVs1I^T>h9kCH!zgESb;g{)Le-r$**9BEZcEf&c!CY&pj5&q2ydpUq^}mE;bH;SO zdASFwdWY~hx!#<=#g=DAmR1+YM$*MaTcz9MFx;1A#b&iq?^UUiaz8tAL`N&F{kypO zt@S%$de>yO$j^h?n-;8j<s^N5*N-c%+mNEh6}UIbNOF&+<54|jSiP<hMyYkh%Y%I+ zkLXj7vHt+p+B31`YBSgbxzV;k^YD&`3A!nthC(wxjDz=dUCtiOH!Q$Wi-PD-Z5Wj0 z-V&F%Kco4r?i3RgOrJa_V@}RF8lJxwsNIXj^Ga+X+#}BM<$nExtKVKW!0TNHa#M{p zzio(TBk>hHdFIJ&4`p$dmMmUzsQ{;8{ZNRD!mK9(n@Rklnk(07tT<mxytojOS4oVq zF+U(G_r18^;Wii)II>=r9uABc&y`<7aZR}n1!-$iR>>d8-uuvTRo`utKT8!orCs31 z@5!*X!UlK!vFD_~{m{Cz86xg{ry4z13=Wdm@3Mn&ib)d;uU<qKRGfKMTBgL>w_~lW z6JpawDYwyd2fo&~3Wqd;c!N$NN1v}1w{BbIINEtR?7lFaeO2Rm;Tj9R=%#~~?yF(5 zPBzEebmf2XWAM~kSuQz{PV3LFBj5N;d^kXfePnv0ry@w*dNZ^dl@G(B=QwyRKMfwc zHTmCWk!GND<G&xlMx|NMeWe;7E{MbBC;P#=ClR<IWe$uH77DQvbH&l&7X7z=90E>) zx)E!|%D#fchN&gZjN`DO-W-iGwu4r@2#QO~91mCwz_yXCFu+5D_x=cneG=nE?N>Nh zzTGeWh<yVc)ebc3NL_VI=Wx8ZUIu2BnsK640{`5y4rU*mjt`VygOtqWH81+{^>Q^H z(Dx{WJh=;ArLk;$tREYHw4ie{dgHHk({aJIK>i@*?CKZxp=qs3s8{LAG8N}3slOVW 
[GIT binary patch: base85-encoded literal data omitted; payload is not human-readable]
zSjF0(?S#D3yO_(edE&Ij85q7Lj*c(n+0~!AG^D>X&N(!mwi(sH*4k;fDm@u83#!Dn zZIkHz^jKOb5k>XCUX0NlVZpf+99t00I}$yR%pE?3?Oqtp`(wlTc}~Y%pj&;nh+f-F zNv(ATojA1w_Il)#+KzEFw;~H&y2qeWU^?7PosF*TdJs{dgIi1*Mfzt1$K^*OT=S#l zh6%JkyBCd%dVA*ky>skxmnFHsb>r^7WU_VF$2*^<QjR@$H*T24dEKXB^!N(Frw`Dp zn|FkuKQiq3Z+rS)Xh#p$JOpo*fe^o7A-LViLF=?3<n%X^9z5}-s=5-07k7xu2mEoH zBRz>b1o-z^OcvG~ji;geC$s<h>*1I|PMn2iiwmB#LRo)Ji16r#iDv~GuP}mo2F(*L zY+gWNE4x_tn;^K-8AQ{!MB~zzvUGduWjOw50V{nOMaeHOLZ2tsm`B7eSSmbXT_bB@ z_91nYWLwbp*YhZRL^!^v8-ye5$1vSfoath}0un<n35J!eAlLAPRm#mlym*vt+?vLk z!csB2x5#qdgy6kZO0Y$8Ry?-tJM?|1jUPK}gl#jYlkq>srW8LD&3YLKE-U-s4^1C> z`aqf<7O#Qg_8GL(DwMQqj4|}449O1bW}}Yxr7he4!IUCp`jec4XXiA6yulr|dyoem zj!8h{uK5^#?kRKEPZoDPH)rb)+0q+%Wib2{OY`;zqufFRH~I7+T96q`>#u3Rl^HA8 z`4bQL&#B>c-g~hxdnL%99uDUQa*p$r0a(*$fJ;uzr9CZ=*<pDb{O|iM;h5@PVUU$N zdtoz*MngYJ`Zu4(tTlo0nKoq0?|j0=eDn|Tz>)HkDKGC81U}lqR71z(l8V_R#UgM@ zS|T-H+soP;BSrflU7C1Yj!JG>QL<qGo=WhBUAMDIy2c2{?7AnB8{vYVzu#feP7YKY zH;9Q25_mDVj9HwX3mryNDN<oFX^oa*A2al*Ceu{38b6mh-Xt+sbDk&p?1xAHIbpro z5tuO2f}Zb5q!$@lbmber)AjE|O)QnfRzdWddj{yzBRJk?mRO^E47#k|!>jkb@dN+8 z)_S>Mtz96Z!Z37*Didy<Bvz%>%4Szig*ags96rzAEA~6twcDJTpY$6#!eglY$8u3( zFp`Fx7)KZE9noJ^7v7ge)3j%5w8g{?4LhA_=PVA8eke_I_g#mlk8d-x>{l%3^Eljj z`J(XG`4M|2+Z%`OpFkS?8n}>qk%n?_RAS3jadcvd`1AZAd}=!h>SUD#{h@knG4D35 z9J5uJ``a7umnP$YyJ_hAvqk94mB(1#<=SG=8#5N~5j~Pd;yz`hi|@MFD8D~!-p6G6 zWP9E1d6O+3;(5pj&uH8T{@4)O3ZHta+=h$!bg;Zauz3@K(NT5cPPMyi6Xy^dNU`9& z$o{C?WkYsHYN6tyCmkB9iJA*`!i%=uLgkqp9P-qUk}^Z+Q}Zq6GF68hGJRpIo;j^< zD-{2&@}zrRlc~+}8MMCtB6QyD7Pgf5f)AesJ&`Gc*ZHX|bGa{C$MnXlS^@0M(FV~h zVFGp@Q=+5Bae_k90J!s55kDT9j(g|-6zt0$2|KJ-3jxnl=|OxAd)_>g9G>RkZnKx* zwlM`vhudKxon&A7J!9wpHJnL}D}Y04eZ+Zv`_ZDq<!1+ZwDLJ$5_=de%@Vf9^O<}( zEGzQk+%+3C>|=q0)(>Dig+FlZoFU}%=ZI;W(>Kjjs+@F;xqp)8YU>|huss#(tK?{n z_IJ@v<)kou<3a|;b1=;RAXBQj4C;w|A5+d}5)U%zisLd?Y+{K&V+&oce&H_c;kp$0 zd!UdxS(_RT=-`{>LtwktBlb9{L2R1h4-4PgvO%tPc)0Kwybb2huAB(-R!zpvl0)of zN)#PQyDP?5w87^e12It}fD-$iVeyY{L4LfHICb+@7^1rxs!r)*x~37VoIMf?z1{HB z%!|zP$~4RyFrCebih?0JAA}#5i`hAY7_i-ROn9rVg$YVOpjY8cH1Ax^{0An|)3o_C zbIKI@;X92s+)P1RX{6?wH_-dT2&SSPPSw&y;^U$L!of8OwC?s_I5RIBuk7E*Dz_hJ zf9w_U?lyB)x<!*dW<{`MwH+)$&5-hyzQIG?r*LD(BlzRv%6%rbbpMA2Xz%M5&s=F^ zr)p<oPrzBB<@8M4r?CPQE#-O6GRrN(Y7marh=Af3lX0?17=9eBLf@BGvo#g3*((0I zeEBvWPdpz$9S0Jzb>c$yLB?75koT1JZY_ZCnNj#rY7YFpwF8Eam8X)}In<ZVuqEJ2 z3u-l~&+AX{$0!F|q@yHHC)kqYuN+Oy?gR3dn^=Zwt=J%qI3v;$m){07e{apAwmjo~ zffSmyo_7UT428(NxzNRZ78|=au~^PIm7JC$N&S0fVg8e4wjB~&DI9OB52w5vD<pFB z77Nn$4NQN`5@s@LFx#<bA4{2kUb55o8Z651jS2ksz42}!c8$|N`^nWyNT}1Mi1m&r z=T|IoYCH|k^&YSb7b?V2Ub5ovzywr`k;AmQi*T^vIE=TFqs}jUo^rJlj`d8%_?%w6 zLp}#L-McL;XsCzZsTYONA+~N?n*!nO`5+kZ{tFXiO<+;{6G>Tu6;>_KC7YdjRA#{0 z;GBnMc&Uwf=4@l>Ub-wZ>7mGA4O?2f6+-uXfa;70zJIwWCN0{=4s2GSef(T)-QWSL z_7maWKub*CA46ZuieXQuF<re=E*Q#-kZh6;_@zza%ss*I|LpXjFD!LmK0e-lg`HpJ zL>ldS^rrt<R338_78-WK<HSHpIk=iR%Z6gusb<l~{0@98&qMMY!VKRO3n@Mu*p6YD zZnFCZVnbBS*@m_VmOiAFCFn;}LHIhj{J{g4CD~Bs?FD4QGoku*VE~S)+%viWtu@YY zo^>|tRdp3|jrNF_GWy{Klf7(J=rG)PaskbK#?SETaWE;oKifJ3SfMY^HZ=C9b%9bi ze8eaUpdL2rXp-x-bbYedHKgo7J<@WwVY8k`;$NBdFu-~ttn&3o=O3BE{#SW$&c29M zz44=xL38Ngj!#foKaftn*EutyCmHqpDp=1VV|q=O#p6!-bpGRRxL%rwV=Q?tqHqXq z6Eq>CDF`R2rBJU|8^J_j9u2QP!-kmU3ePbf*LY@=+}Q!R?unn|S+*9Nw?+-LJ{)ET zqeqDgzd2HD^;f1OyB0#84rQaBXt6)Vk?au9Xl+*(B|YI`c&cfUkT@(ASNv{ZzxJfi z&^`P)`yzwBbLUck?@q}&-f3#+cEsjCcZ5UN`JD4(2k5)4Viuk6z*p59BA;&)yd|;V zUi29j`#aIhqt&9*{Ui+DEln%O)`=dQa@d{ZE)Zt$Z0sy~bmm+-FYfSOA7f9ipWCuv zn?S4@n<Ks&y%oCJOz_QBo=N}9?}bLw#YYy^pzG#DddjBcKkT=t9oP%ERP&wsw5jxS zz%@AbCV-L}=is8Z$t1tr6F)DC#Ouq=@%Oreu;J7~_U~r3+acdfyuNWXxSsO{xiiYx z>x3q@xP;TfN8xUEJ(1XDr9(P35~i6p7har0{1Lg7iJTwqp*Nju1`fq2%{y%V;Wp^F 
zw~@_AcE#sk0>~#zz@kgB^uCuI-hEuktaB#|3ESN0=L;`NsK~)tcK#HfFdQ`|R)T_; z4}A!fr3c0GJd;rYrD|Dt@>4k7|D8)OA16@ct6t2XXP<U?tPm=9E8*pvoLk}L1!gjp zP-?~RW50YMs#O|YR;f@=Y6uQVMf_K3i~D<`(9JhlQl;X_#)XgL*{Uc!8dC*1>v-2< zR~<}vKN@ciE@yqS?78pM9<OLwqxZ>^Z0|@fJYzhW3{a8!IvwMKo+Oy}$^siNWl_q# zJ#5XN*KpNQ1LLwcxQ&xZq_yR_q$uY_<?m;TR;Qz}ZI&th6JN5p9~ao_@B76HZwGwv zp;X)uHUxuPcZtP%QZOhaT0B$p8^V1i(UdA#RR3`k{_!)A=d~A5)V~8Ny_bp&HlFya zDuQ)SJpzN}qNv+)59n-}PZ`nUBu9R?au@3~zR^&}8Lu=kaaEnLPwNM)i}lCIUnRnb z?gGwR9*^FrLDuL0N6~r5bNP00JS&Qf7E+lZo5;M+QAV4{&PoZ12t|qZRN852?`SAx z+~>5H_AZr%rjn-SujhLH_j(my?)$pV`F!5*2sRuT!AC1}(d4cjKNYh?#nygsYFHci zomz;$GYB?|-SwOMd+5i0HF?*>7%X-j&ZQ2Xl;h{iR>RD}$od@^st<<Z&*`W#HG^l3 z9f7MyC{UNjkD=_w8Tzd@8m;txkecEg(D0T)*XKHvnaa@5!IzURXQG`&vHYfWKYd#M zl!_V)nI0JPv+F%!-{~22x|<@OD9^xZ3r%heT1o>B8t~4+W?tdRI=FxJ2>wu^jowS# z+5ABb<>t8a%#oRb-8K!KJ|2PH2ANpUBthsO1AH0sA9b>f#ZEVbtguOyk9Uv22|g=m z(xed7to7ojy3Md}%NU&ZF%j3g_r`vc50PTm!8EB)B)<&bOxK_N2gdJqK<waFn00mn zcYhis>7E<HZT@PZub6ZGyXJ)-wk&|-&93yd=_ds`WymMKL}Jr?PwMnEg?H81;!dSB zRCdso56wk<^QtYo2eqeFt0q!%ZcjFh{za9EA<PX*81l^qAE<=lg^RuT!1BIWGh{Jk zE$cx(zA;=f_?qN4&7Aby&em9G>*MepmGYNLE%f^k0xeE1XiVxD?VU*7$L<p=kz3?( z*%=*W!O00ofL|wt7uZk*zDHKWK^q17HC&xNnyq+)#u!d~><15SCE`8SqJ5%!cj}4_ zo7ml_-lCuTv8^I2WGnGMg-}i|^W^RaTWI;dOz<`u%V8diDLG{rhR#mM*wv3{lj$=W zHLx$vxw#1jF9+-z8iog~jObaM9pzcHLr05~0D`Z3q^KBH+IvxUMLd+R4Z+l$WY`d| z1&0-#xI@fTT>RxD4EiRVxl44(=43v1b(w}^r2#y)B?Yqv8*#_{0kE*!G}QT_2>y?} z*?V`fZ00C(#DRLeFe4S@ZI7s_y%rCt4&v+UQ@QceB6?XQB9);5JbQg1|JAnPJ$-zs z@RT-RdJ=#W%ce^n`i-<iS7copqUc4Y7e3e~;k86P%sOj>_s;dkNcHX%u~(Hhn|I{y z0mq>5UjT0L@6W0$25|RZBdBIt5WiJPp$k#s{Y_c8ubmqXT5aofMPo9iyh!KT2b1ym ztx;mHJC>LHY@}cMUKo@5l7@B#+;&TYK8>6J@y1U*m2RB@v!qdIbMuY7UvWB}y_7<7 zgR7Vo_v3AedrA86O@15sk3!48!29l(q<)E)V7J*PnAa|z<8=z9Rd176l^oc-jUL_^ z^GTBO#f+iGiAP?|$G=Y3DSKERz7TUQ`;MI%vpuMLu_<0$>d)-zgu8lOhqYfi;^1F9 zq1C`ou<R{hf42e{mEQ>)jyYpH%SH5j<N)4nDU*Y721b}&A?>=2@@4x-_SJ~tUgK_o z%di9tS<+s7hWjL=zegd*(wyVRMF{^yCfg5q1sM+wu;$A)3hT0qo^1)|Yv&fhs8_<> z2KVT3M^E&*RtI0#`Jmx^;AipW;J0KKY1({;8=6UU^nNgQ_BCXELf)`agAA5N3f_Gm z9N*Sk8oA^gTrz2cmT7j-sQ46;mB)c+TBTQ;Uw>)Uie#kN1!SO+%T>0!VMLuO9}Jv= z<xWFENz9u)qrX$it|qE^lf%je<~X2ZF?p14mWJF5z=TA9*?Mv*%;}+u3X5ODs5Id) z)pEh@0gI^cxFPqy6hp_VbNNbMI@i5UW6RQXFa7<!Y195Oyyj*&Es8IeS4AgL!LA^@ z(Ow(pDz{Mev&C>|wE|atvBX<XC9gZZf<P9X(4!frY0!-{+FPe4b|P6=cWR9^QL6=( zgcb1n%wO<)*$`@X{~U}A@?f88#oF2zui!=gU{-PVg@#FCm}S<7gIBz#>qphuq0kQd ziD#OhxerI~*-BYWUuj>h5^pK#fQwpFI3gm0J1jmeZ`;vb4!>)`DF^fTN#!>3AF;F6 zUA3H|o&~^;Efe@m&^74mpNBnG2lEz1XEd?D0--bF(ZW(2g&vdkxjEGa%?X4i)&6YM zG@c*N*W(ujKWRh2R7kMxhuiMolIs>;1Jh<r$y>Z@<PPTieaa4aX%mc_*0;e@yIT11 zA`vf#h)&G@zclLnbqJ2{hqIP8Kx5_vUTJupE>#7hqKe?D`M!s3fBz%-d=xvx&ZnZv zL$b}nP}W$QNiW1+^n|)0SjAg#vOy%jkGJFqa}~T);{*%M`*5DIVBxAfhPm#~q^ZVz z&~))a+HUuoYF@U|+XHQ}gJwRD*6D;VzpMb`9s_xN{u9FdN7U+=%=*Fu+-`FXJpHwq zJp5G9D#5GvjH(Hi?%e>vAKT;8eq*G(2dOyYp9fz$m;n2ihhy%p$6#SK65pNsK)co* zqFL+akmo6V{G7C1+Sr<pH-p5?@I)A-geG!BXE!|hYcHtgHNcK{X53etlh%a|hr4?< zxWOV=8q!b>MIC&2f&Vgk?Qg(3c`xC~y>2+^a5`+XismMZA$VcgMzB69VZ-!nue)oy zag3od(!X!;pr<!)E!a<?a~@Jh_jS@m+r6+ZslN7w_&+-A?G3GU3HVI(#}3!5g~o=F ze6aICY^)39DU%1$ot|mZtL=SwsM17PS*b104491DZg%B(pH5hDV?G=$I{@nia}QjH zNcC>_Wcz}*aO!p%FBzoE9c?pVU*-Zj@x=^9utnZHTalkHUP%V_llZ!gFBd#|0hfjM zJ7`u48%YQ0bB<t_dk?{%-Nc;o=NR0YcT2itbAgsDR)VtAq6=`~7MP7`g9i7!QG0_M z#?Q0jNm)6#&iD$%jE~@)CR_gOe-g|BO37q`KS`T<)8(DX7_}ysk7(s!d6u}(3f6)8 zOA9ac02lTyI!agj8*+i44p$rXL9OE_C3AZfT)e#vC|?;!M-uPVK22LIU3zsAM*ZiH z-_}(@z&I_O9u>sK7DKp1`HEn5)I)YjPddHzBjtsA$%#3Rv^(94p-7$a^iNrLY#%I* zF#t!i^K^UOA$fe|5^^+JO@7CIz$K>!s)+uNR{hmxkDh|78XL_kznfxUgd@57)yS_* z&G6PiHM}L*J_q!2;i9W5ZXMl^R9<Y6yF5_hfm)&bpLj<2#2Zll%+BbzJzc6jF`5;B 
zcfw^yd-Jd<W|IE1!=N*$1HW9C%9r~F^MX?rJivKB+?aX??2=c~`-3U`%H9S0>qX#f z{aoq#$-gvU$1PI(qR7iq&q9jtG2wzyggLi_6X;f3-n$_K4Kf_~gE{bvHCte4!CcV1 z{g`$+jK_Q@W!e2%1bW&G$Lal2*!D=Hq&WHo%vygDt{?A17BClRo--OR3?qx`NVXOC zBJ*j3A#cGZSdh~jtKWC!y>DmB6DMkbU%fY9NfA7oqNh}~C5Ws}*HXtCAKt%pt!%hr zf9<l>v6y?S2YoX90B`0mpjpepIpMN4*}v_8eUC)&^`Apnr|3ROyTpAoZYzA*>W{&9 z`*Mq;;L%)~%F*TLV2@HR_OCoklLc!x^n4*4D;dFGUnI~QuP(g%MkJmxn~Y&u1vJ!l zb#3<5-om#!9vX}iIJ8I~#~-riFK-(`E3#NVKc)wFdUaU3lOw~4S&1}u%R~D3&qFS8 z3&1KAbJM&2XfQvOq|1U`XYVO@w;aYZL)zfXb;sl}$!7RyxF!$WEI5tMdug4eaJfFs z<f7=d!cFpAj@0c%W-7|8ej*E%?;V6wuiEhaB0pG`dlI&W8q+r2?UH2igIo_z<}(NS z@R2`z;I?s3aGPQVN52^1I<-9R)K3{Ew~K`b^Ow`xxd^JNftXd61<G~1y&9j+ptGll zh7bBFKQ$Q27yU&hD(e$HJbeQG2F9VL;Bj~9ppK`%xTExBEiIV;m`3I&Vwk2oYCaV{ zqQ-d;IJP&3)vNNM?e-k)LR7U=6Y#Jzoefu@+(TuwuU#)bn!1egbM>&3-3+KMxI=?~ zOT1~}CaJAUcWSRN1MCl{;Nx@8$=_-iD^zTx=um(91+~&XFHNk>a>mKp5!@R*`QTAc zSh775S56<mRcEdF@2)6p==++sEL6gSj()gUnkU!%mxjB$+>-_$zeuCP$D`AjCg}VB zd*Rwh4w~H?J_+BT-fcbhRZ^ycJJ)Nie@>+GCpB>O{v!JQ^%pJC8ih@N(=jJo<RmBD zg6}t{!J>?C{@9_3^mIDlRh1xYFMJW5HhMsKPM+8iZlX7Og}Aw28LhLAg-$w`;c2WT zrA@7%8)Am6*FTljmuRxzQ)jkaVoGb*C87JVy>Q{%2;AE%i+}ql;McDPlsx7vg;uz8 zu*zau5N?JxstvNrnyd21k`ZK7+9Y}ih43Te6ZnsD!1irC@Xn_u>DvC~u&uKN2c2IJ z+BsgVr!WE{d)VN4O*K%X2h?|UC>-f_mf8eOW8X8zc;b{drjD5`2cL-H4MQ?uQq5r~ znrKObmDO;bw>ldIP9V=u?a*$40&6aAgUwozxVhtGcGzLewKdvUw`CaEOdigsuUw_8 zMLwvh(-tavAD17m>?HfQj9~*uZ*D)O7v+7-60`ebuxaHM=}Dvw2aO7oKQ8YBla8qI z@c3^u`dU2BI&DjBHtwM|zcTRZjTBhc#TNJc9mx~^w1=j1x|n>ag5FQwN+olOXuswv za>*-~v=VKliq_k**{m$Ax9E=tKOe81)>sS;f@A*eu^oG?S_VmHRbb9_8_qdd3<J$9 zSZjdT$u_S5BmHcw956!ca|NUCp#`o|Q>0thOfa<|f*<7%=SCNA;E65tCQgOVxqXGF zD(ZM}`~q?xfvB`Anv27qQATiEG`VcfMIWX}-36oi-nB{a`}?EXLEZ}?BWp3G2P^Qx zqit|{lMlu?_rPr{CUTI*cnog&4j1nE@OMv3{xVc#f1La<&@z;N@14wj+uwlCv*U1x zop>Hch}_p*Af1L+u=9;KPtn^1!%hpw%REKN5X!vfVl>|q8S|U>L-6cMqW<?IDM05W zy-=LOk8OPMpvqDCLsCz6Hhl-Z;(b89hd*|j=F0B%N?i3{TU<WA7^+?Mv1!Fz+H`I- zdTbO<odQSVWqIUhlE>qJ2yd;%e`GxG1{eyKo^k(2Fkf`$uC1BGqci>4?ROCvv{T>{ z=|gaKlQC;P(1v#%#QR`G0*%y~M<oL$;DTs3H0`d8?f(_9S?M%>Ge?JtXLo|)lptu` zlSDD4YLHN4gQMT%bC=oOV5&<8Sgd~(yz+z(u+fTNG;N0MPI`PJEDbxx?;`tWo$-pr zV{nPG!8fXAJlUiNdMS^<A_Zk>{om2t^Hq^l%0t-c+&~_F?Hx_%F&XDOMv1<<8dv$* z@EfgZ(ALVIAFb+vZ|g;`^|M6fb`rnzQRIJH3o*cFHk9<S;PRu%IQ>&c*0Gs_gFE%Z zZ)3ko`k}Hcjm^Yev**yTv6^U;y%Y96)W$+#d}-Ra*Xy)VPaM0ph9-Pd<mJWp<u7$h zAT#!qbpD(P?>sM9a227L-*qbD_5zBVzDlP21iUauor}hZ&+GeNGKF?*QFs7!r0cag zYxMEZiymBeQNmW;HRP~zDLisb<96i_;N?Rl?5LE(!*uJxwBv0s?LQo^l%J#LpNBC} z9;7V)MYJlMT>XDh-v{5l?&~k3MB6mh3GCvL)$0r7&VDaBI%L4nx!SzI^f=V67=UjK z&e3r^0LBh+_<It=*hF1kTHKGu9#~R)tJ)2BwpmT3nlq(Y)75dtwInIHypHB%R#U0n zWK0^em)?n8`pwRne7)~>I<#INjWk;5d3hSwpek>b^zd<WPpr-m9IgX`$+crRoC$Tr zDfQv#w(Y)bV;;>1##PguQf-b(sFIGXpNbnctHSu*PW*LXBv0sKgD>pg3r%cqsA}Jr zUeDI20=y}G;n$S5E1lcC`UFEJ=26_FE42~s_I!N8OlkVKEimn|9v>)9X4@Nwz~Z*> zmsaU;K}S1Yang{ZE%(Csb24lfUXw-Bk3g8G9?ugqGpmgU;B<Byw4L=!K5Dm(+S!P& zz2U;sN;>eWxL7bgJ51gaPyx!S8n{$<EIG}(2JZ~~VBhxp(Cl}e^4C0+4}Y*=kC)Lr zSj=#X9<@X5k${T-xpDr;GRbQ62KaA{44*Hm!A9RWDOi68gg^}V2c+_~uDP7S9oa>1 z4&3@9&Ob@55c_$hJo{z}tPoDy?(2tQ^_s;rRV@(fm!@*XXf5pdC=gA8Z@>Y=mk@K! zoORB&=ZG-mL3h`JXZ8*7?xu=!qMnF%{QzutRFh(TEnusCTa0?}hYmm+I`a4>7>Zs0 z!M}sizt)6Xs_GzJ^a#IS7JL3qeegy46DUs`j*EXUt&J#1r6oHn=#YaS<n~bI=;>lE zH9ehH4yg7jT^LT!TQ&Hu*nj1@So5>(SELs&G<m+rx;6hW<}tH;(Q1-5r~UXsyELP* z-{`@3Vt0t(G|r#^t&?!vT7{pV562PO59q;zQ*cZ#9h=)XlEvR;w0YrX`NN)(?2(u1 zHLSHQ_BiQ_dh>g8*4G@~Ge)r7SD2#Z$Run!;m+8g%}Z45P|rbhEgvLt=i5WrS{?|U zx^_TuPlOJ4OQ=O-mz47P8zkNJmlrF=u~n7e8#T0N+k?sM+)kO>sBMMxc@uc<m`_w^ z_#HkDHpZhfdSP?p0qB_7mbJ=&Cp8_W`aPZaW|G7mte4T~etY0#yAU3GYyhX-tE7>h z!Y$P`g?E42N7Xw<;k%`_Fss8dIC(G$tKGZsgqHX4@JJciZ@Ws`U*F2MI)=P%hXi+? 
z`BAQLRK=QfLDP_}kZa|{yNt5=pkM;t+g<?;u8YC5r4bGZ9@^i8bF@3!8xEhc=KDXl zNqv)ad97azfA=tAgG)6vr+%jL@O^#J_f`Q76Q212^>O^r&Yfk;nebKRJ@tzTK#e^M zYro%pDIZ>=BB{Qc#GXArlI_EKO8nD6D$`=QZuWAR`09%|C*?zAQ)mA5y$i+*CV+2n z1DJX(lV3!4hR&pl59XNRhuAb;S8u?JFL#4q58A?yH1Qc&7sIc;BYD78<Trl1;l*Xa zu0G~UO(q&tJo1LPFNI*%jOlcGdIe!w9pwD<B+cLIe6VB~^&WpxYP3;7=ZZm`ks?z# zZv(9jvAo%2Em-~Nz~#S07Sd!V?K%8I+8-hMk<nAp;DHgU9uk@BZ9A!Yxeq3Oi<a}u zyYR~+AEaT8KVfJ4caT<`f}66PID3ON=A?GR!`WtB@bIv_$JCg&9URFJXvRIXqIrmG zKkoR?3}-$3NP`NpX^F`gIBV-imH#Z*C9r@GFMR>lXZ32;%?EMmatChQ7S2Ye2BXt= zBMb@Yj$aH0h`ZkrsE)cNMOBFI-^~~td?yu*EDgBn)*u>fc1`ZN;jnakiUTI^XoAL; zDCxt7Y|fHT!sanc>C==Bu%uIG{v0eaZJ{Obb<In#^uH#>Jx;|9)`Cqu@(UQk-<tH% zu9(tyHH4QA^jch{hqoq;5*+dncFSnXOCHK_S3_i>d%JMZpCr-c&f#TEv1p+Ppz-=2 z<X5yomC{dAw;a(6y>XrVhOLG_&%8N)Qai3cFowIojo_Hdft=Yj5_dMapx()<+AGDQ z$g8Xy9tkSoPn!@&?APMG-2|^htpl~y+2Zx4XgnIebtCVuqtNa06jC@KvcT_i@t^$} zn(FDr-1!!re)t7KW{7)2<RZGQs*6WQy`lV}Kj4I~HAb|#E4hiiu-)m$vVmabUVc+b z-VuRtxV1CSYm>*fH-zD&wpn!Gg}vBiErxSNpJ3MODQNKQ2=t#A%TLw{2f*?gstP+s z-j}CvW4ax8&(KAsDSAApWwuBT&y}xDcuL*F{*qRgO_EuxEiO?W$%e1S<K8U+$V1%N z+T}jDJ^Mxb&DKj+;kL5Uh<#A{@(ej__Mk44TIgI&mT;`c(F#u!Y)Z1^=`U|k^S>xo z{j7nyULvdTs9d(R3}=<ijyyzFhabC_(&C`bcz=&RmaO_scUv{^jM#UcjOvbo)4viY zx54FIEZNz<fg%r%$MSuxv^`q~e+bvOU-KEk-VdQ?%R|swsRv5!>S^4u1hg2gfF}Ru zOUbvt!n^r3(5>@JuaB>yxO`oI?lbW^<)&N7{~~wN+6UdapTc9wV8LCllKj08kZXe7 zI=+H@T?4-KPNwY@T6p5iLYmfn0tZd9!B*c`J`>mtH@T<NVW%Y2=#z`ZCtkz!=(kk9 zzX}Xhm%!p3i)h;Swvtowdy1?J<yQ425W6lH_y0HwT?{K>!ol6O$;VE5c$+Vm4DS2m zzu$e)F`<dPJrh}F#6D6JyVmpT26^m`Sp?Ra7pN)nDa2nJjcuw;a6{e%j5d4)my|v5 zkozQVRk6mb)4g!!pcSC>y9<tsLmY3Si28kUICVxm7j-n@PN&DwjXig0m#!_|((1sc zuf_7dz!7|`VHmW&X`rEkv2mha4X309dHtR~mLE38!O+Qe^z%>*$DK{01Kmb3-?#=4 z?#z0Rmq`g><`_`CooenlNj?Ee?2|VT`fGQF*JUc~S(XeBc6Gv(Ssm!MjRg#?F_!+$ zQKL@YCLB0Jyn8*}_>`X!ZphjWmv8kY&9`k~`hRv<^Hf84<p<%0jwYb{JQFHsO~T`9 zQRp@DEmaPD4-OS3?Eh|)WOy-;T{E6gLsX`?A4)htxM24VO(G-tIsCf06dG=8V@Mla z+;x5p6kfEGZufM9A;%ubV;y4IqhL6u_Duvu)e2C%U`fuRKPVzTD2vRqfB6L(v;R0W zM(uzz%Ra~vzwb(!MtuacSeqt3+9sVlr_G^v?$ng3_U4nP1XJBCo|{X%q2bDOcDJ93 zztA7wI~P*g^)P(Wq`~L?)zGV_Cy%rqCpQF%UEZWTRG&Tv+TF3iFwqaud7{A654&O1 zAmDOPhc9mv;Nz=UZWd>~-Nk~>6O+d+iuY-iiO5q<7yZ`#j?nLAcODuz7-#8hk++`l zpv!xQLG`N*lE#q$-t=}8tiHfrGxtp3<tt6NOGJfidDjs8eN5!bvNhIxb;lLbaJt(1 z9BS_GqxY|#lFQCO91`g#JmCc}*R>J`#8=nOphA>?BP%;7a!tZQ2v%0)85P6W(AE`A zrj^0yoqxzS_7=IT+o8@*6F6oY#rI8Z!MQM<XN@>OO-nL)+tFq?v_WKg*Po>jjpOv7 zA`2f{U8Qp8G1%^s5`W(}5#;5eoZB*<HaFVhnS%=Qxe)`gZ<Gq&$?wC}6@L7;Qx<m^ zAZ9-s>nQMj7B?0jhi`SgxX1HIPVc0H_a7lX7rvGUW00q`-6S1;eu=(z?#IrSPMm4y z$N2+>pssk={IH!VRr>Un?R*^hw^eWS($k~rX@OjI)sY4djpvTJ9r%T39GC1|BJU2e z#@`<&U{&&T>ACqbI<PYbhD_*-KIhZ;wXG)Rd9R|p<Q}|m_siPbfqn3To+e)l9ft>P zd-9iB1+S4`hoZ~!j?(F}LhSGEj2AZEkc(H=!>|3jAg<&tXcX;%FKO+u@OWpwI<SI@ zOv>Pgy72rg`p;|sB5&?pH3?0R{gl(JW=NwGL3q{%^NL;!+D*y)%zOtdylurzKKb(C z;+4{=ih+DGTnCaA`}6!`P2fLa0M*>g1xww9<ht=2yjtHLCYH32RcniE-#Z>d)yrvF z$E|dJs}?r=e&xB>=9;v0;2%njdq>~=JK{ZuEX*hqj+lY*RH3hkZ)Ibq^ftt8reSFZ zXWkwhj{1(JlsusXj7Mfc%CA9`^J_)zi0Ys8Bqc}Qs}aEO-a2z-bbGu&%KR|Y0O8d% z{P&|fuJcI217?@#zXNxub$mxI7^T4B-%_x^U7Z&`YKFk^wp4}jINo+Od4)Ogql05$ zU+ZJ4j!(sP0~(>Cb(Or^YZEC=b!M020k~r6E;!RZn}1H60kPS~XyL<rRQl@93jTB9 zMYlCD!*-2a-dT_K^m5^-H6Lix$z9UM(k?js)*@ILsD%0j-^4S(i;sRRhUII!1CCS1 z=_RMA?R^K7C#|NI_Z`?INQHMumtfYZ_wd(Xj#o-(Dr}s27?w7s@Oh|(se`XUY5fHJ z@vQ^i{_s(*TyKSbe`5Gmojtl1ZKTKBJ@87n6ZT&n&now;<(J=8q|4V;`Mr0O+-1c9 z`BV4<I*4Yl^JQP$I({J3iw;`4+HSB^ts(9^TDaP++1fLc*BKupheJ_(rov9zzp5|0 z7rSDwp;zUR-)6x+y;HFLKNT=r^9JN~TjWa`yU6O5I-8q_clyJz+-YkGZ15M^7Vm+O z6{Qa$+gHFqk09jupY*)<5Z1oL6d~Rhngh;4pHG`;TZIe4Lth?R--TT2X41Mu2h4YM z#_U-qB-4l`q;@q&u3wvm;cff!FE0~Zr>;Rk`(KE2QVw?47cQ%tlcZ@!x6+zG6Sfe$ 
zoy9$pF=u8Qbi32yb<tyx=ySe@;h9~r*XN6{Df1nv+|XtJcAMa8el+?B*Jt$Ib{N_J zJdJ(QLf3Z-$M>B%wL`N*`HY1NAAi;eivNY6&#MBCFjJ&Oi_X)Am4eT(Jf8avy9rI7 zw@4PD!+GvJ4esEriw-jgwm5DUeai_nV`UEP+h)rlos_uek`zn}I0|Fy&eCEJAHE*l zolE1aIDf}!Dvm!$snceF$^C&Ga9Q|x^+aYkzlqfLwBzZ$7Sgqw-h46dHKmGKh1dBg zT&eLCk~~8A&@l~&n32h=!zS>KJN_8CZvfmFXvH^AXmZ`q41A`O&T|DHKfz6SXeu>$ z{K0&#u{}let^N2!^Cy@eFJ^3yC$OVe0A2}A!hXM$(6n<EXzlGT2b502A?K%3rO#fd zRMz5uvlYO;svJC<Z%Jye<6!HdeU$h6wbyUUZ0wmhoO`Z%O#b~lKwD2$@je^`%B}Mt zv!}LKo$5AtJGula<K1XqvOAZ}tEYt8A?O!bR`tE;oNQ+^AL_nsg>Cx%`6$ltiit4C zw8}(i+Exk;N)v^L_Xc?$&VihyB%F807O&PAq0`6#xJ9rgV20>|SDAD6yaZg(=N<j4 z*(uF_p2`|vz%E~%c$`Bz&AzJ2US+X(v_k-Q>!gdP-gc)I<cwQV>Y)8{Z=Tc94Lh<o z#<L*~`+I~=ZIt*!%|u?=bdc&F#?ylO9<o<bj9?~(VZqgXP-&G8HGT9s)HjcdhYCO2 z(AjjkYb)JVGeEW60Ng4VjuvtdTc^fwpWD?iF**`Ehxu@{Q9riN2;kpRIYh2dr*rU? z{ME|nTGczcHrx$A6t<PmTiSYB-ZhX{rRTu(j_H_Y{R3=F-qG`-ezawbJ%^h_z!kM@ zTHARn4^6aWkFWFTV8DJ@IVg{nVjoEhLqE}!jNzPr+lw3nC*XGD@nR0umXF-+f`69J zfdtW=n%+$d+!qf+WA|=2b^8>|FN@>ptMb|CBJ#*X<&x2CAKrMN5iEwN$qS4Bk$>r6 zJZoMFTXXVj`!C3n6i<voJ*`x+o9;$)M~~*-ikjRSEI1-_1j97l5lbu8#hogEVr3tp zOo^isOAXwhCs7ZM#lu%TsJM?R%xX3goah6vuykwff?{9RtE`h3UL1l8U54TUM<<N` zX9Y#`C!)Qv_#3Gli4SIUN4G4&SUF=&m(O0H%PF5oCv!ON>7{@hKc)x<VFJRFS<p4i zfIH3Uhr?kylnxle-qsy?iKRLZ`>&9Gtx4uV>Pnoerogic9J%IhH$FPUjyDXffi|bJ zdA8FOe5({eD{b#V?B9O)X=WeR7;gXvQ(nj~D+R-U(>yq)HB*|=HVE^NiJ4SF9jR;U zVhFgi---5M8|;l%n*}qc(<}1-c~eq37K#<RaU7*LlD}OH#ENnSy!xp<F0VTYruTDk z!M?GqG&qxI4mX4IbzNC|UlCPz>448iKPAUpZ<=E7jN>PMr)zCCNJ06hVAq3ENS$gg z&o>Rh<gIsUt;ZkvLwzdO?w9!5Zza4dI%z&;U2u1Z3Z~4x2yf0SpvvQc7?3j`l2v@U zU(9+sVmODM-R;5AkssvSHY-8XtEaR!_#qU2uYxuS`)HPMeLS`oPL8hu0I_C#K4>es zYwn=7HEr<KqyVx`u|p+~B)s507`ix%JO8Rd*zNaVN&WsP&KTYuPGqXXWap2R-1#af z=%i6-k`bD}Uk-Zb0!cbt3peJq!FRjNq|(XxtbP0`H3f@)Y{Y)bw|PmL4-2q%hqyo3 zi|5|bB3Rx*hKFM<al2z8tX*u#4L>GP!v1`2R4J#o@$;x+n>Suqab5}>x<`u9n+1IZ zhtK$&11~u;lnsU;XI75JLm#bi$Ns_Gu(>myQhz{?f?e2uPA$0aa6to)2{6e_71xXG zdg-Kz_;bg1a&wKsGxfs%5@^W(=2_yl1?Q+&sx><H1NQnp1j|Ck@yE{&7#m{_`l8>N z*<TIsiM;mlPS$+wfp{kL_vU52TdC}nAy2Z5LAxpL9J4$OPZ)iqYwLg3Hm0_N=m1lk z@cbHdR_n+HQV^@Rrs5deJa(-0;jj5lbZw|HwOZLg)WrR;-P;f;P~-w<p99ZPb+z}8 z)JX}sA1LAcLzs0mh1X@ebBgHdp8t@*3G0K<tK=<=mjBY&gWJf;pad?M8sMT0dtlp5 zC!7~nB3B$4g^$i!VgDihYBVQ4g}aIq8NSS?9fR}4tWEUsW+_OXo2*#=>mz$C8Y6zc zSHke9&f?C$97Z=GraubgndQ1LxnUd37w&;K_cSo1d;v_(RmKvpZ}iso9Tbng0!MPy z;q`wbVYmA*{ONTM&NpPE`NQ`Vla<Z;H9h!LSO@H!d<H_UKlhs7HVdIZ17)m$_?v%V zT*tNYl7&f_J2nheUPR#IBkEi$a+-6-4CXoA=G0!Q`3h&chvM=Pd!T;)IhvfXi=Iy% zfn7It;;GNXeC1ZW{4?H)zu&K-;3GdFuIp^l?wSQ%CRY=!kK`$H6{uNcl)jzSLKk&g z{x!mm8-(j>x0V%F=BwemgKp@b`x_=7o&%4(4^#L(Q`Eel$m7E&B6l~0n`e7tfTBAq zg%wlHidFP$doCS(eUfZ6wOMnwH;=iZCb!+%l^>lMif5hOxX?t4JH~#d>sk@eYsUz9 z<d7wue6GMxA9rTms`hxYst;TI-7IPt@!Vr%KD5sF!Q?An$n)b{@+q*Ti|w{Uey<cL zQO)CjA<N};=bwOkO;`B*WgblFF%0h<-#|y>UD+aeF6>oImOYgeMb~FCzs+W9XwXGJ z+wax(UggsC--|udoTh{7RefCfO<fLxXB2zDh&zo9kt<c_l9FI@{rp>9o26llx%y_T z5gy6*Q?9_ON1<%>JglbJY7l=(I!jr;>pVZ~y-K$CJ+V*CH|pk>k9Re5dCmJl7<YLp zJ^B)ZFI1-D!oLdq`(Ba!y3q;kum6C?ICCn=4*|pPE8zX6e2j~HN3o?(X!l>i+js21 zQ+Do=4qeQ_zo}!e+OH56Pws&xVup42xf>g<9L8FHIhekG0=mOmT0XW1r+;1!ij9VN z>7EHz3@OBC);aw1(HHppRgJw9+VZb1qN6@`t-NCX5Dc0>inLFzgouB(JWS*_?JZYO z(%Qb*WO9x&6N^dHuq`eM(!fpQ<M^z_1omIrUO2DMgNx}K*eX2K^WqbE;KzKv>e&r7 z1KLv4j@Pg$AdNouI!$lZI`Ybc`S_cs!^h@sytbPzs#LbYLEisSp;?&x+9sYyY#c9J zj?CfWReNiC8mhC4aQ^JdyFxz_89IH8#=2_@;o3KIny+OD<CbXQ9lr!jQobPtFVf-A z-04&i8;x?<EBL+Bob*LzVcU~@o>~~k9Gk_fLiWlZH7~%sv+rQ8f^c(IAwON8#1X>v z(e6MjJ`R2^o&LHH`e>!&>})S^c<M#^Xih2yw&MO>N{t;)(8bKvU~yHj@~#A-ce6e2 zatAy*H=VspTIs=rvt$v{1M8y0@Sf;_^|{<4bv~)dzYgi(#{$3|r7IzHa|m0nUkL}> z8c{^oH549hNo_2bL-@dVP(Dz2o!X|;jfNOLcjq14oSenm)s=8(ya|3Qod~lQnBckj 
zx9L5u13Qlk!WXYe^Q`W9d0$FFr%F@$`fxoQ)w@#V&`y<m6lV+OE#L{&h2Z}&0v@k6 z@{;~L3YYG?;oH-L<ZkB8R42ZsC3Bb8Nb8<Fp{EX~tHol7+5qaU5Q*28x?q)$*uAdH z-~~q8s3m6ujJUZ$nt5}BoV9fw*zCSe)u%Q2pkFNCcDY7=-|To+$twD43YfFpo=<oU z$Bg_n(&431IP9!D4(YOk&M%1KNAD-HX~HKOyeE)PT^IW`k7()Rwa&aSEuVK~%%y3g z*MNKRBf0-BH++1lS(=`&jB-^rd|sr<UMT@E@<J}!d!^9qUop^JSR(!YZ#wMUTSxPv zB$k(-g#LEX!hur+n@)~EYwZ!}BG@U>&DpriRuz2&x4*%`1Jw0I*J_gv+M8dZsA+<K zfBS`W@=tr#)4w8bt?q&s(tUYiKLd2R9YE&7D{{m=kQ1MG6}?+^eth7b=gQ_x40O@M z4u)ztW8NQlD7sj_I@ui8?w)M*aFAEQ(<&I$X#-d!j>ESa38=r*nZK*6$S0-+@La*0 zyJ*-AC;!_|4vMd&=eE^wE>k?iKMtV4=bF6p#zJb*b74O*Z+x3|9EP8tz_ZIcQ~zEI zXm_>9Cp3=W?GFGye0W7Z_6gvyOYl~1&nM@OInt!90XY7sGaimJ!x65|e7(*OEw{JA z-c90tVs=wrvi7;GSJI0YJN4ryVa8bZ!kdSr&4d9D&dO^_gK5|MOYr2sR5VUasl9g0 zndeN^t<4j(J2lHIQ2BfVtvqOsanBri$3;ah_p{{kef`le=cN2Y@Ysr8f$+WEfcC2f z^UNj4{R*6U*0wZhe)cE2Wc-g74$a`ejPJ0Z_hb56u!L&jM5gXXUs?IaFX(mV7KLu8 zhwJX+dC}=*w0M&X?<}(Az43ox#@!y&p`R+&+&)aU3W@CD*@wG}8N~bCe~@EbMD<Cz ztSxy<Q;VmAYlnN#TKf>FG!#8Fu93ZV8Yk8iaM12gw0yA-)zyl7g5v_3mO2exH5~EK zAbV_n7l6+`G{f)*zLMI4Q#8&u0?&>z<eu&wsqDyy+Vn0PJSzt{;!w4R@GNc+Yi{2x z*&I4aS9>q4z1=(vRR-Un%=0FqJCMp7Y`y4}Z#*m=R}4SXMK-&<8{TzsN8Q7!ti5TM z6yX1k9xi$Yvw8)iZLB$87uknhidk&BSoGKulUX;s5#;C$bjpaP6Q|T_-SP@)@CgNu z(2vC%;=QS(I~22TJ_8_U9{X?xwXCzmykGO6+lr~wT$_d4zlb@w-(Fg#Y0DG-`m)XI z(_a1kx<cL3F?b}f2@Y+W#^Y)mh41<lw4K|Z|26w_;q1{kR?H<!)Q!>Wm6)py6b!@} zcj@eTPw9l~5VnmNMXh3gsIY2VZQ9uG_@U?-v<%Obi|^>NMU#fSvQrzbIz3+sdz8fb zH$6dRa28H8+D@&aH)0l33S~zMVCv4^Fn+Hq7JPIdgKr;cV6TU;ZRcjWXqkevuUiSV zPFyNYKWzsM>cONxcMmjdjpx+AAsBYb0pq>Qd2vBG3@Lg|%5Adw`rSrq_}!a!?F&Q? z)kE;cU>JS$J}*mKKSAQd&e-R{HR{(pA5Xc5V(tKAI&t7TeH!wO*5$WF^^dk-I>Q;O zKY#HW(p7<*eFaBm&|xt8r-u7(3}u%BeN1gEs`VT$$ySE>Xz~~Ml&1|U{MbX1wlkLo z*3w0BrqCDX^AzLZ7;pNBMtyFJhid(>Wqd3AY$&21vr8m1<x8L|o{9UyYRPJaj=%+o zZpZFkyvTegMST~(9G#2M=F4|zlYUe59!)8=^AGB;<R?$oFyxZ5?(&r$7P9G2H!iWg zOc(z+;n2J{<lT_OS$9;p%bpX`ANzQ=oN110oI3EHFIA-KHULe3S@B14_M2G|Ocn0` zXOvy^N}VNgKdqirA<pZoSGdZ$m)&v8HyifF8nT%ez_}4!FhW^{o;b#NoxzFl)Mq=r z_t)U?^YJ`rvl&H>C4z-fSoPz8<a|dGdyZ%vQnsnqf9GmgQ`sW_ysV1}dw$6~_YA{J z_iu|{vB-}YZh&R`*V3bo>u61#4zJQy$GhjA!>VQ_-tVV~%FnvVS9DC_xb0%PD}F95 zz6z|RI8B<HoJ8`)cT!!;L@ax;Qu?Rt&O4HB5!Vdj8O{oPa>OI5{V<4*sw&~+P$yg* zZ3Iff!M<SOEP3DiEJ(DqfD36pJW{?7uk(F5<+}r?`C3!YL3uDq+-+Mt8mXd%X@2b( zzH{s~n5y>1)-&J0zNQ|UOq2P<taP?$sFHpjFylAde-m6c=JNTDT<4ZgK|Pvb=A9w9 z<c1M`J6=!!d^E)G;azGN9>e>>r{a_ESEQs@(O7d#^rdI?g&6hKq~FH_R3BZZj^TFn zHD5USc7K*vS$v`TYmcQ>s*`bew=Q_e*~TlcvKV|Xj70MXW4QB?sgx<s{U^F+WBHO8 zwv`5O%D;KgtL*?esqzcGeb67?;t80T><7ki=fLH67$;uS;JNN+AgSb${8+geR*pLg zAJslkoM7(0Qr-(!j1@6>tP5spiJs=Baqwk82~`V5Ms!XzG?&Hm47ZP9c4!3mK5Qd3 zRc6qS5mO1yh&iyf$QQS1k}gT1Y@?Ng=hSb|N_ivb)fMn`wI$RsWGapd)5QPcbKu{a z7=nhm(!cQb_<86?$-2X77<coV^g&-8#~T;Hld4bPs-MgAwA1m(oP88vwM4qFzEggy zsg9f6*2^#Ke}hU)DXkIn=*TS-x!a2cG|}}7^q*$OJ*uw4OYz>f(k-MFdz^Xo0dF2y zYR#>075QDnYOwqpfu;iunTk?*#ZJNL`BO!AyY--5ZPl>L>HVa8Gy(UQ8so-Cd&yR0 zQ#aX-<8?_o>?&9UH#JAI?v2r6hh7e~8{DY$j5o?b$y}Cxj;ijrNWNhSk`CISx6d9= zC$9w7%wXt0NlPBp)t!9`ym*IlFVK9`1-I)I;`sxb`1))po*a_~@<xCDJMj>>N2X!f zH9x!^Q%kw|y;y5TIVq0&3x6EM`SwL7eqQ*SB4!17RqpD*O14YE>E~s*EOq4Lc5XOH zy#XHdA4~h1CbQ!5KX7y9PgquE$7jP=L8<Ew8c<>+@|QMnWO^UGVtGfpHTVzo%5fv# zUVs%x_rQgj+I(@Q4+pDFCvJaL-Vo`DC9m$#)W>t6_}F*ouz@7Qm_BIzQm~g+yJGO8 zCV<mo<}l5ax5TyMP+h^@Iy@RZTYS+(=>-fg&cr3cxo3YSkKHQAN^Zgz8`dw03lH3r z*DW0hlkGZ)-yvgOQaA<!T<6juBf)$gY0YOIj^Haen#awbNV5;zk<`o!<jY;MS?}aK zI8;BJ=3D)c^+Wr?(cx3rEI5UF?~h{bfN*(C<|sBcUI1f{_C<qHJ>ls$Uz!=IgHvy8 zmWNNb!huh}(}>r7aM7JH@}c#@1<+_LFD*31PG&Kvo$ktgwhorSK*DgV({y-$2?Y5+ zkTw|P$>#_7a9pppoac9ir1@R&-sIJAqRyS0Si)xoh<;y2^3Z$3IaaG$YP8nC^seJM z?r|^f>G=af_O{ZKu0v_#;l9$}qYG(zYBsfWT1;B5t~f!bFK?c<O|r4G=igHluxG;{ 
z&RTAPc`yzAdu@kUaX_uuK8EG`-t4Gn$lf0h(U!sM!T#M^*+2tCm#j>#Qk#O=f(cO8 zaS~o$k&VLx2e9X6HSzaiCsnwmLb}OU8te*q^6XpTYM((B&qrdDX$0sl7{|$liP&e9 zgK%$6#!v%83RaEd!}DF()9DSAtQOzn#3*ie_P+G_Qw~=44#s&SXHalP1^t!xP(=7r zFaH4>NOz0aWe>N6|Nned4*Fu7vNXD3YK`kEzJa<l%WK+QZ@eTJ;0yCMQ@M92>K-~n zRpa#Wwz(U}z4OOy`ib~sX$<WV^Q<#ogQb$F+cY&J6zdk>6~3Hx5Y?%!_RPmzzCOX0 z>yle3{B{?S`Lg9xnI0S<oXcIryk70ZX}D{8h`e7q;ku^>qz!^CqVhbEQfm?@+E@v{ zEsnrdmvk|3s1^UN%)+F1!D4^j4%1fm;|~8)xIWnihT2(!e#1DJyIqaT&%K40MN`<n zK3cL6ciA=i<IpzK1a=;d60?*Hu?sj)Gj~Pehh9^8^dn>8^|aygGlI#`Etgbl`f~r* z<~ZXCQF!PHQb=nr(XSFOjho{!_nmpIexot;K7I>q3yo-!IETlVZIMpDT}w&J4Nzx9 z9CA=sR(?809x*V4H_goz&$1r1$L&3N`TY>#Buk+^&7Zt7)?cNKD^$6Ej2D-jkYUJ^ zpJ1D)i{GEe@rn<rcy;?+Ib*|8st%qj4_h|{3oo`oy(&{|bE^yKUrfT&vBub?W{&)< z%7i1A0^j+wn|7=0goF0U=$8<T5O#nHXQ^=2-#m=}FbN)?Y6iPY_u<a$@ABAs8&1y{ zhwIEwz_A0%Ykf=4%32N^<iQR*!0dbyzAXcE|Idi8)OX{4ZWAF>yR-EDVK8$iH$E6y zN3YM8O3xh}cv9~=I()!_H$9$$^@qmO)|Js5Hz^u!H5b&}P&p1^C;L$Iv0=PZL6!Y1 z$5BtKG(P=a3ETSj#Lfwhm?yvr^99poU#ur8icFj|q=3uze-aL>Kz7s$m)5Rr1cku` z;C*ZwI==<1xw%OS`nwG}u2jYFBg&XHD*__cxTD{+-mIk^EzZ&2tlfVeG=#gb-7-&J z^!lI_s6C#mXX@a{DP^+LzCIY*JfXI0)oFNp<DR5Zx&!i(eQ=nPiQs^YAyx5jtlzH4 zx-C<9?+;~<P7^xf6PE(QLObwVbd`L~#N2D~RcZB3S6Eii6F)q4gd?kG%ND^Y7~Ve- zMpIXg$qdGwMb$7MXe*>e=kbc_Slpzt3%t`EcwA~0|B*)Uty)j+{B|z2Rb5NpMc=E< z(?b02&<Q`sq|h?6KU7od%!-v+?06!b%|M55g-pP~S6sNM^r9T+Q6<&i{sv#2i(t&M zj(DWWoxdu_;o<j*vQzULFt)uwMPE&L0ky+T^;0>-wI}zGQmI|dP@ZEyfC_6*%WJjH zNmfVBz+O!?KGwQHT9D!^c%}&)Ik+$Oywskrsj16R_ny&*QcbEJv<pfC>*eO>(X!#q z4AIfaK%;AdJsV<(S~cI{a3kV}xaFWeV^Qrq(I;ugt>h=>4J!gS(%R>><h^epgza7o zU1xU1&xQjr)!l(>@&~Z{p!eYNI7r?Xt3bCjl6b%komw~1w>l*}RsPdQaYu)M+6(W@ zD1KowJ0|(DZhD}Yf%n4t@3W*4nVX1bP2l!U;rP%uKypn;Wv4!&7_s6xWVW>98cRp| zFn<j+)c;2(Z+GW$i%q#UvL|V1?*-e}w)pCIfao$bK`#SsNIHK`aEHgT<NiXdj9Crd zPp7cF+J%&IUHQP+BCz)Lz$YssaP%B)&S>Y#u?r1(*jx|T?dZ+}6?0+pFDrbuXE|lw z&Skeh>h!B&F{v#d!7rAslFjFo3#VmQ{%a|?@47#w=B>SOW$Afx{jZzX*e{<b?BaWh z-!mH1XgC=j6kWG;Mf{w<2wF{Ka;e=%oz?Ery_RSy*%&4`5DwDW=0V)M$r(Q$x(JHH z3%L8LXfC`s$?MP$OP=H@cIDe<(Y{P?{^M#)M<SZ3rt1iJ8FxSm6#ICS7CX+KCEjNR z)lj_15M8<%LXO8!_&Dh_6fokMe(ND$<p|^lf0k^DVsY<0BTPP^!Uewi{C=Ii+{w+G zjS|Hcuq=i({PlRz*CA|w_mKSAu{-WwaR?R-QlZTs9oW-eo4f>u?2}#};VSNi6D&vK z{>=W^KKc!muc-n3$Bne#svo}0y-mrNb=hTkA+4*O4mTot@ktZmH+$HR$JVIPnq{N; z&ajhkQ@;?$XocW7gAY_zZ3nyVj;DrR3T)D9hKlCBVb33Z>~uUFcNn;HQJx|X?COmf zUxs1y%W!mcc|`X&Ws&ElZtQU0r1r}jEq>baOB$-%3+%doA?5j_dDBW4dMf5J{s~>7 z?0hKiPP#9(pEeYgUX5afciCP;XXfKcJE9%GgW)M}hL(1k5V0zY3x3$)l<NNcDb1G8 zc&70WKW%=!q!x^itfmO(V7%CRj&O4uY_DyL_w>hb@SjCsBJRmuU)izUmT|bbTQ0u3 zo6qyd{iwaCJRav|`J$%ndfIk*4m^o^2j1<Iu=vmf+BTyH?sT{$M?XsEY1@-w-K1## zYxGv`X<Ue(YNDaVzLZux%fO-DGo<eq5>Vr7Z@GJk#J|5VtTeqyvz|nACzB4`;kP5t zFifMGs*$+rp$U67Ka(7PY=V02^)TvkIQ~4gLw*|?#f5{r^X9Tp+&m(fmCCYN>9h*2 z-4l$<vqw|%EH6I$^c^jE8cC}*_vVAj;(lOvQLe7|E$7cL6!Vk|^!)sEuhokq<U*e% zU_4@t^kkhi_O$cG2*qBkQf<zG(mjegU_y&#W>aMHGz|OE6}BpkVV40N(S`bP^o+5n zb?AZM1dPJ^D;K4!quWWf7Xkzi(1!b-kCX2yi-F%~Ly?vAsqK){K$Xf}+4iRw_fV^b zT}QsafV(lE9kBsQ>q2S+?pBjYg+Ho(_2s!`?LaFvha;W4!?a^L*z3!9)IMfaYnwcf z-#v)t{P{WX>YE9#J=g<{KKgNMnU1t7>naUf9wqO;;*ISqJYl%+HmPRuSx~SFq~9kE z@Oq6Fr3HuavGCWDclAk#m&S5J^8?yqZ^<vMm1^4mP~!DRpTNXPru6)HY3=UM1?b(o zBPo6!&kaX(G0qq9g{t6>jUK{%rk2S^l1|dPf&EDB-6(qY{hrkIo&j5R{13uJ#{E$z zZRyVUxzhMQ2fSL&suM0!<(#ecu()5M;I;VhWa~`4X|b1PKdFX1W5KEKnT?qz|H@rk zE`j4eH=MY{m_G&g#TK_Wpn0W{`WXF>q4SQ%`itT?BBGKJ*+M83vOV8(DA}WgL^86X zkWE&5(oRF#X>XPBy+>Qp@JqXB4^3&$-~HP^US5yq-h0mH^M0>ln%}tpR(B24&m9UQ z^T(s7mMrF0hluf~t#N=x4yk`sAjdU<xJ+v>y?DZ<O%rBN@%nbru)PuX-Wi4Z&$+)~ zYBA<-sbX3I?pS&thX(OX!NV2h@Ij}VX-sfIrEMCFy{v6qJA|Kq$x-Z~U4NFEJ{0m2 z2BV*5IQ{z1gOp^CveRGk@b=EZC{t@gDW$H$?O(Ap(9;$3IWuZnr#Y>d*o)fs-DGmF 
z4vM3O-T})DJ1pH0Pp56T<1dc;$?d+er96Yus#FH$3Y<Z+XfpNE^%uT<)I!ILoh<Wx z12i7*5T^91gp3}}$_rGWCVMG-c&QV<j$g@YOn0-qs0ZxvnNXbeCXCM9|H5@rQ*o1N zJl-474auK>Kz6e`?RzmyoH$zvM@n-D%Y(tpW7j}7DV00hY>qXqX!gU#5h4q)QKF;1 zZa8a*GR^jpBa`X15Kwc5b=-IYpHCneOqQVtO&@f-(g82(8knEA0t@f{00#<WDRx#G zDxa+rsxE(qA-7X;>-G#7b-<bO#*D`sZ*IcWICr-AV+qq6(3k$ES`v)Sr-wV#&?>Gs z7F8I6)%<i)a>&J_+KVJxFU4VG!6Eh`?JC=6T+8YvDx!|<^TxQ=*(}}Gn7^;a&}z{E zzjv0<0_SMTPI9Aun{PJeAJ1i(xBdvzH%?%+>-sPk%fsyXv&Yb2ZHI1$SFv`(yKF7r zk###(Nput|nZpAkDn5A-{^=Fcq2)0|*OxYSFET;rN;$e?s73dS^w4x=EXkg@1QtWg zsi4js)3SBg+mE?4bA2gl%uHrk8?x!auw#<z`_6;g{7^i+Kwu>sy2V2eb8y@)Gt%EQ z5|3SrqaSwwW+eB=jtoNzlSol5@7Lk?futwj!-Ti`G<#+~oe5kf)*7Z@?=lk#ITTBy zs>7L@(=V`D{t%2zYlRC+DG;%GBb#I$2adcOdO~I{$jAN=Y&VUdi$QLf%X7vRCrqeb zG9FiWYm>=+Ep)CKg7Q0(X?gimA^3|sJNwIt4cM};;o8lkV3T)>DFg{vq<BDlqP&W^ zg!dvjm7{FT(HC%7%^sKbY=dK+&UjtQ7Ii0<(s(&1{8^?8Z?(tJl!^wJ=#+}rtZqZ) z&Pn*@`2r@dUI{G&{qR<gJan!1AvN0+F=zfAn4K~e{kOYP;3+9Oyni12HVptRWzK<a z_o8c0el>1YA^71HNy|1w!pD(uP$SzU_KmnB7QU)r>nb;kD;^10I+ObmeuiPpkPO;X zs)5RU=O?V|gm>$E<B9g~tl3kZ{^(tT^vqHCs5qMymUHJp&Ttx2A3@6xwX=QuBUodC z4Awkb!A9u$V8VOuT8cj>{2iIfR2Nzbm(LJ0;+g#&KNO(cD;||sgiz0c86@4iPL!)Z zCYF{cQon;P==1UeySwCZL*5b(+_L`z>p44>vvLoD{J2$(-X=L@YSI8NK2K-v(i&*j z>jd;IGsPjDDdL4ZUuv^U6klJK#(Eq}G6#82{dyt}I(!}8e|5vtnrYb5=OD{0=FY#Z z_3TsBP}uM+mWIol;g#scV(_{oic!dcHzOv|h$nx<SKN7&xN{EJrs`utm4R?*@=q8W zaHvs#bUcmM<Ng<(IlXw<1CB9w_#)*?smpG%JM<SUo2;o|WG^^WX3QSH;LH-`3h~ed zA9k|%AS+dhWiy|9V04NJ?nOI%(Ud~>wfo@c?sTwRln7Ttzl+}nt`X+VSOnYd&8ok$ z`ZtuFOvWR7o$$}laTq68Bn~wB#7+ig&;$38AV%^0gwb=DFNTVPCmT1=%(o!AxzV`k zxsdNt4YrdVSkLm;!hnn(FqfN2p_N0~>C0ho^}$j$siuV~<U4~c{~dp(k0tQR#O}eI zL8TPKO1zwT2Fn>EFJ{rhK9ksaTNlhZX@-%P4DiK71)4EE8GnuPpojc_WJb&!_H0+6 z*t&O}%Zqdmz|=yBbkd{v@3z=q!wxz!U$CltYy9d`4@397f-Qgh<A;q6%=vdB%DhOR zeeVZKMlK6tWk!~gfKBrJzHCOur~*wq?=tk+5Lm9BiZx_TZu|DariSCJAU}%MNFKAs zEbfEbk^@T|cfp|ieD2MfEgTtq5I)Gckdull)%zLIeam__b~^9eC0W3OHAf*zxltUr zvOiTX*vI-rJYiB<QdoKCF&nbi8;d_qq{W$WXxC>VYGkOfTW$k!bFl-Z9rY5PT#lg7 z3yE~0%?Q^94uId6FEio35;o_$<BRX*Y|^i4u;zJeo2_oNXsB0xKs4_;=vFc7NfWVN z|Ep+Q<W27!{m`YzmzCbFgZ)}M=rxRYccs2Sr$!Wc8SG~<rQ8L6UsG^sSjxU=Er7OQ zJ34pq6xh!XC9^~2?A(ty^qJbqTH1Qi&ebDu+d3`k*Kk=VSe6LBFPAfU6;rYrD^GDd z{OF(`@?LY1pnh`%dhz|wTp7-V>HSOip~B~T25IQZchE0N!`QHj8qwxeBKlj7qH1GL z3Y8iUp{Y}-^hhp^9$$#f8<j9+%2<5zR~vWN=hNaXQz%<&BO9p~OXo&>geM8+ko8g@ z?mhd~sOA0*PB|7ctsn#L)cVWwiw+bu`Wx)N{)&~Vz7;kEK4$S7-!Rpu!|~w(buzRY zB3#}E7<um<^Z3Hq1;6ZQ%qa_U8t;NSUj=v*JcuO(*^-|)95!^@&<d7FW_wepeOVN$ z6s?6d7e`|5*Ks_Hbd<$QX_BdT3tJJnjFtBdfr$5$$i-(OtKT&OKYs3uJ~mZis=R<V zCxp<+JG$b@z^kA-D*(?(J5X~&GP#Z!haGMutS&sA8U}<?sP0;jyC=h1HWuT|b>Vnj z#sr(q!+3wy4(FA87beEL;p1BdjYmS|NI?k2nHeL6KT{U7q3OBMYec13V^}~JG91v? 
zv(9zKZxf1ja>uEUdy$pBDy9EK$obEl(mJ++=098fB6-9Xr3qrq;%}}8KM$b&)`~c5 z@P46s&r&wPTm%35>(ald3^sIu0nU-T#gxWWF%4HK7#M!f)nZQy-5eoLRWq}gc)^s8 z4}A&`E+vzP%xSQVbii1MLfIjg;9J6QyrS%j`vMX$bhSNxeUvTOZJmJ<tD(Z~fn(Uv zdISEM>P2H;l(RttrchAlU^@Bgw=ik!w??n0`UnY$Xz}kT8yszlH#Qys|7-neqt1PH z)-r|j#W%qEQN{SWU;>V)ehe!gMBw6jW1bJn!iu`fLi*(#EN|1IxeCQpd*we#Mi}?1 z9li;B-+u*5xlnqX;e!otQ%Svt^C9+guO#<&ol+Xh_MeI%@9-RwTN^^ZM~xBMOnaeG zwI0=3Ps7ff8t~cXf{Al=@#5T{EM!Cg&*(}}Qt+OAR~$(RlX6M=wkplc@NV?mvy-)^ zaCXm+R`$3umz=Cy*%rSp7(6jV=#_q%eSPJDhMkt2`Tw7=O@Am^igQ`p@q=Je?+t>^ z3@WVSIl8*uFz)m~9KLQD8*EU|3L|+Aw9}ZD2F((0Zu7vL?-Nkw`gvFy%>8h?r-H}N zEIg+_1EX?mv1OV)47sVvw6-r}XMaqgBhlaBU$hAwS3)TC3WSF3Rbouv4J=*d0L=L} z9Tqk$gXEfe;nYj+Eh$|Ja}0*lbk{?0bGsj{I&}xu`;4dK-{N43(OY)ZdkpPRRu^wi z>Q6>1mbo@>9!m#Wqj0QUCYCiEgh{g%X~NuWIwEBaTmMtTg~xBhpgFC~!oQp|U*c%& zjsS@BQebWx*Vyp3(~{cYsWfLC?}_(oVW$4J?CtHHqT->GY$^Yr-xO>J8Y`SJF6Ip9 z?AL%y`+H%#?QoK-yeMW~?STm02)5(7I_*6^2BrQ5lbJ^v{)V66($Nc-4BRLlzBk)7 z<a#P9-!vm9Sy4PbI)+LNOeo^*bUeQFo$w`c5=nU|&@bN)uvk`)R&iES<uPT>TgoEX zYKDjUH8-}^`ZE3ZbD&#A7y9Xk)2zCC&>i-Y1r)iFo<$R1&i(+^q%o+t={tMn;ROH6 zyy<(amZX30Pq5!J1%h8aVd+ydu-u9}`VK8*SN-zH>SQtby9;bsd^+o^5|0-<4+@$F zXISa##o+afXEpB0Q{YX1G+H&1H5lfz*njWfS78Sjk8Op2zScCRS{_$^w4k3VdARz3 z0Sq3z2OeKr1vW|F#BmO5#cxV`;YRIlXqG?9&YzE<1erX1z~>svPjm0e+YlUdUW3YH za@hI6QfPUd0!uhIXnogq*3&i;``RaC_C-hTrj(^RQ#Xtpordr7_ONjQRq)@a&)lnK zi6_+EB@ru^!GAY)f|zbYk=-N6qrr?eStI^lQ$j91gIM{c80NTuzvb5lp<S03^tkO1 zAN@8X)y`@3YhYi_SlY?H+#84f-ZNM@_qjXYzbl^MT!~(@`%+EvIR@R@xas72(6Wq% zvK=1O^Zqty8=4B!(s<6+OH~Y!e+Aw<reHC8;oHdmC>z+sDke69_1s|4^0&f|ivnnR zOD8OUD#5?H3D^_viUHg&dt`wJPT1KO5A5S?L-Tlvom3u04_(JXM)!sjw%#~AR+)Sz z#*$Rn8i+7YM1vUx6h3e(6jqz@O#C;9FYy(_@{`H%bUv(V=KToGR-w_x47dDiW7p)@ zNvcQG3T(+VQu`Q&BgZ;XAH`%EIAAwC^b2L7n<tXVO)E-0KApc$ZUN*R0*#x>bZDM8 zHr(%O+_PtiaN$4zew9ShgS<AD_&yNs<@Rs%ldEABsjk8pxx4K6UvIiHZ4#QVtOL8( z!{J*)ARA`oC47x=#X-UE#Fy4C)U#wLj$Cn<e>Qc*M<1UEUvjpvq>)G1fMx2~#d&Rw zIl80~ssxwfn?SeFhB}np@bZc%ifnHO$^CS^!e_RUTo?GDc7U1u4WgU@)@(5E(l2YY zVaYqZ@FaiJpZ}sxZudsehNiyo`%{oOsMZ)N;??oD{!1ZBHUU?Nl`z<29vfuDy<oKl zY^I=(g6?3PJM0##==+)JeC>v-r`&}oM-SdfjBl9wV;FYj=HgFHOEQ(?^UNpNqD93h zthhA`oO#Djb^Av#S^q4{*xv`Ym+ghvx5lh%V;#8JPDdf5H;!5&kC*38XSEyLv2f`G zcKXa%Y)Y<UllM%cmbYr6ma`cdjs65COG_wX;!Bnyn@)GWRSNSIEQIrK7r?e1xlp_z z9eXxQQ$$NDt}DyMC|!39ic=zw8a3+nji!!(YVh?mrHG-f80(^hz9$CK>Mv91*`adr zx>p9Sj@$*m7wS^{XH#1D?6olc?K)xgA_=Xs@n#2Hx!Y{#43r*sp7kwM!V2SIH2ZFE znzCw6W88uq{D8es`fYE)YhEyxd3d7kiTg~q%$N>(|6$ipyI{TNZq^ZQi&hz@m<^wW zejhuLP1`I>?Q^HnsqLBKf~}v0>%T8D?adRBeO}5WvJT?Lh>7$jdJ4_?=HB>eumLsX z=V9&J{qWZ#oqZh7&nSNlGT!Qh>pD!hi%=g=);<Tzug_WCk!VUQ;5!A*E8OTiNzmHq zNv8)D(i!(_;uya#LQHUPp@MTZ5)7kh3BM~TDkB;nu)~bNhoBkb$8<wfY2XHF>hkuY zx3>df*2TZ9Xy_oC(Ata2IHMu$M;>PWc`vlgnIJB_QXuZ^8bRfGqrtt(7xQDnxi4Fj z_Dr2d!=%@V#@vPaRa=hs29IL9`i#PnQs;y|Ek69sJeJIEj6f~b67JuABbpRciTlHM zf!nQ2np&WN;oG%{iACfQY)$3c`cQJ9H7Vyw;pbNZtZ0@-)hD_j9c~X*hudNDwrn9{ z=E??BRt0-kDPzB^(RiBkPBP4sY3G3#@Ge6J9;YvbXnr5RIP0+Z_IVFGIr|e!obE>} zwQh@adK$@;_eNJoDZIU)8vdBgWS6Q7>G<F2a46Rl%-(sE!<J%l((%VImo%84m?iX? 
zzYA>oPhedO&Pygv{R`JD{(yA<GfcDClY6z?@I<#3eO{a{zL#)Ma!3|BU-Kl7)&cbF z@M1PPHlFTiY2wH~r5NJjM=e+^?90{@>#uwl6TDLC&Vv(hL(7##wtQ!ns{_!^ah51E zc@k56*NejD45oRPHEGGmi|p2oN%-1S8rhd>_Qz-u8%J7b?YvO3zD5ZH<v;U&pDJ1} ziy-`?P2p@f&6ocLe(n_wzl-aHDe=QGXF(VlwCSOly#$Sp*TT(VtC{aQR~oUq2L9Yi zh9?6}@r|?=O>`ZL4o^P_8d>u6`H>S^*g6Rvlfv=A{TOV_8cYv2S<;Z*wa{^A1K7_p z#^vpfq+Jun_u<VDe)j{L@M@N@xx}2_8-9VAT~ah6Y9N*N&n07t4U4u|0W$xz@#MA~ zYJF7B+zN<2s(;LW`zh1*Uh1wisvZ<p&m@-x@7UFLWmf(-ozisqF62!CrWbp`h2mr2 zcPo#)CNE}c5$j>b4Fkb#lol3@Yk{;{6AUcSpu(l<7=Fne9`<TxGv`mmyuo)RO`g>d z#XZCyPi|q>=34m9;Fs97#SWj2Tqnvd<zCrg{jlSXEgf8=CRTCBN79_TU}5G1S0>*Q z+f6;G%{HDjZ;Zh@*)Z}~QlqCQ3b49k4}5x64gTxe*yx#3sAj896DH(Q>!wF+m%}1f z{p=|_B57wZ?JQem%3Vf#Gr>cCI9fG$bM`IZnJzvjejY}0NiM>GLCZzAK0Y+kDwdM< zO{kxiBMYuu$;_1{Xgw*9aELyx;T`Yau0C`%XT2~69dVlNW41x&4W!6OXph$+C|EHF zckQ?j`!vkyyi)+hr`v*<RRML@acm>+=Z1+WJPdZmO>*tR-ouB*2QiP}$boTqdQmB* zUzR}=btiiJOBoHu4#4YO;iA{jW|o*7O#S{W5KFc$69+H#LfgB}c;N4DQA+6}vp;=_ zEi#))v)oKDD)^t!JE)WCRv6-r#gou9j0q!yp9(R7YuRDhG0eYg3_WmkB)gc6tbLok z@N#Y`O1=6I3J%O*e#`pdiJiAWevc(=NGrmkAP2f}{05}{Y+{R3hhb<)0KS`B&nCQ6 zBRz1z4`U`!!o1h@-CLf)U<)NsYThH{>@FmWYbAnwYG1N-Pvu*wR5G_ngeKV^Y~l!C z2<*R1R1F*r3jgsl&-0a#WU!UB@cF0z=AEpPznhw}!^z=SDrv2nfE7(z=-GT8GHML* z!<=GvW~Cxs?gqSeZkD+8k0aV1_ke)yLj|ogWu_hUn0+?d1|31YvCXxG9Zk`ru$C@R zvK~i$Rt7;OcYXu}9AG!&BOo$-m8e|HS>%rWF`?ZFI!XuPjn-}O+Pz$8xu(lBZzhvc z;3OIo(Z&L=&u7cx)r56tj)2nqdKMd=Mm`VYgtaFE=%%d-z5V-&8LRyQ-~5H(m!?Nk zmtAG;)h;;foCduq=Whv>FOVJZoVES>1{Jza_}*!>#Av<@JvM3<lZMSepPoMW?#(EY zn2lgNpYCD@7e}%Sm9l8@wFzq9^+);I+3ZkZt=KW1dvn_+v#u~}-m$m_57Z}9`F=UU zq*wuDf@JAB{|-1PcwuWO=b$uQWLBAf*cg95mK!GEu337x`jHzNmfN!x&(wuc_X>E| z&X)q5M_}iH)v!`!6N{TQn_YGK#ft0P@MGXOR6S5ik0Y<Kb)uahb=H>+iLk{r!?n?+ z%?bU_7GQbqDBSP1K|EuL^l#Wlc)HyT8~-K><E?&jANv3{_=h~^Uq8xHgFmy@W4hEk z&Xb;fnofI9x54$NrZ|stBVtM%=#rThc`Q^1xz4-wdJgWKd!5bd(u~QWei&^(8icJz zPSi0M@R6+#dS_Izy1qu#ob5<+BEx9L;#T-`$qBt}uE2y31-Ps7lvwnH_W{sAaR1uE zTDPucTNZNfz^(;wCfOSX`A?**FdOuGcbH}8<)UQyqQ?CCaU?Gz&3<vF=LMLCmkVUs zXf_geDY}uPO)rXh_Y)F*^dQP_2Bl}0vLiD4*h4g+#Ih{mfv*`J^d3$%gWog%8N*@v zu!XGZ!F*VK@2hB6lt%j1DQsl*FV`zWjtI$3>QtlSBg*zXf#?77>1N9>!QK3%aO@TL zAm5k51LvJ^_vb9Kx-^CKPJCvHei1lSyMg^z{(}`Ky=Iy7mFVT4+YncthN@qRXf=2X z1p^z{9b+$g|3DcM3-s{Rm)G!=IkU;i+#6dt1E02Bh6S6f=-SSoY~<^^jnA%afP3VF z+a~CU(|_h*2G8c?>+*N%=6L)zIi0?E%dw`IdN?~wLU`^2+oTtW!=`88^Ny#Yr)D-L zdicW-&X9X^$rs*_Oa=QxQ?TsU2)f>FP9NIlvA+JjG3#du@09vc)rwIFZnZGT>jFIS zJKX4#xf`?{6-iy1`{MUmQ}A15{O&4Cx-kxTU;hewqIO=;47me4>{8L@2JcMhT!O0| zb6NWQ`*8c+3K%x5gMH*q#C6YzWlK86$4v?NAn+$FI8%V%o2Mhw?++^nOrTIp6BHFo zam+s>v|7GEQq<oU)$Xc@KK_0*dq5s*x_AX*FC;>P>UWl<a)-@ZbkntTObPY9Dv14@ zrr~b>X019IF8CHXU}N%MW^sES+zC4&L>^D3?)1}ae^U_7HL+yQt47ho6a$QRN)aT{ z&%pcaB4Fwl#I(wO=vMuYRc4Q2ZOcOF(59*Q%1n*>FH>n-hCfZ{H58Sfz7s92t<fsK z2jY_pS#Z}MIIlFDeVrUc#fkS|-G*<hkKJTatDk^w*(z-CBnz5vKMiy}wu8%?_bfZ{ zJ*2+72V*ubWgiQ4n1R}8=+GF1m&POsyYBfn3QwbO{<BbMdD+r%Z-+Dv{ML)q_BxRF z^#q~4*#k`)_2~WI7KnKHnCW!vhVu6VNoJiL24!?L{8o;lSt$p==izr@6You*%j$%0 zGu7$rAPL^wbQ3}w%&=^|6+dep2n7uS<&J1!<Kr@LY;qi!*BoSdtBtXa_dlNOj6;L? 
zOx(9D6)V<+Vdr#WCV%3^6URs3mcem!_~`%~@7%`XRNEk0%NPw68`$T+f%LQEEX*oR zfQ)N<C0VhmSaUcQkB-o07CmR-iJAf~*&jz<9yi!rRtIGU3&dBev*_4)RazXgj)iZo z5{}P%4u#5-$<HPRUTV7G{d4AMRm5{HY$yEsoq~xfMQl~NgP>h8RFH}N1M0yq+1M@8 zuD5q+VS02CeOh>r9iAFUrd6-l`AxZWr|loR|Mdy`ImiKP>~}G(E0$RPC>*EBG{7x> zUmIG&d9tmAWSaN}`Yh2Fv*v5i$F1(TbEyRVSH6Hta^op`-(Z+;=1ckf?uzZBX}(?p zyT8Z_(|lwxwnPQhG#9dEvIoFz=q&j8<Q8ZIc7yMyonrp>_e?kGpl~)RgM}V_#va8z zV-LDa@kE&?UOcM+8S!~Ei}O7jJ$e2}Qp<9hW2t#UDX9z@N8fB_P>uJV#;KQ|LQW~~ zrlp+$g^TeJq$|f3&(@-ZmN4r7nRmX1NRi?R8LY580h%8#!L448>W=@niWwf`?Dldy zvVK{FDf3lu=h?Be;KEus8+Z%spK(TTUtMwM9#b$^GbQWr)9~+bG8wGmJ52rKur$I~ z{ONZTOf8nczNOz9Usm^~7aQ$JSiJ?VeRQY8zQ(8(;){P;M`3HAD?XT+fhCz)^!bx7 zF4RxOwgux@v+{fPdBIm$&NDrh!Z=)epqOf`_luurDAMu)YIIBa2utIgZz=USK3B0| zQ%}27(vZ_^ZtozPV`NTmw>qM9>|kn}r;ZCGGJ@RvOkDSfft2B73i>qw9rL|OgL8C# zEM5ds8`bcOwJF-qaAxz3Ti`&(CNOmlq?W1~^!hpzzi*s@t+VvVRXUU1R_%|^t~Wth zKrC+CbBsk;yo9<PZJ@;at_zP9;Nbx&r2YG=xUzT_ODXsvx|?R<`m?{7y7XA)C}%<5 z^Y5}t7y9FmJLd2q`6UbfD4{f~p|mW|o_eX8;(+P=-DQ12(iJfftw&CQcY89(cf}Jn z+U*}(_#ur(@AM(#tT=o>&!5i!3&o%1`6PAO5C6*-#W{;@D7kSgne2MV-R^oA-F2aH z=Hmp(z>u5FqJy8I_qH(|JsFhh^ul=)55cMVO>CJ(DSmr24)QdnV&vXVp}prB7=PkU zv&zHZx6BT!<Q~E9Y1*U|)t8=aDFvm6hnd|}p>cWd1>&|)J(SApFWl|vOUZAKGwJ>x zz~W*v9MDL?pc{_d{j`iJT#?Xlem2+aNT-26^|9-N3Eh?d1ID$3sdbJEP0^esL~R^S zKmF{fw6lc8EV=<U2{F`X@SDbmg#!iaC;V^5?^#Q{R|{K)*iq%9l~C|NjvTF0@!3Oj z8e1d5lkz?&b&6-5UwBa3gN4F#XJfD!-k+8nN<rU;GwFSI9to2;!|%dvP)n->91_d6 zB#i)t5M%0ZqfSkqtT5}?YZ&F{kNT5M&~Dv2kYz!1_EZ+0Dat}!ja)jhdpJ%$Ad3#g zt<K|4hERd*AXLBfSLlk7fdLzDh_SbZbD#AL%vc*uU#2;rV{JaRGb`5XrXAhtNus*x z9yq_-m`3v6LBjf(Sp9oDd{iEVy%krmj)%kW*6{NTrl!%OT|?=@K4}bpeGqCF)rf;1 zoPnJ^dAK|=9L>5WqGxL;>ktmGhHX<Z$@7n}KdW0<az%+69K6XdC`H)v#|g)dbK)I7 zK7X|4j*4D>RH^uYP088K6n@_mbL+by=jeFal==j0U%FtYu^e5Df6g2yq|=xiW*9NC z3Z_k#gJCZ@-|L0~7H=#><Hc%Z`e_8&wy%Mr&KL^6+06I40&P86!Ni$7W8Xd%ZnxUg ztbSp18VA!6o*^{b#&K3`1g%YMZJcN00VfN8it`V@5v&xKgXfkq!N{J!M-44-uj~_+ zC-8S^g)2(;&%u*B`D|>+AKsG6rK}!hQV-Y-s!p%j$b{9RO_K?HtkQwy`TBUsHx(C# zY!)}9YvJFo!>PzXj^5@);GLZk$`7`}Ro8TJ_z`33Gxs~B`!!1B_Va9f<3jeRa143X z`QoQ1QR3VEzge#Z0hs$>F1zEOh^M_&FluxX-G4p^w}no`lDKfnO!S7#^Z!Jt4u58p z=0~n?H^EBjhw%4y0q>C{V6<W|RU{OmYd2TAui67q{{5gVr4I)0wquGjqtH+Njd&}r z8Lmcj^URwux?WSEmqX%l7tho^t5K1}hU{l5OH63|dK=U^3utID4EHDnqj}jQ;UVX! 
zt$ZNe_+Uyl^;h)c``mKz*tcn@6}5;p*A1l)$I^rkOZ@1#R$uIr;atQMuB1I@2^6<_ zQe&T`Y@&i0DL9+Zv4<PQ(uyl!bNwgVxg&?P+M{Va1<(yGe(#?ahHU>|w$l19YnbGN zxeNK(=V~K<O6M67)wk^REZ$YEGezeeR=78DEWMhaNn{jHdWmIlTe}3?e(+AH@eZ(2 z$%Xy3Z&;7NG-gaTX8u>(*_}h1#fi!x&@jY^Dzjavvf2q}XgZOqk|Iu+&RKDgLAmi6 zSb05{a~|%CMH)@4V2~nSK6r|+$d<$KQKQ%h6LWOf+J{*_8Ol6-qu~+1*J>S($3y;y zg=Klx@abtZ-VW)<hMvmB&BM0B1{n){|L+x(_Vj{@Ql`)|F^MjPePF`#crvvpr6egI z3_H4mrDa}W+xaX)C0Q3u!q36YrFJZ6#{_zHD~Nh*QgDVr7gK3@$Nn91q#|h(dIOv% z@xzRKc^-Z6u_a97S_VB``<eBvbw&N=Ec$!SL1Mje2Xi`^i9Y$&l7d{D#$Lr!@K|03 z-Rjn7V-^|`{fS}v(S@Y_p^_N{Ed;%uO0ndI51C!;5toH3WA`~JQW&U#8OswX$Uudx zAFpEPhov>nD4ayeM`wxM+oRz?&sKJ9{S|OA+{&z5FSE+Ahal@}Z**L!kCq-e^zf~9 zgK)Y7E;$v0ndTyPc%MDmUe$(TUo$$|z5zO>7?9ujTwFU-ih_Usg(Ew|alb(h4cP36 z54I-J%2SI3g(}{6dA>$G*|&*3-sTAxUpeB<fqj{A=050Eoka&emx$Reo_HhgJ$LCu zq4$fI(6N$xIxSMz3jT(>rML;AXKfWfB=r{Gi19eu-j44PG|^qK!ajS3P`7C)epJ&V zgG-_Kym}GDZsC4W)9<jeIUHB_SaR;?MYexl5H4;xBz!ko%6613249}@`uRqW*7Huu zev9=&zfohT)bKrP8We-)J}zO0VV3yDHbRU#`J-|D(6eCw)k_$2Egnz5Go&Fe14(T2 zXRT$O@OG^X+06SchGa!zfYJkq&i;?Z-^r(T?IOAqvJlFcl6dl(H7gB2D;d4@9awqY zYEW=YCbxMq*tkpqpTICmTc$?SziHydGHckJH<fhXc(c~pGmR$VLgAX{Ae8Cz2mZaR zWfmcAf>K3|#Q9hZ`(9^_YbM*0Bs-n`+MO?+_{%*LmcNDi+8<(V@K~_&+0N2Vd(t_n zKkV_73bsrypDn1!!tRMCG&E5KjgEX|8y+uU`<16)9YhHCwT?*6bdJIg5i{6+dkOU% z9z@4>3_+jrZt!S~r(>5sGW}UDnELrRSpD*%_$Sh=*2RWx`!|g$RQ9tmK`U9r<x*H{ z7Jyz6A6eLBcN&!Ei3;zzSGLrcUM6!-wA(q>KjJT2sPh8k-TP77l$C5kA6?8<T?&oM z6QS+pK&Z4d7dzIu(0^$b_%p*C*AE>-hW=lKKIt>5$}|bpq(8B~?~T}NjR*=JzEaew znMy+{tm%v-Nt_znpH?LnV4$?EsD4hJ9<NU!*U<~a>G^)R@#ZX;>>}{&Ea%H*l%kzm z85{k+m`qXzv)x7l8|2U-(GB^|wmVNij$DGn^*&I4;x&6P{jT`S=d^JCVF!Dqt4gw= zan$uF7E`Wx;%admUHrHQ(#*Co^^7NQ)p{Nqx#<SHbI}ymTvA1g{Vr&$6asH27*bcC zC}#L43cvh)!*qLffVRwOXH&CB5dJ3sz8>PP#&KR)QIkMT;b)o78qNt~JJ^e7b6I@s zD@oKx1-7s*5O3+vq-X!GLq~Hvo9*3~ncwoK>(PGr>d+d|SNE*&>%}7&wyLk-$(=C= zeb>SCLM`$)=#9q}HSvSy9kyEa68rG9nB_^YVdIpN>F1rJ(A8MSUT6EGto=Op(kF>J zcl5(&PI{<YxsnZC^$2#n?Tyo}Z;~u58cg;xe?h;|_n~^2G>tKxPP2KYefs7oJT_2~ z+<Tv7&7U_jKOZ~SJ<kKNLaCB1IU0aFdKI%z3Yj?FSB2zlZ$O{A$E<!tIaBIm%5F`{ zpd$+7==_jiTy}jaXj$3O=>tcZvoF8nsYHo0qHeQO8(y+Y#TFDbuZVt@M`IoTzMsD; zgj_$gh&c}eFgtJrPD%KWt=Qbf3?Yo(@{aV7E5-Eq{(e|s_8S7!gK_EM0+b22Wh-y# zH9k5tjA}i7@cx}Yyz6<oAyj)g`#z%pPn3(`+&zl@-TjlTJXc7qnJ1Z|u{S+g|CMJ9 z!f@C(18S8|!T%1Ap$o3g_^BihH+Ct*f0Gr^_nj7{jgDqInclQ%EobLfxuMG5*=*FT zUc46_gHu*dq=&r)Y^^AV&L;t=*;o$Cv+VJ-?rkxBjUq^?uV@_5zlF^<&LBt55dO`7 z7Nb9mUO5~QC!QUK52NkLaDOa#EU#kKv!~L`%TZX}kcEZ)KZDE_4KWVqz{dPYI@!B` zJ~dom8|yy93%<7vO7_Fr>!nz{hi4$zMP{lg&}ySn;YNL5)>J-({M%Hqpy3T{b}WR( zH3Mj6$tU*WYzz)Jo`Kp49%R%BZ0k84TBnhR8R09~oSw(5xJ--ppv-CKf84?ss)1WG z)i?vk9qU6k!?Gc#SmXA;Y&qZ4HI3m+&lx_nw`(lTX-IV0Bm`i6|78;DS`Bgaj;Qys zgm(icU@Y&Lk6W<<E}UA=%3F>C^~uN0e5UGs>@k};eW@reH4ww<=d;+h09-hMJEkLt zld|c0cvWx?X4S7}cjjf_*0}Yo>?tyz37^;;w>ow*-iT`EyI^*e9UMI9j&IH7P}#6I zZKBKUSCN|}X0}AQe6R#99~$E6e};Hv;sE^C!+lIjYG}7-90u=DXSYVRvaQ8u;GK7W z+?q2I;+?;W?}O@L%xZJ2tu&+sv+T&LL!Q^VhEP^#JsYku0ar_#;ku$x5M(%z<dJ`O zT<(wmJ@_Lm$;`x|>=|hNPNVh{T4a5U_cs&{h|eW&x#*w_#!cx<3w`=SaM&bvS;vVL zwZ+oWvw^5_KbUj+94YZx7+h=`$!e8NQF+Qxw5~5C%P0Nu>40SXFr^rhwBu=N$tZlB z5=+x9iZCX#w`hO(0d)42u;a6ji)&<paC$)whFAEKH_IZ&v)f@*r7smt<$d~XYWV8u zPB2>j5+W<_fx;->2@0^GxKq7^vg$XmaNh*_*)^8!Eb+waTI-obMhYu!Rz<6_El_p* zDI7gw#3~(T(!ty+cruzmYH9{WE}cpKR$8<u?gg7NzYxrBr?bu}M__lcDfx9>VYegZ zv62Js^gB-#!rW9E9$HPnc)i6?auu1*!S^t6e=z+GcB9Ub+$|TYPlML)7B|{2WV<@e z8SQ)~W^Nfn<8JrGqfHxy-*HRW;=kdz^VJ$ql=5L0U!>x>XbXzm6GCz${xJV<nzXI; z6h!t3q{ZJ8P=5Opu|wXDR@E(HA0BkG{Bu79O{2N+RiPHf{FTMPv7WF|Aq?+nZ)6#> zoY3K&2aQg*#a}!F_l)=b=Pp|S2Mzu(6NwLKTj;Z<Y4;eq|7AuA=CthuXWm}a753`6 zu&q4D)u5t-L+0jCNxK$hs-9sg?K;>Jdr0WIrO(ebHA*U20N*WVz?=HMWPGCryq{FV 
z@gi-?E0D*+yZ^D3VhP=vF#vTg^R9`N8+{N<AU7(X&PFQJTh&#<<6X-1Wqcf+88n)X z>bwMuxyIHwn~M>DZZ<ZDB!OH>2|BI(%S?l|2zC#zv&)G^^ypnW2ImRrz9LxiRl^FE z?f=2rtq&V}J=CG9o=S)=>V}FRiv-K=iR?nnI`PFU6Y7qPrz2a{*^U!wV#2RO5I2<P z*l&2UU4yeo;btgjm2>y$jVR{qFGusPc%gdLKlVPb6CPf=!*>R0G+WMJSabU~eEIZ? z^^6S%xqY&vFKhr!<yT^i)L6K^G?RX8jBb2q_Ehqvko(UTbg+Tt8<@4^6ua=zm-iO` zv7P;sS*Mp27FruqkhUEr+@6f|I|(}XRIyn8Mt*$QAKup=WuwL$@ZD`58br-urW@tx z;RgpHK5#5e<(;lmpI?Y!q2FP~_Gspy2(Z>=vgq`*9oAb-1=-*stinc?(pwdoVMGw- z){Pb3R9q11)nZwvz7A6xmr5U+c|PXy6#A!ANHc$H;=7Lha6nsLSU3MC`!k&TZl<TQ z$@e7ek9j$0{<XrRk~N^L;zP;KYap>uiPc2z61%=mL+k2c<WLmLSud%0H^3f$njuc# zy&r5VtyvC#e{ZOBX19zMN)Ekx3VPllXfrcPJk;ZeH#&~G?mv~!MyWbtB3xk+dY-hx zC{f(LTAL}mi7-0&G1wk#1f#NUq4l3NW+tYf>%|GQI!hb-22RD{9qp``_jl|Zd~nBt zN^wK?V02C0$e#WjjS*L5Y3-Rwm}jJp-{>YxJNQe`sItTNpYDR)^IlY=sDno9mFV?J z0nbQf(9+qjSwfF?<5@l^>b4(;lXp!NrRD{&Qzsv>)BmkwDtzv)9o$bMA3OvvS>J_0 zJ)c=++XhLSsTtOaQMC4PDTc<9>-_vl7*+0yFC)g$xy6xU&x-MA96FrJ_cXxzB~>s| z*B@Wrz5?^I&q`=u4qb1}MeS|DR1>3&(!YkIC-;<PX=zeje2YZB{ZT_-rC?EQ$N^^U zT?nTZeqw!S5<M4ou{ROMXtouE>2}$8k7p-GA2=@3#BBV0QU(voI^)8%+;J;AlF17` zRR25>%0hEVd$I{VPut0IZ^U8I3rjr1T?uivF?iTKoV{;J73@~4Q~wp=II?aGwiPSj z<X7v(%iD|L+-z-JJ7=#bzwf1J_Gc$-k(&+1tIdU%fzo(bgMUAcj=>2pA3@=P`4C*8 zN|Gy_{n`+Ozct-h_F?|sEAyp6O%d$;s5n$Tnu$lQZ3EX8&dla!GYg-#8zwIwfV+Jx z;YC^mO<0wR3zqe6ka_5at;r9>lP1gH<Mm$nVc|z+cX1f0J@Y18O<x>-&ySv-td!VY zmKV%}&9S%hCia5w(%pSliLocRyDHNd=ZB?&%CnO~UFSK_pXNi$9C_~B=?#0a(uI;d zJ!#(CcrsV$gFP{!xY}EazAx_uhGCpZYOg_BJC8u6bpc)tA16+}+RBO*i=lDtTIM{} zlBT}y5>lVbU~zI5hF|5J-3&7j=eW^o{#o>uz6y&ii|L_!2K?lC{}TH+q$!hO_cebw zvf`JZS)of0efeDIz(jUZPlrXk*~n&x_ZDak?|23DMc5fdxi0DC>RUk3Gd*ZSx;*X) zJ`M%j0;uhe519rz(w)iHaKuOnf2`q5*D5nA;q#8mBeLPksjsjmJPKOB_=|qtvl;%J zLj8|9K(gyLw((LO0~cb;A9+)@?iqG_|88NUb0i(TrHwP*jAygPC5Zp5Y)SjNIfaBh zX1m_(V-X%Z*u@=3Av<*pY+hR>e!aaCmWo5LKz=2gUFLxP6^@jfv`~m#yH>crIgu1G z0ly9_A;;}EpfK8+{Fl|h8_y9aZ*`5S-rWUnzfQrTnk;Hqw-I#r=~Bid8+w0NnOX}6 zv&0ETm~+30=Rh-QAmzj3+X9PF9Z37CbI3hYf{k{HWO0b+48xzorz%w{QXPjsGAyX{ zt_d7nFcrJjC(+x=9J+npfgR46&K5t&My=sXA^n~)_0p(ieYNaJ-XjNBWuIbr<Q{|; zEo?|RXn{9p1~ty#8Am<uX5xu{9c*p<QpuUV;aEAejK!P}LD6_p<H-Ky%=)qo6|Im# zhc}zyi1#m8a`rT+ZLuKJv3>FUJ;1p!UbNM`1b*}WZOa-rNceCKn58sL3{Isl%KScd z;XRWXGy>mVvBJtw4Q67w0OEL_UFz~2L4N!n(eY+F9U2#oaSBefuqPg0o$5{fcJG!P zX%3{1FRp@N6+%eVbTrP3r!g&ZB)6kK43aTm-z@u~(vBptd_B)X%=5#W`>fIX{W)+M zdRHQDo<Y&Uc0AXnN%})M<$WY~1AMy5elE|U5g)e-<@#3zyM1!_!C(aW>T6)WTol|m zmI5u0GpV_s3Zdpi>T{z%hK$j|SChPH^n-Eew5S<k3u@u{h%?OlP6>B$>C%!#?l|FV z2~6mG$X*V(%c5UYGuWq2=X2Jv;X8b2p*p``8meN&!Xt1<W`TIUy)RvxWrKk)?U||X z0AUSXU^>ndX~<KaZCLGyx994^Yquoa|8*b4zA?qzX=*}O{YNq8YCWq67$RuR592*Y z4N5o|PCNKrWJ;4F_ibd8l|~0k@8LU4y>~20UW3j|e!;HsyZfzMyP2+m6Kvkri)DYv zrajh^p!T>0EIKlT?(jTSh<h5<hr8iCBW38>u1AMZjjEhH;aZ*oNg`LWaA!q27kvX{ z`(B1JcW1nkJ((Sk`2{~N{f1GTIWEL~6<*K*VUA%LQ%w)Xdx7%gbev~#H&(Mx&MD-a z(}$v7gOIw1d-Vc5@Y94EHqq*s_}xU4R%9yU-d|-fK1vQ^PBub)UN5Tdp2oe9*`h{< zE*x7p2=bgJV{D`YOqQDhbKhD~@owJDc^eEbJG@0_>E3jOT$oMd0Z^ApWxdioTwc9h z!+MG}==-#BX!_TRUH3T)Wi#)y)}|Zce1&7s%SV=;Z$BkS0ryn#E}MGy6Y){-4p{KJ zh`#?b#k8&}c*nb!XV(m)@k-^)@9KPLklF*~x0Z{i%bPi8&wxpm<-o<>oDn>BE9+aF z!4&vhGB;iZJ(h45!UJ8Lmm1G~&4(omUPa@lsY57r=wmqVYlSnkbZAp>HEX@W?|~6H zbk=V$J9D`oCGmI7jmzAFrF{n;;2St~PL*cuF`(7CGg<h`T9)6r752mzqDQ@@_@*I} z&UOcKmxL6pypc~1b=hLmkrZ@TITht&PD;k_+b2ontlg875^0N*7Vp3f#HNfS95~h< zvVLA@oPBCAjZ`<rvHUD@+bc!dHV5Iv{V;Mjw4>6<a51=GIL%)DoRxUqVcW|Tn4y#( z)4$cy=sj#N>n;hR0j@f<>UAcbZBM55C-X!x<sdxyw+lQ9=73FhAKE?F32Vd>#w8f^ zWO5*lsCA-QN#n?KQz$n1%z+>N{4DzK9836zSZ3si#yawNHR%z%?q$RrTNH87;!c(* zV@3IA8imaIwZg{hW2w88^AY9M=|xRDEXlkt-i-Xqy*1asP$8i)$dLQk64e@WcHC=p zEPV`F(w?;PS0))=yT!~U%V8+*k($bDF^iNWbbKp^w@1&QpG$ATeOD{)k?LWq&gzgw 
z66Yc+--lW8Vd(NyP1wIbo}##Ou=Uh6;N^O}%^kRk$4(1A%llK+4kLOI`4e`19R@=$ zjiOUs4s`GD0BTY@2bXPLvUgW{VLJ__@$Zg`FJ-<!q{?@gxON6Tit6RMH%=mI`YjY= zzqYc;7x=z^(nr`9w~fUq<YI>PDC{|#heDATYX9zpteyIF`DzpF|K$Q*y=<@~cMy%c z8A~rZgK^&QQ7F5ab192PVd|O!F>vuI;Xyz)^t#Z8TokuL*7b4tsdI^I>xniPzF!F^ zv`@vEHzIIwZVyNg{0VMJsuc8nnc$JHPZ6uNs4ZgvDN1yJ^8Hb>S{gmdmFYu66-Y;? z;3ebF?8iA3vWtJlGPJkAyWz5M<(e(7o|uAT=7-UdImUD<#TEDb9zml`0P7!Sfa-Z~ z3jG(tD#CT>ec>nY-rNTEFUw)A5%&YVKF9M9W9j&H1{z+|P_I-MO3ti;fYOhQ)Iz8# zXCyOym4GJ2+?m#5M<XsC5&HipumKZI@t8W_cYS)meh4#g#^?UnsdNWok2&Lp&?5~; zQcN*sWg6Su^P9P6sNimWKEE@GWw#z!QK*+H+VPI#%YBjbFcR?QXiH{l{ieRSPFCm` zgy6v62r6Hig!{)hQz^`yPT#)EhM_cS-`@xv3CQ#y9Y-&}+qiqrJh)(*fGfu}GOh4r z=Dn#Oy^9=(3srU5Eg4e^cIZcc2A80fk`76ye1r10TcJ;gEc5|ic>a}V-)7E%<>W@u z(Lp#V&V&lMhv09XJ$f~&Q(~A*V_vC1_BnAbv(G4_ZJG+pySas(%v7eSNrUmi{3Gnh zbcDT|g4w;%0##*x6f0_YuJv{njq^6<@5rx0qFe->)h#A7o@sB8PQ#KfXW+5Lc&h7@ zi-ixvu~XWdicp91nR%akL%X;$!HliAbA@@7%)kW?GqL%L$hJioax_~ZW*KDSR!1kQ z-8n@v?Y}|D@-=9+o;t-YR>05|p7=s5fewAw#Tz}vw6R}1lf88a`u}#N7<V)5(-=Xo z%M)qh*S_q{IYm5`-WPYo^~c~1e)w!*5{<Rjz!>L|^m@<+5xO1d?bX9D#3_!}2lu81 z*FMbLfoJItaSnyWd-%L)5U!WgqP4NsRHzn&);1quyR<gENc{)P#>O<({M*F&Mj@hC zs3z|pgivgZ9hQ$zr0HKmaE{k&$PMUb+xBoCtKL4Aa`%qN7Q6zf=0>4BD+4Eu%Y}Y2 z;dFG7HJTim#2TD>VeB*Bcia+%|E_CM#`p-ll`Rdf!7qfpt_{$<`L=L=XDO}iXHT!P zud`)GCzHq3FdAatkK8g7#p);S!p=dJaLhrE+I?cFR^o$N)rqj|iywZW-|!|en?&`` z(7yGtup-i*Ch<OSA7cdwQCkW@w~-bm6w%9}0q9v2Kw}P0roO%V;N+}8cInoC5HZS& zz8=oS!}bGki=i`=KN?BZ;l?<8kOAweX#(4fCV;vWTo~R1?@kA@+I{KN=74nKx-JC< z?S|Mxdgv3aN&A$xh(1{cs5kxs`?cXU+#8Wa(TM}7hIi^r+mh(flsvpO!>934BIkT@ z*1uq=h?VOaVEDZvns8z`>fCB%rWH2ggvu45G0lZk7WasI6;xTg`WCR-J_1vhxZ~$f zKG-~-yX$lXG*IJv;T<Ke(H;CAJld9mn%bb+rwc9~ccq9wjW9q*iNcRXve&M2ShI8_ zUA?P_D%@|~$#ZtoQ#|QVbra+50#x{_M8>DRQK#2<Jo8rrsp*ffvXkdPH09AJ$%ABn zjAso`&alNR!YFyLGd1*6LzAT6Y|WR8uy;3S?O)S`tL7OE)%Av`I3<r{?^@y`zrW1b zeim$%5c3;gL$!~-!*q675OvoGYlCCR<3a$L9M&M!1IZY&VI&z%v4+_X98h|MM7Ywp znYB4YVEbNQDxL3)>OwkxXm1wtUI)>3e`C6lpU$r4E71hKk_Lsphe46OgC&!=b3*;D z;BeFew_k9?l-<_YW$!}GvRO1};bRssYa3j-SO)*8&V+S8SBuM+SW%_g2c~;Q32U<v zO`e|=Gv3QH)h-3PuzV)Hig&=~16s^4$^)$*Xpp7FDIsAhLV#U8NL}06z^1lvALLij ztxSSZe2yCCrH_+-Ch~dLAPn#m@S%)9YK^TE<pYkgAD_2EMoTz(KI>un_5RfH-&FQ` zT3<BY9)Xv?jKuFYBiP2La<KJp0MqeFWEl_r$^Q%g3@%N=Nl!`|E$OOIt&>hhzcw>} zok*s;as{0Dy%9_uu0V~nJl1fZ$L>!utkN}tlul{j{K&y{=EMcK^+=6wtb7mWULO^6 zHtVn`^V@7o)@fMXHjM6Yey+pzHIV%37i5Axjo3O2yKUoe_*s2`ewL)zQi5fR(pjYa zOLn_n4TT^2SWzUy(5@BwWPXCmWizSr%6Qs+WH1@!7Lk`=gwgJR?mt>YBhI6k9k*E= zm=y6pL+2fi<r~FuM3InEW)g~Ms3_q%hwKrF6h$&KGeV-RBrR=CrM>si`<#=ew3l|G zly?2vOTYVH|G3`wy58rx?{mK2&xgB?FT|Fsspv6jGPfOKgKeOLoW7(pO!oA`)+zU; z-NBn^$f_UkU|<p-_$Yc&!AjiWq;RR8>BJ}AwZ#g{Og@!%m%2?|Ax$~FT+#1NA0Dby zLXW1{!0i_rl(l^l#Or&3q1rR>SUHfF?*0${UdzKL{igAP;9Q)!JDBx{W^uIA9N|fh z;t>{NpSX8BRBKtHXNVWR$+1R@*^RKwUz~lm^g*Qw*3?FA9`)*QoYsm7%!PALU{*vP zziGKntG@P<c3Ils*fJ$798l*1H6AqlRuFj~4#FLtCYbNPMUIjB^3Q<X&~SVyB^Zn# zmlge`KgGWxW7RpByJ5A|*x-w&R*&XC=Y!+{y?)7UsuIwrb1Iq~pN#J#{P+vT^XgJd z{G0U%0{a`YeY!aBTvOwl8V=OG{Q`KP8I6Xf{ZC%&b-|_6<--uXs+`JRop%}TV**=O z&Xc`1O`)3pS7@rI7Jq2kFAWf9%O=Y@Y4)x>jD4TZ%VtZEQj*X4X-e!cc%fu=CxiF2 zIs^MHO;9DRT1p)p%#oUH;gWr&)cKh?@7&<bj{V|jZ2KHE+ALGmm1x-aLfnB%=1IdZ zOQ`R;0#?lp;P-Dr*hkzUt5q6k=aeb<Y^6TMTsG%|+%Zzd*{zT#*o=pCH<04&P8YY^ ziFoaq#OER>qRE6dJg-eKl=La3z{4BhOm+wx&g;SsD<z(np2JgmJ98WJ@%*~h8vnGb zfHAeviYGlI<@f&M`0IaHD1L(-f3F{fn+t>&ZdVT9>^hhS8vT?UHAQaD>Kv$PG(qcC z?di<>SWMR11nteNY5U?4v{G%Kr1Dgs-ww$^x6DJ(k$U3QJEL)H>{$F{*%}Iioq5&F zAm9ooENtzFbEcZ`F4bfnAe-Q}&Yxkcqc>owCTE{qBu(=B24@?_qH}R3JfG-@(+3=( z6%DDZ{`xEBJQg#ByOPxB!Z>`~vkJcUadp{0S9AssD8qJ<X}_N1i?$2GA+*>M!#5m| zf7I%8L#G;QHRmmL>^m7-n-y|j$MM)``56=n;cPYB19k83QR&cEQsROze!M#j=Q<g1 
z^n3qFvK(keU26UVmpjwFKmPbbY84fr<@1vUcKjuu&NSjX#@g5?e+(#0HsOomg)lL= zAM}fN#z}WQ@aW{v;$B$`Uq5(YaiI<c1@uHz)q{om|HwN&hvL$&J^9?C2Q=<}GN=s& z&fcESFOK(zejg@cx$a2_eCk3I7L7sG(*ayE*O$LmdeHHRZ0>C8hP3T4bkkMmsVi@h zgK{@WZ9e5upets<`Ki48c?Dg4^POU^%z;|N4|MG)(;>xM@Z#+!u-?&*vk%{uZ>cs( z$HF?`mxLQ~%tSww24(Wno!{l>dwOGKsUIJBnGIh&P3g!lKkV0t92TF5JANOd+<oOR z>!#>|-TnuE4+;0wv9A27Xo~1->9fAJ0iK&Vg##}nfw5<2!KF;+6<dvvYzpv9T5pW? z$wE_?K79F~os_OuPrurok*~Rj;?$TaQrSZ@e(%{$UR{1o?Bm+dp~8RiO(PX3dab}l z!D5CUZNjC82*!vVV`8fZj*WOgZwG9IM(6c3xOY5?+7mo^8O^8X2XWJ_$#QAfZOJ6J zPVUpM3%>m|49@a9sp-W3?tzU`uMWcFeZ2!eEGUA!_kwe~)D^63Gf2C{lbs%U;ei2G z@I-jzEid$wUJpA6&0+4`t)>`K{L*oZkv^#|UN6o5?-yh&9*M4}d|Bf6bl&b1oLm!L zH%vHhqaVx>tc`uZ&pM;ZF?Ebp=)+3uE_$!h>wx{6?Xlx-K)Zrva>{&tevekzDMJ|@ z-7?{PH_@3WNRs-z=+7~fgJD&{;5&68Db>z~9`}``BM;lcj`u&w+o%#U)@pKa*i>Hr z#g?r9_UEgO*Xkld7fNsY1oQWw9?-qx1aFN;RnT^)a3h=9L6wz19~j&Oc`<h6VHFA| zdA?*G*$d81I1a}gd{Ewehdg!lxRdS(oW3g)oy!bJcds(fQ!(X|=0LKrh-Wi(d;GaX z>;b)&(Y=77>@2!TG1mg&pyYwZiAt#4XEx}}{!WoqZa7x#dB^AW=IC4fkz8ybesT!w z-ncGrH<`*eZ!Uu|Z!bXAAUir-5Qlp8(|DzQCv5nziUtaArDIVNmYexuzJU*GXpG^c z!J~1Sa|*uLa#HH7oWuLoPm@*9cxs?=QgrUG|Ibvsx7_2se*Zn{A#ym|kH4p+8E2qt zk1^QSz9ar!{Rz6qjKqm~X>2>>53Put!fOSK`dNn^@GQ~-zddusKJ63mT4geR9y61= zu1;j*c|kOM!&WMKdIuUljKub059K1h3t2{sq;_sbtQazqJ1Ta;BcivntWosUFO0*C zs&L*rN=r^X5XY~dcjvtmv_<Yxv3~T3Lg}YsHX9t81=cNZDREOMuY1>-$1Uk3_Zl^y zJf4MM?Ub?9;{J$wxpe1y&g1d(<aacBl@m`<6OOIWwJ_vwA-{RpM47gQ085v8-*$UP zT3tQ3N1hT_-{~avXduc@FNQPsl<ADf^#3_=7Y-~A=2(w(*0`l7?N|Orht7Ay!y^vS zBhmR7@bEs3de#Lp&OL`y$D^q#KsZWvE8^$1rL<?>SUS6+KXx9ATr}VXq<JoqtJJjl zRn9EIk=Q^X)=6+c^=^H%g1Xq9-h$ZlbiB6fq;Q^E@Q(@~k@1zuIaXwLp6TNJtD)fX zUm7p_*_R)7pNM-o7w@ZtvUcxKX$PocZcR1^E-m1vb-DaCNE>cVpM)*(*Fd#Zi9-Dy zg;z3KF!k2ZI%6l!`D((s%3d75O{O9XCBZJs;@2%fXq;-mncaOb@rovVlHD=<au9dP zXv14f({M!i6xdVO8LdSpLZ?=S#2$83zr7!BG#tl^o-dYne{4$)3Ilm^QX18Dj%1V7 z2DHes9IlPJL{6oC_)It$D>^L&y@)of=y4g;m#MK@=t@X=BAljI1V5pv2fG?4^U3zM zv`c+a-QdUsytv^9^-DhpV;&v^JC{N>|FzTG`H9d4#piRgfe)6(kK)XtO;DQt7INBo z@f@W<UUOj(IE}HzERUU1V6-i^IHzG!LozKc%Hht}%qcZJ4x5jq!0F9-=z8!SSZqn; z2?s`Tjhzc^RxBp!D0=VuRdDrB4;+^nfgMjNv%!`VRAPFJMkf!#Nh#ydqS=*|f3>B% z;``mA8YJ$vDtxDn1An^Rhr7<MlI(lv)5+p8=-X2R4J!I^(m@w6OIRz#FYv)gO>LOG zPJ<k8B+=PBFKJz~2haPP!#kd)V7_S#*Yru`IR&a5a5NV!C*33M@=EVB168<K#hjlc z%%$qXVVreJ<SKr=04&jx@Qw`*x$_&kdG3|_`*uQ~;x)2P;!arF`bg|TJELod-?VMy zR?==WiK9QeL9dSkS^s<<pR38_<YzK`*<MrsOS=miWEIxmexN`{|CsWbfHdy2b_VH( zXdzyl!pA0B9X*~Sy5Uv5IWw_}_Up7mXUBte-G6<CRT_c(I9O9snrFtUe}m|t=5cD} zJaoS?kjI}M=&e328;5QY-#>R<%zSKwu}iYp<8Bg1*bS#uCZ$qB^j|vf_XAX}AA?I< z(lNI035fPx3x8dP;SSrmv@=KC`Oh4KBl%&hdvYVh2S%c0@)`Kvxfe#E9~TU-ko?!r zl}DaGE&B+@hgx0^wwlCn&mpGR{P8j@@$bx5?*lNsB!{OPB|y|Z1-|psmyMK~>FeJ( zdh@RVvZf10Uy&Pcujq!~{+k5dq7Kra)uLN)u83Y-vtrM>X|U|cc8dM13Y(fMXoUGG za!$6zv}+sa+}pmAQcV~RzI#bB-B<&c=QhY;t!?nZRe!8j_#sW|KZ`7mOybE4y&!F$ z=*4CUx4_W3L=QbFOR#9JM^1+|>&xMa`4h=1cNm`dR}Bf`oK^n7p2xK-rcd_g#W_cY zo`;R_#>|muobv+)w^PMZix6y8i=c}iYe=D7<X+p<)GN-=;M@bf`Dd!=_8-_NP5-=? zBDPqNn`tqno+^ia78}VVQjImw2hb)(6Kwc17`N$JQR^v@Eq`<pwmNI0+@~$gXtcm{ zX2ZB_-F?W(8^+J7_du<C2CI4Yf~PILq+6A~7+%#EBd=y)$-t3VY%`UA&He_nH&11A z(*gV`sV{1s4Iu@8d;ES#9Th9$c#4B3|5?+P)93f+jk!T+q;AFKnt5=gZzhfxoq6@i zz8rtW4=kRSLF(jaw5q9<%j7;Vb%ia?xUS4!Kfa&|yN9#oIVVg#P)7ad7;)!$BEN7! 
z3x*FFPTj&C@xiJ9Ty<^&R(BL`ke1P0=G|X9k**?}RjjAx#UZ@*g77qKNugim6S-kV zI9}K4!RvyDNnNLo#%|}7$SuhnwEAUYP19<MyV(ZDge1sUhj@a1@H&dF?aE^;1b^tu zdMVu{3e@i%hPQ(jQNS`gw0RVXwOYPBx6zsHKEHySajW4%NLRci--X!RZDh50xb$S| zcbK`mh34GIXRCI%DRzG=<fJI$u6frXcwH_x*_O%MlN6<OSuXJ5KhcM^(1%h@5O?rv zlmmj1_qiMHdz1%OCw$ROp*vgs{7o}EDKhH3rn4I==zzHe_6~nnfB3>I_)kxTH9?i< z2Y#p4XTF%ZdIImgmx4zY3btAA7&vXJ$SrAR{L0xBTn_u;?AmbBy*U*d+GKF}fplDS zZWfrV6WuB&B`NqoJ-Ei*0IMNo@O5pAa70|CFRy&D++iZGRnPWTv?@eZv+cz0yGY@s zA9vlOhcg-?>fbNjOBJp|uzuH1GWmC$n)|y_K;U|qx#&5)a<Sv0m*yO^NX$+jS5RQi zG)QinidIS2DSNVns|y7`W#({U=llZZ>ejsDR2sUUb>xy5E%Xtt#^8Q}#q8Gx4sGfU zJ49bVuzujSY=hxTV>xMW9y|*FK*P3&%3CINWQ&b6Vc+dc%(Dw&qZI{sqRg8I)fCai zSNgbFa894h&*q~#ioDfK@C)Wic;e~<X-t<c^wK4XO+p?>-DaiXEVF?e*>N}dI^Tym zy{&ka&PaZ9=N@HW8OXgK^ugY<W<ges4J*7vTr4$#TeL0dhqvV+JKjOR{23Iw!$Ce? z76$!;X3*_Uo5(vwMR=FKk<EgsxPN2>o)PDXx#vB&X60%+*mfX4i8)A*1cR=ox>d4! z`4s+H8^9r(gYfg-bsFqG2v3JqL5b!7ytr#G%vvy!oz5iS@g*7<)+vhnE*;95*8gbM ze>LQLW|(Bx+F8;mokx*<*T~(919{B75pZr_Av_3pE*W<8MQyj`U_I)D_fZ>jj51tF zBeg<7(?*#qvNQ09Pm5@`jo=F7R1PUrf)8hsF?X6eI(igh$W$wwHlvOdkNZ;9tIl{} zW*6?2D>^bC6Y%@jwtUNbIX$B`6qTlq4Q19i^we^=5EjADlgIOv3F9$po+56a6N*i7 z@w_DXm~^(S30m@f81~2;-O_Yua^gJL)_tkm>TwBr^_K9d@-a%7u@HO}rr^6=L;k*T z^8fOKxF1eayZ2grYw+Q^^U>Sj#XnE{yia5uyQs?JKKXIO+{@C*#fPBkRR&t_nuu09 z+I)8UGRW&T6Y}={l)YEPfR^oAfOdDle&$K(kM0+G-BX_zrvO*}Ok(*|JjHcSA(f5d z4B%$Qqn*8R&Z3d1c_W!u{Iv5{RJNeHX;0*HhxXEybxY|%Kss-adqI6SKBWzt#&gZD zZL~DxyVPHC1E>gB%9w6}g0r$;e)*}IvO0KU`R+#ee%_G1ayLmU=49{-<&)I!%i?-J zn@(_b%szUYVTF6U{Dr8uHL`KwR=EDoljjC+hAV%EV#GgNyyd%ux>+aV>b$dL(=QuO zRrP0^7bP&Vq7!ue&y96X8*tmX27LIVGPq}i<H}rfnw^~w9`icj8rSh)eqtO>+UUuz z-1VqzL7sdhAcjq1Be8p<1FljWjFs2=^VoMi@ygmXZW^kCUOQ}g>9bMzdvYr6e5lXz z=>Sx?Z-_px<8gLp7xwCNQTF-Oi-#I#;fc*dd4}N@Sa~I!7cTuM&#bVb=b~p0XY=re zx#%kw2&bk~Dk{87=Z{|<;U|d<Zudo!Z+ivNL$Jc6cr%b@ZKA$2oN&RWA#kFmnoJJv z1+y6gFfhFX>)CFWJyYB9h0OW&;XQQOk&eo(qh^s`(PjBat|p?IFK#>#Ouaj(VBLf; z?rG+M(vx8lSovU8#Y(uZ%#zo<g;0FwI8DF&kphzM05@i1*2DX9`1&BWFug_&J{_>c zXbW6hQ3BT|RzO^nGuY<5r+P&vtnwPcTf!5$bxj8L{yK)w{&HkT*B7v4bRZdCG{vRw z<4GQ%jwQ?Y(%#`q!MyCGRFq#yo08q3bD0n7T-p!Qb~V&bRsIg$Om@iQ#telW7$n)y zcWGz93aO8!$T3;i^4pqeVoz^`v%Wo})md+$Sg{yZ`P`vUB`3LKu_C@#Ohfw<X&AP| z1=9CBVbs_6_51jwY%n@nUhB0EP6}`FR=P-e2Yom>u?xTJCGu{Qn`pklH+UvyS}`Mp zYa;Un9GJF;M(;G>ozqsrMYGAA<K@Ka>$BL*r6<RW9P;&DBWZmfD_ZleKmI<D!B-V3 zC@ia9UOZ(0wa@H}1yU-`F)X9NwH-LE#9vPDTp`csBC;2q-C!8BqjL^nZ0pvAb1hUr z6ALia;}bPL%17O*?=Zkd<o>mDaKoul_`bM|oPt{A@owV(Ud3~-8#^I#$8NA4a~1Mu zN3qdpW0I5|c--cCiXPO4O@h3+YI71U`tXDHNrUmit!p&A*BWTq+76do-%kUp-jiAN zb|A|E@|IyUAh_%fJPS7DET{gMzj`c`4Q+$(#~+6LHOTqN6YylQ7iS+!kbejD<5J<v zT+nx-9QJ1trg=@5{#<%UJ8~6KOT909zSpNEF*8I4s4Wau(xvht8tj+ii3?#Q-@WaQ zX}b;Z$oWxt;msBJ9B~-ZU%mtBN<J>OJ}!^eoW|$uZo#2hYW(4rH#Y`8ly_|DLFQQo z+{xLVE;eexj{Ux3_MgStnGs+-?lAmX&=s{)y5f;<nVi#71^JUd!D6!^obcl-%^iD8 zPVy=A))ASsSycmgi`sbpr{ThnS^{vkVm5|+9?Ol2&e%iDeSZbahtqG2aCF*J>g^MT z(cP~=`mQ38dvM@B)1HB~lN~5tcR{O|c3h`2kKU^c!XHk5s7GLbxVJ3~{}XvyyNui9 z<GM~TH-FThoHv<!2u_*1OD3kbgvt$y;vCdxG<WT^!#nhIFnwxC=RQ4u$(~R9@b+I{ zDRJRO`s-T^i_eSf&VqkZW%NKc{cVp0iQ_rZ)DfG0M&Y`dwRB@;3C&bbV4bn<*kP?7 z8t+Qx3lp=22VHoXMr88ch><jFYZ{NyROIhAOJVvY_P()K7vlT6V6({u?~A1pv=?lG z&-U4Dd-^?Sluv_5>z-0|YbbW3JdV8d368dTMJxMT^SaR`WO<-Wt_XF4*Ud8By_&+) zCtZV%E+S)5tBl{)=kd%daq>u!X)N(br41#cWapK&Qv16OJh3bgKi$>h%D7Z+r=5)M zd!2Y-@i1`Cj%54d9FXn@3ZLK?s{MWk<`xfy2}*t7(4?KBhwm;2EK<c?A@4xDt2%zX z(w@VcCrK%tB<y3Vil=Nw^KP>woXn0`b^IKqu64r`wKw3SR*`&lz8deIug6mE5qR^! 
zg$wSSqD|Jlc}~9ILP%$+qyH7~Ss-@A)AQMWxHuzRoCq;q$6%DkRQ}zZOfUOs;D>Bw z`Ke_L&MFjn#3Md@tEj6qs(v_(cCVwq3M$-tP}$Ky2Qyd~sDK+v4?so4H10cg0R`xF z$CjcA@Oq~Or4^;~kA@3$BcwN59H@}jJ+{NYQEuRNY8W~TH|V<1f7I&zS<Kr4d0lTc zX$mz$`m$-#yKwOgHvTEO^|%jXVjj@C?Ebv2TQ)b1=>YxQZr2<B70=1f1IWTf7jHg4 zEBEZvM2en+#Qw|&yXmQNV(2?*YoxO<Nld2(gF&pdcPDizv4mH<mcz|=A7S}LJ5+i< zfmhwDkaF&BmK3ctF+9*1RRb1KeTE6zl!@H#u5tV~FdWXE_r-)&j;P%!8}(nt!ScN- zXn84_ZyjAiEiMVz^}Jva9I2K%74<@^@Xma0ex=uRI|E#u-vPZ!heFJKWNSkObe<ye z;Ex{4`zNh{t6?{xefe5&91#pRQUhS1dnzwgzXlt_v>?}f7#j7^#++HlV4;ety#4Sr z488c1o^4JRJFil(&D;;_ZNI{+@@{zM5!3J!Dco8VOi{YyxRdV!+46=vKg(63Hk|@s z?S&AI?4H7>7SDu3A4C`IZ#0H2?1=*^gHU6N$V7GQ&2c9}dF#3ac%u<0ZJ9C=8y|^1 z+vpW?<*x*O^e2VSExS#$`y6zep6~tB!Vl-&>OnEPW=TeMF~Tpe&Y#|Hg&8VlSd0JB zg1J$={=i(g@Hq;nHD0As?X>#twTq>z5A5*y?i5UJFZi96;(XA<h8&hA;Urld?KKwE z|4Vk~!ek@ZaIX_iZIxKB;}5vMMR-k~sq?Ae^OW>8PV&6Hm~?xchL6Gv8CEwH?01Un z+v`=}GIlk!?UaJe;!ZwiV;&a|G)I*WeX#SDWKi$mi?2+w__mcLPET~@rB?0ngV#`5 zD_tLyBfWX;Il)aGMzC1?F19NS##;(Tg4gKC9rp{K+&z$Q`Gj-Gx5<38?<DV)FGbgW zrY|?Y6g<(BDYWfUCY=2{nCI?ohRUA8Mrj)-F9?$%aQ9$7rEpQ+E7;g(UPHLLhsdAD z3`0NP7lP4p85(^Dz?anlkaxJEUbmAcJ1FSF)w*n+dVMTezY;m-{gu$z$sf;(+}Qh_ z3AkpeO1*BxCeVM@k!OER<`LI|ag3%py0)C4byo8rDX%Tvz$7-jw+=p7oP!;n>2!SN z4{Dc`k2WqnxY$jR_io!Nwb?!r$2=IsZX+TgqHwe9*g+SyLR+biMJmg;tnsedA($NG zfI7zo59RJB$}%*@<gPx_X1_Dg{dxjMY%t|o(H(61H<E8&Ps9IoH^GF&VtQw&&8dIS zKzfPrnD+f5Z}~it|C~BV0YMpjs!ey;SDwfxLJRQ43M-yA;39pw{z-m%(gQS)9rq3i zx5kqDW4N8mP*fXO$U7^w+53M^z_WYcns*WGSfI_B!c*1uq6W4R_oBoB*C;bE5w$&a zrOmZk6uP^H{4Bd*#E}3Rw_O=i#Jt{lgFP%?+ZE@|@8d1UJf`1`qadnnTX3tjq2_Kk zz_x=H&73wATgv-W;epd|>(O@aUVHYy_Tg3X_vUEXY046s?tYxwTLDJtg>cI^T{M*a z`Sy>t!n<fKn2$NQ{qI*$TsDOFZYZUkt33oOa0eJ9I`N(CqI&PAw`s^xM||r3oeo`? zM1jK!A*$0ve!cxW{IM7h{r=R_tmb0!Xr6#+LodOsVRyl1p(0dYGDnN9!d3LpnscjL zXzhP?*!1CB{oHpJn5Od$TI&=!zEGW&7U{r!&h#!bpDC9<aOH!$7gFi6#iU=Sz<%W> z@(zcku=0sE9IlFFSLNxn);JP}oUnlE1?^Z8pOvcrgs14U3kD4`!~-jX@VH_Gxu#k0 z(V79omG*cp3ox#2hNNFNlI|AO!s^e{u$^)uUcF`qcid-?#-U);I};5HPfn+_`6=uh zX2QKhrmuXHKTW#WKuT#t>B_U!-sz^Dc}`X`7Tk`<mv%qI_tcNQo;Yy7q8@xsb2ymW zn)A0wzv1VeQCPN2uru0zpfC7?8eY2b+yOoDQ|mtJ;c=SwC@QdZ=5NVLULhM>i1)Sb zD0+|;!)Mg|FurOeRSdohdxyl}k;pduu}eJvo$!htOHoo>%}{v0qXThZ3g-mxrJ8S3 zQE$u7x=%ST36pE-<~`u#v_5=x+ZbNfAh`Q)0`bd}1SoPel=N32uCKU2r#hK(?$}hm zGI9b=aCAU<P!IgAqK<95hjLf%DY$Tg9@~E|BiBFgXkK3rUhJEJW0&m_{`C}!Ee~V= z%un>`UK5mf#^Vp&HsZ6Gz&`yWrPjJI`Xc6GjXRU@N6~0(bRCcB&%Z&yZ(EquKaXR_ zf05c;OMypgFVoZkIsE0IB4wIuvu$5(+!phPe%a}As7@%1wV5j~31?{2y**!?EI4h} zu5@KsJ{yc`&&LjIqJX{zY#KWm@8wq0Clx<(@=?GcdeJ;e$ph<qmr5$yN|>LJNOQ3b zRa%<iwQkSg`P;SRB6=CueV5Y2qmRg_p$4w?cp!aVIFKK^4Z~+O(HIlC0irM3vs0$< ztT`&P<$+)}ENZOZvn&W_d>f5F#Mxi@*aoRUIDdQBi+Nt|FA5KvB<&hf$WLc0vE3vK z%<U+$psnBF(bhN&QJVuzA0B{d{b<hET?Z+Z$Z>f=JVLKK&#CzWh57qnfAUS~)OuUK zR`px%-nW4N^Ag?P^{Z*pI%B@PT5y*vPSS@SpJ3>&d@^1Y%ej7<xZy?&bSipBs~)z& zR|h9@$u*L;`}XGL$(8W^b0n#*oPd3Yn{Z@4aAtHJMOUVYY>*BAaJnLGE){#{^JTQ@ zLvI}Ju!2mx#qqr~b^2}WjUNl!vT1=G){oi%4W$vJ>!;5?9dhubMH|RjvXIJC0ecs= z(9j$+Y)nqU-NpvobNOO=H|7M*+JDpA?C2Eudsq|K=T#GwuZE8k27<vUOUkn{#V3D9 z;+Acr@$mGM#1qGZkC7GkOSixW8@+MB=3(5vu9~tB*|ClK1fH2-jHgl#g7K?8bmFW7 z{#`Q|f0wSKTQx1@750Mcf`wmdiV?ofFy~k2&OzrhKWOoGLp-i!kAqLf;Jv<1f@LLk zLylvRcAB#Dj4V9T&lEn}Md8Z=AK~}@ONDKGq@fiI4y#|t?i-WfhxmCa!p7r4vkmgd zvDLz5tirl=<)oIrP|g{63sx<?0IGi?Agrn_g8C{7@tDZlzZZL<ca=O2T0sP^Agv{k zyMz=|T4D(ejr&aZ`Y$FtJ`p45ANLmER=V$O!NG=xl4`>^+3tcqe|FyO?eWQ-C!2?G zcvN=`TH(U?HfEx$@jy&p)q|g^Y2n0s{dk!}1)a?5!Uwxs<L{9paoWcxv_IB?-zi?L zhlh&X)HD_fwpLJGQy1Lbt4e;=8jMHvHpmv54RmE%7hV=7ydC96{G<Il`NL~Vz9w8_ zt}BY9-JN$+POdpe2@k}ywif|guaS*PA2w_f-qUR{_~83Civ1nLvop%2o~vKN?bZm^ zKiHm^tyoH<Mg@v4nhN!;_F((L^W==YWL&hZ8Wx}Gz(0>EOOmc5zr9;T$3q`ddck%o 
zIJ1!EnG|B_t2U&NTSjR!pV3KsW0Zd;Vr_2|aW**zd&lN*b@E1O-qaYpe?WtOyZuL& zMPdBp#WuP4_HyXhUStJ&PQ;nxEcyDECa4T?;Fuj}0WYd!{^wYr54Y>L=1#$!_Rf5F zT|TaUdK89S7fznPI?}KhPhMg00Zw<Y=GN}XvgY3=?~~%W5xsO22gC=9zLFMxFg-~f zMriN|Yj6J3kOc-)J95`cljZrh(y+_;f8H@Yd!n+_f8ZPbMGn&m<`Z@M<SyUZiOj)K za9=9C7z0ORQgbBV3meI$ru~F_FayJ1nR2g@6KHItBR+W}I&xi1@r!nUe%);{j$fEU zrI`~@oE^DFX8~1LH3{eSc9~x1;!2C|?60FvMguKy>6G@uF(-V7x=U&QcG0Wb+zaRY z`(FS4t_eI9z3{ZCF7(hkoWJcmOZQc~%Z*;WVf_9LVDB2rezod&R^0|^T?u6#5S`*I z3z({S2u3dv{*>Ht?7cGq>R);Am+#&2>Q{Aq)GMB+4Y~$FH-rm$yg&Z@QNV{QW=jzo zx>!4Wz4wV>^Xb0%1irB@3&M{@2~T7)2W!ojf&|;{#Bon7DG>L$2UGdaq*izwFTt{) z7J0|D_w`xHi=~RXBs45d=c-FaaPpH2ZgKl3ZD_DZy=Fhu@EFc(?F&$EvoGCtZwns_ zw!(bfE~0;b1V-t8l&^0tChbX9_<HI%d~ta(Y<RttmQFlC3L}T$)<N~cQ!$u>=bRu( z%t2O(*_1&+28O(tOjqb4XnYVegkmr1HS#(%3P(`cjGGj%f7p9Uq%Y{ZBF0Wuq_KxK z)ptI(i?SwZz}X9_ywW0?r|JiB!74Aly`(J!-Rp?wzYW3PLBC<$pLp)N!ksVG4B?z> z>6m@im9zg-$8G8oR<BXx4re?$V1n@crC+D_O{<|0T)BD83#sPTHb}jbjFH>zIQvWs z444*$3(tG9#)aNod~zGfZ!)N9P9GjH><9Fk>mc0`cf4*|<+4k)7i)X0fsS??$vY<= zUvxA?hfdb8nn&@S6b}wKGL2>^x^dqgj^ey^1n%rxCx;nzq@D?v;qcmgNkAz;{&n;J zC^`?foZl~wYZp@5N}Ec9v}k<JX^<AFG?g@^5>X<REi$qrA$v=<>T@4Ll8{kGLP$jM z%_#eK|Na5jrR#Z~`@YY4zh5u5dZQyn>~+V|X}sSYHHhw<4+jU8PS&UTj@T1_2!{2% zWAMjTOup;FW*P*b$%5yM6%E5L1JdY3mKvJXMbVg{VIX;V3jKL3gG-dOQ0b;LMHz+* z>ayzKUY*PNG|`~Hu?YO0tzwevPqIucV_Nif4qelF3XUGGRBF-#2Odh{#Di8CT~R7( z=*Hr3!%gDo+UcNmVmEiG1mlMH8Myn!07%=YO^c)6F*#o&n%TIAjo2~=<%h(OYK}cw z9}8y3^E5HmFcU8-dy&!U$#mz-6ZWUvi4E&S;SXm=?LY}i<2#%g+{Gdv=0{;WOW}#* zTJBy8!~T<_DT(_eTf1CX^V5ZNF!KbM>Fp4{S#NalS4wtSb9^RS&gUe@zUfe{+;H5+ zoN!?r??etUrkzUP#8#s<?8IVym(Rts@!KII+$Z}EJY6N}c6K&sTaTdp>$9-MG@LG- z3a8Qj<?Ko0Oq#biiyj^p;p4y-)^BqmtK7bTW*FGB-9C!cx0}1Q9vFf7`cp!$w;gR6 znFqcO1(0u(%f>8I!QZ^I-gcV*y|?NL4jc5yZ0{iay2nGjd}so0Tv5#D>}|lFK5!}u zn1)lfDhX95hf&vNf1L3&7G?77arVwV;8t>+mCi~g)BT>%EK^e%<vIqmzr~T)@-!9^ zn_u<#vJBeYISlznez132vhch<(C?pL1QzOvsl92`??sy^ZDC8B<|)F-HeI|GtB>za z{AO0qg0btq9cdmaVT+&c6}KI%he!9GajuOwl;jSgJ%{GP&t1VJQ=bU#zRN`O->;!O z)CTYR4kRaIo|CiG!#8mr6tZazD^zf1?^6uWa?K|w3y7f;2NST?+>utF)5Hlc9MCE} ziYY$ev)@WSuNiyUIf0GBnmA1iEpWvQxsz~mp(oqEL7i=0IT_tw=+LQ6+t~4{NR)W8 z9WJbW4^E>4@Xgx85d3SYc;?_dY_o91&YI=y{-4q4mKIO)y3@#t-(im3$ikk55%g%y z16WiPOg3(zRPb{b1bFPKy5wKLPUlA9VFf!1lJ%gpZH{!n;S+qHs?S=kh{B!HKr)bY zqB(bsDI$M7mR`0d-HGd&ZI~pESTUJNnEA0W$Gp*8Z6a>XlA-=ms$dZ1AT-5E!;4#g zS^9Sqdf}Z3TmR|9UxRIMd-*A$!rKNvj7a8AD0S99k)z0#htvLPkuZAy6%lu>V|e?I zkaakM6e=WXU6L^kIQtl$97=;z?K!ml>^a!b9fz@3Ezxy~0mTe4MWaAn95~t(_3|4) z_Ok~C9yLeBlh$}(XfOurT^2K?`cc4mHySy8Fnbtzm{s02qa>%LY|zVd?3-!_d+FHA zRK^a(-(9bvSN%FWCBxZ!)d`%%#Ixs_8!H1{-oeKSPgz1xB3_#qL)&cp@&1)){-%^c zhheH1Vzq&#4N&2I%tofVRTbs;zJfhYwUF}uCEVMwgGt(*gX_L|G<|;qJKQWy2bRoW zq1$It&J<6wnGXD3R1B}u!+37ngC;B)QMGrR1g7XMWptsDeLiYKwVYY8a*is=^Z&zk zezzFa;)*6g@@P0EiL{0qzzoOp&^_@q+}jW&2A_-P8C*N^%gIHf?fz)pH<|f1@xS-= zIJ_(Ig)KPDSrogw#d9(}Y(PaAwpUDnm>)fYy`wCxJZ8Z;Cw1&~hAdm%GLDpw4?+8{ z+SF2?g9j#mf~>tttkSa&joBH&EL#j&)SC%HOJ+8e*{@-P)|lZnb6fT{$OJ!M=+CBe zE~$I-LTnu!L7%jnS^UQsTvWRgoHy6N;=BDR$h8GBi_P)XuvQrDJfH3g6QE2l2L0UA znJJ$Y`Jq1OZX1YY(}Sr^c{)iCjiKkN_Uu5GE%ZvR7XPyIY+e6LLelVYcw^rL^tcd! 
zi(5ufetR@b_dX1_Tjt@Bb&_=OWDxnx&cOKe2$F5T$xL1$tIOBHYW+y@g$IG0)&sUb zZ=K-2!V_Q2+r^IblfmElUhK?(_b&Ox9k6!)SGHSyHl<IRi>9qM6x^<clKgIZWBeL8 zB+($m6+Lh{rc%#5#*AePwk;re)Pw2a8^w^q@ica406g_vh<a;E*xv9QygoM`O;{)n zIB7`}Ezdwu%>Y`V?gOn$Rm3}dHalml1KWFU14}*W#5TrkVG*l%m)2B<P7OW<swvx@ zLqCpUYv)gbP<c~y5Vc)`zvhao?=BG>!@@9Rb`2Y(Ek}K=wCLBbd03NvmHqSEF4o`n zpdhoU@TiviSrvA`^VTg)=l4vy`X(Ifd1hi0XYM~7l!UW8Jh5GE6YD%BhZm{WWl&@W z4ttk?x2g>3z}#H8(@zJNXQ>N|2l`?^&)@9FhE$sB$7j#u`{2<zZQ{j-P?Qgo!@~zn z!Fc#_`1?Zv`+b(D-fc}VRQM$t_p0I4Lx-TUSpw@09fy>E3u!=g9$q<_gn#Sfu+#7n z8}}P=j%hILYZ^?3uJSOc>pFX>x0g8(L16T2cIAjX*5{3*-o^z;sa<@IZOFXOE7H%1 z{$#vT7MJ$RhpG8G?DxQvRf~uGCuV&bj+?`cxC=gmtV0K5*b1epT@jLk9N)FY=^NmW zC*1Fn+shW0K8FF4O>EQ+&Ihb*fU-qrVMo4Kl}F=a=9<$F%R(Q5uxcTy#XN+Tj#0FK ztTY8ouVx2_%aim>o-5#)$c8HeDz3>tf|BA(a4TM$F6vc6+w{HA?3Gy6R<A;}PxaC3 z<axN!rAi-vyJGhpXRM0{(N{$SUtT!Os&N>TR@1|QgHN(WH#N|Gw>s54a)ozMa?~T^ zN-58KnQB5h{#l%hJEte(#6_++lB(hKDZbmBH5Rk~{1A%*wa~244XRGou%eIxw&z;{ zejk&BzW!$D^-qnis4WsUtxlwpows1#N?oD#xFnX={HWOUB>?x7XyD)F)l5;VA6jX1 z*Qe=W_AJi{gNJUf`eWC?cKHv%C47E3x6_Q)6_0f8>#+bUdKOZB^HkR8u0}ViJA~Mz zN_I5A03L5E2IAQYd-j2SQdkJ9K1EWmx;FTC>C(>_KGT#8XZN|+%fYoA-u7;<YO70x zrMAV85~5EX!v$ehVQ<yBW_S3rFI%+f7Xhy#S5>7qui-pC8T=c=yJx!UbieF%Ro1QV zELOh(a<xNXW4H-rcM{lT^ZD?lTWoW7GH#OxT9E5b5z(b^YEKNx&V0($=M2Suwb?lL zU@WUt7(-HLdssN14Vinb1zTxJJZ<fPiZ|`ag_KFrjyp;o%_VgFB&c-`rkN>U7_0QA zk>AI&Kh4VovE-^K;WY<5LobU~DQ>i(%7B7~rwHMmyPcmT6*4WyK(h2p#$7h$V)w>@ zsF6|(Z`PN?xMi~F-q^ub=v-$XZRW8L1|x*-?Gh;SbuJc|TobcmmRD(VHcsiMd(d;l zi`M%!vD_KAg`)9O$h&tKd!$qaX21HP%3MD4FW}DDcAzU-_o`mmY2v8K(Rjt5yFC+~ z;ry;z_HUIrb<W?-BztsV`ZPO|oj-~$Z_5#eKAXWh&uoFO)&1CivY*5T9Vx6!La?9O zmu4@RL=pVEQP*il<JSFvXnl7$9rnPbM0YQg8UJKA=Vgd?CEwV%X*&4(j3mlG4ksO_ zGaw&06TjTyyN&QKuzY72shsZ*Kkrt-0l&HU#Yz&=y)U~wx|@!ZrgXWKF8%{Q4Sqty zet%l%-;V;H8sg@8S+M^>o%peIwQzT>54*pvn;Eq6&-5TS`XfD>@@EKa^G!9%^Jxd) zRAYEA*T~Pj7}_)~j>c)t!VEYDas|0ma#II8FZs}AhXts0A{>?CJt4_TiS~GY5^XB~ zuyD>~H#}XyeySa3C;UROwQDqIYwMwb=~%XX&kyL#IK)gsZwQa}oM*E#(=qBnAYIye zp2a-%6y%O?fX4b0?AG&W)K|=M$^Y!kLW14VypIG-pE#G*$PS~%#0j``Y!aDQJ7e#} zg|zYY8946jho77V(7sOoZc9_dJbDUWlKlAG;3Z2Q-o~Vi7Vzxe893qbgXPSah9z}} z*l|-mj0lY;<@|8+SqWI27Jxki<fxDQKuQTWr3l+G^f>#jaJeHACfVeY#<RJ!Ub#O= zcBP|{wKWdkutzMH4M%0`dhsh{Pz9ee{XQLuZzM<1gO&i=H}<J`V$U{M8{;5q26PCI z{tLrAzURr~pZ*8!FPQ4;Zy<Z;moRsCJ=;_)OFdR{7#@F-Dc|IILgU?R!Rj^eyLcGO zIPx48f3Ib`eJ9Y_h+0-h8(5T#lh9zyxjk2|31_!am1pf);q&={bVMhTekU5@GYF-$ z&Hr4S{X(f?*kt^&B99iOF0abG(gW!~irA-Daww7W88#nXK>e;NV9DnT%n&-*CYi0G zcHkE_ua7jIKjlfaJI2#aO&!c?_d>Dv3ftIy3tDenbdfMR3%gdB)4EB9bm`Dh_HI=e z)%7y?Ek6tOE!9zRw>2vD+<^;mDyYdbYs!ls!s^D2f^ki&sGAyxC(pU!98WtOZQ20- zQaeHVz*xK(C$goxhSHHfrqt|T4O8C?z_de)Kx3K`O>we9k9=T;ZQLbGb1C*pe{3(2 zMrp}Z)ZdecZOL}@aPAnSiITKs)>MiZe@vJY#oZLD(?mImIds)t4i_8|a7?%}MxIQg zJ8!3Ah3#$j<8>IFRB<A9i+6|?so_!a9;~;{!itYwY}Mp{E{Bu9^4YdEtuOv6mVWTZ zagW=DzCt_8SiT5`uUP^;`3K+y@lK9LDh)5mq0yOpnI_1I$8%r8EOR+>)z_z6-v)uQ zw-0GpOVgUdqb$8Sf!Yh#vK^QE!mgnsv1?rpR=RVCd%Qme%{CQk-IB1m=ng3R{sDW= z<8SRZ0e|is2ny?E;Z+1@W4j?;E|n+j_ln&x|ILyg@O)q1IMgoBU^Q>}d$9I8D`+-D z&2S}rKBq5RIlly$SPNZSv&FZo=ac<IGpb(N?6TR1@7(y_wKKs6-);KI*7{`Q_9|nH zO;n|rFL7|&e-I2xwWmLe22h**RkkQfgFYsklmD0>?7-hoY)_#%hHCO`>cVXx`_!K^ zre@HRBa`UYIv<#pW{5i$?Pcb2rGi1357z#UA*H-nbeuGV^6fP^2P9FruGPV|T1eya zzk2L~UN$|E;a=>`M%XB`mR;KUm!0|^iu!Z|EDpRE$1ak>#gDDXZfpaTA2=b7>S~6O z+k!}Pb|Eu<E{|X0b*hvO9%b=)D_LW!BRX7P4^nA|S@%sxnkzdRXKTt)vjXS#^_+#) z2kQ6_6)5&ZCSFMlq{PEEG;MbsC{8>MQAL(yf4L7U>_3J~PJ~gzAwDNQKArfEltoWk z!E%FK>6Ms8ZuA#Ul&0gT>NMIA#knx<8(HvbODeg%g6WmYVaQDZGqvZ?8H-ssbGtwz zA~bOQ;7{yTRx=nUMzd9o3MBK(5a;h)D@3Y{#AiRGsbEnfTkxMGjt};sV@(oxLBUUi z(o{UOYZ@C|Kbv+y82$W@L3XSw-BKQfPYxe|gEr5_QS)UnOTirT+h+3)E$8I3SiC!W 
z46Qk;LK9_eam$A!+WP(<i*z^uw<FdFlgD|nKF{=NU-cq3>z@z3ADBn+zK&w@<$g4R zdst?gy5YG~`=EQZFUtKe#rg%Qbko<Iw2yQD?vRaaKr46piDPI>Xgsd{HI2+BsM6E! zt8fQ`#9I%~i1!DW@}5{`)!-SLIC_#TrPkkq;KotT<9@l&SLaE%IAcDAH>*%ky&v6( z`X{y;ykdhE{S*gm$U}Y9qKEPs;tPWijCq<smkn*P>gP{k<%eo^VYfW3exD2(J)SUZ zWH3a(S;V9}uCTrTInwusoPp;iOSX<<ac;UCekLtCY#~k0HY8E?H#JyWsldjnsnEw! zL2Tz#?m3o>p_y_+$t-_3&A&5+j&R2M?xqQprEG>{mRg|B$dBUv{Tej#fB~&NKc}i= z#C%e;*#%#X0%5*Im#B9ro?b2b%kF&K#a?nRW>AzD)$Fk$-NWzUw0nPy_O&3HjzV_G zUxLOozhnBxGQmJ<Hu=1m40W2aFu#5v-RMxF;-@}P=j}&7uS??spC`;D-Hq-i8jyjt z3ETKE0r#4;fcks|9D4j98@^{G#Vhne*Cp;QALmY++Fr0HGTNeh=>ZmC1h~BEy(k@e z4o23=v*&Tos>Wa8UcQbqEWzCiUumvjHGAXW{@ps3{a+phUg76w7<Xt!JQMwP-4H`t ztl`CUp1myO`>y+YS?yw9G7g$f!#JPv{T^L9@_7zUEn{%#=0MDHjDaMYLh@g|(Y`U6 z_3<)hdvx{5W$BHoW4F|C{v1Wxzg&rCxIP4@!5N@5E)t`cWl?r?I(BqDV2K~kLw!Ol zMaw$zzIY1e^mit`KHS%v5=Z7SRxF+WCI{>ds2r<w8syXapv<l#!p1uz>4}j9^PI$Y z32x<V!_8Uj?^}C}C@+B;Uq0V0vEaGozIbK&P^Oyh4OUvwY~PyU;@TTlbV=EbdQ)Pc zcCjthDE)T+_t~3@s}BnAwluN3G5kB|9ENI3Zb8(Tw}Pq0X{dK@1lc~eqWOe7&ZD$5 z@vZ-MF-y2AdP~ftGFgAx(=-EoBcHKd-F~R)vy<&^wr0N$E@2BJ?u*;r=%Oa)DwXGI zklz7&%=(#xGEZJI#ffw1;y4LXIXEA?KkHK6n;XoKv-1+1vQhuvH0ttt0DZsA$Bq^a zTxj_fHoYx`6?Yedx920_W_Pu?Ebc$1I$5W3OK1)3!N0Jm!3@Ta$fK0QHEfN|eJ1Hu z$2<1JgmbFf+4HBW@M%pVsPMCFTQzqZ-(15UgifM_hqG}1@ej~gJ`QJP4G<?3oP#&c zn)tiv93)8ZhttWnw5+5LTQ<`jzuTOHQNlaHGHC>p)i9uIdNy?Ng$-sa{AJ%)o`sj+ za>N};*=&ihifyfLg3i-EWMDX6Y#M8f8xxK*g9`%P9<f818*;xYcyR)2+`pL_Z=OvF z6M5$K%pdkBudV8vbuhH&-xu^}6|oqbHaLGi3E$#)Y)&}?pCeYV&aIU$KNAJq^_cG- zIRCVA*0ZX@%Z|*8&uKc=6|tqY0-tNSiwCx)(@LksP~aYmJ=61Wk8T<4J-Hc<^wzQq zBgY8iyw5=9es9c8ImgbJOvBVLPaLmeimmSy$XVM7``*x|_Olk`6di<>`$w?af9=Bf zSN1e*qM>Lzppb2UuZ~GM)5z|)0TrLt$62|Lm}QYQ%V^@PsI8nic-?|?$s%!ZL?A0` z_QHNEW{A=kmx<p-Nnqs%f%^Sh3SnJx!X)W6&?#lfj{ivk<=<!E)A~O!)Gis9@jY$% z_*)RMKL+c+?PL2b-08cY4!!v?o4O`F2jl+Iw7tj~t(=a7!K1OL|MnzH?p!X^Jm)>> zydfk#_?wvYu3iL5Wt{F*z;<715MH|4V?p{1O#H3~57u|Ut;>@sP^lji+s4tBv9YKa zn1*H9QRMOKAFIzl#=5x|WA%YJ)ScEO#J_!6aZEW5Hym2XyOhbKY!nZUM;3|_&U!Fx z?P!=0l8PfvXW{e@H7q1a7IzE|V_($!Q4hr6#tDY_-s%v%u=vDc(!0cmzSGF~)o`p! zbEa&41z}k8Fmmeu9Zal}MaMC6xc4NY!yVrFP1^wNqsl9c*IkAst=*h^qyhU@T4K}@ zTlzY8EN+Xt$bM!Qu}x=#sX$r-KPf&2-@U6~$@%xJ-WV~&teah&l}Wl$`jFA_j~z?1 z75p|ev;L#fX!^HW!8*p2`j6&pg13_@ZNzkHRgM5FSSNm)IUWbR>5E$jtzt`}V=%~E z6OOIC0`3RoaL1Y_Fu{4H_^19kM8^N-eYPGU;UVHxe`{7>l}VSR)aYSIE2~#fq~xmt z8o!vxZ1T8ck~@d0W#0)O2D{+z!tro;`!kSt&O>vZgTl$Rz7%TX%w`{z!oi1(*y8-x z?BLGL?4q!O$!02`OOOb$zvYQu{(~n^r&7?wbMRkzHiG6@QlG?`i1`AA@EpchcYaP! z3P9<Hb15xM9#5MYvNwuSXzZPeDPuk1{68=DXxTM!%UD%9^64<U_~-`wTLs|IrA=3F zo`G_|+w9cFYs_M>JS;xcA3KZo@vP2L7Fj$HhxR$f?$1la(i_f#Y1k4ds8E2Z4)5Un zi8TJ^OJYrmzA(IS0K7SEhUav9q1ci80IaXE^T9ss@q^Fe?Q93OS|^9BRnrl(|FXOp zoP{tV2qoW|;{?U|wB@-!nnd&Nu&Y2jhDeK9+;=!un!6?ra0j8kD*E3Yj<=gF1VcG5 ze0X3q-v6sehWst?uCN=fSvq0qp<m+0oiS|UGZ|d}_#|^M*T={hb;vm8LFKa3F(@|? 
zu{jJ6KM-M{%Oh}|tSYS3)S<Z(FIC0Oo+&yVek5F|(<jS^acrRh&;G6Q!>`Xx=>E7~ zp?*V)pxCgHd6%68_bv;Vvg(&`|6ChPEW844oEdKZ;UVOO?1k}FN|eqSLMP&NP%lps zBj3~txvk1<2KQn2TON$vsk%7A*O5%7jD%Hv$FbH51)O#&lclDvV!!@a(%$XBzF%Ak zUTx{r^3x9cH`GEASW`F86NWfHU{4aPY5u}#<orZ|)r@?^f@ez5#}k|z^6`wAn0tUV z4{sCy>xsu}Pm}45WCX?@nnKH0rjXW-CoZ!WjDx@*-7F@^gF1?2;K|k^-r<nMRoX*9 zHB|<;N?j38Dz6ZWs_g{R`SL9K+ZJ)3QUI3EyAO{>XyLufX0%Yq#AH4@-G6aE9QfA7 zru8{1zLt*=SC5cIxA&GXS^F;-cUa){Q?9t|v>9x6E`_!EvxLa6Cg9lDihGBr!|Rn& zE<JzR*-hsGXj5Q~%7<OaF;7(tSNsg!rZcI*I1;-*z7z7C6)0q_G+Lj`<Qc?^tj%G) zFlgIxaQLT*d9VfI3T`twNj01%H5{kkcNVIB?g@dvRABV(sa2QSvS{laN1WjmiA_6) z3PGpN2}X&LSm8Dqp~M3|TgO7Tfj2s==|=~PYT@|+ZHk|E6!txG#!WR<Q0neaRd|Fo zz<et0>1OW3Z1I7m4_%xXN^i$J6F=Pzqn@GaICWPH)~x1!qVIeM+98WIAD=_oPdS=8 zD3kU%ao6$YE>_GPGo+@9Uq_r3>w|nive~I>RL^2&H(rrm$vZGLlRhvflQS#3m$9%J zKb>@ybzM$eY!Pl&d}5uQ&&7(>Js_&^{Bn0DetV-t|DEu~w0U1y_LgiRkdi3#>^nB5 zr3O}1hmgOOBmG%Fh82m=U~sA>E|gNi58)1U<#(ePy>$Q`)cX%sl?|h@j-Eon5G`of zF_q@DI^myN?qsABAV%iCW+~fd;@+sM5MShile+jEquw3X-QCLqD{RTL-$c>9_!ZyD zzGf|j+`GF<gFYWn!GAN)z`i4H^yGaU`A<%VV@^{^X5L_G*=U4HTAV*=)WCjg=uyeT z6q+?(kGATL!q;gw=+x)4_^fm;&T<<MKYw&omTXKxg-R`PS)~eA-%p?)n@rHBtPQq> zy5gnhI;5U&jHxS3;9^E2yQp3uKK(om>>N}`%J)7f9f-s!O<%+hj;UzTZ#r8tMvBHt zab{%eb>Jghc7^l&C%X;CBl()Bxtg=RDwDBjeGWc-mMebLED^L9spCM${xsTS6Ijg? zsHJN+OG{oO#44m<`>*w2E|Y@$CzEjHqZBSQ9Zkwd+=Pa-NJ?%p!=PouTuPV%=ypse zyO>hZ=2|G{r&X{c{JrM7r-*IucNGrCT9Q?f4ZQNS#P=pTuuIw%=Qao9mI4DT+O3Ls zPZhgWCJJ}r$79n|k+nH87MG`iDZk?d`?t#Yg8P|*XJq2zr+dJq%Z2aC{PD$(UF`l% zMRfSI1^SiS<CcG=Rh*5<Mh;1*!KXIDhIi#KHFq9HkC8;7c(i!_$PJ-=feE`|-3k&f zGH7MP8Fr}J4D+gAh<){jK-si?%(0H&;U~NXlT|@DI=BS(pQwPouNv6x7FCQ6o{1Bx z3SjQbZ{n_j`{2hdbDX(+Jk$PhnyowsG$Fx`*4qx{2k)_}W5wR|acd<c@~qeM{65sF z6GqipS}3ug3ue5S!nShmvf2zwdaf?bGT#~@q?R+g?W$;S{2AOjs*Vd)hvEvK#q4hH zbvTuHiMgyWWQNI?nCz}pdOLLvU7ymI&+mp)oSHn{ZXSi5p+;0JIU0q6RQ$~M96Gj- z;ccWD?7Q)cWoRbB=rfZr`+hX*RBm#y`8Wb2sz!sw`ZGensITnakPsR@ikp%joP%qn z*O);7_h=qcC%YqaaQeJd+OXRTRZq_Ztu^1--2RPKQCcE1kx3NiDHvm;LK#bcx}LcQ z^0!mKILxhIR_SW-kHrMep^by0Fn1l#Z*<=Sm(4P`F02-|ob<u4YwMw6Sq}D@Ah4$J ze=x|+mC{~~aY>b42QpWzDREIb=-n$~8()hM^i2{2<2Eqc<q52r?_G62%);HtyJ4St zH9H|c1T2^A2mJ^G%uyPKg{Vj0^_`iKry4%0I4k;ZXkd2tip9nS5l|dF4=&!}4mtHi z{Nt*DA*GUdS6LDl4&i>rLze8+$wAnyr$E~-TohILj(nr>7&_pZ!$u~JqR_uXodWyy zfqTp2FzDqtI9TLBC092x(?(OV+50zi=5`A6%gph}`ykwII~)&IPZn%>FLt!P1XfHM zf#KJtQkVK_AtTono4*<f7hOJp+wLs9pqYVd3LAwc-XWOlwnLDRC>Fl*H_3kew=n6v zk>DN?!1*pEP<_V^!*z2pGdYwd7)#NRvw1kL-T}3%fznr(LGk)DtmACkB#AkAfvz!0 z$Dvd)u#Ei@3fM4<XnJCuO?9cSoUeU6DSGtLCDSG;lE@2%E9Lvd;n#-J(gl3JJ2M`C z-7$cEs7NIPr%_lI_trcj*7enaGN*9Yf<-=Dd{7A++S=K}#wf938PC)4{8-VV1m^U% zfjbaZ!`DG!=+K*r9>?dQu3jII{25D1=PiViS1l}NW`7KrJrv?@T2tY&uW%&9i}H4L zURstWA^!89%RMUV!L#lf(+l&*eXIQO@_`ZbN$D5ct(GUY{AbI;^eo}ZfWGWUhCLg+ zYn`ZQlY*Iz-`Oo`C+e4WsB(>(Aq=z7fbVTO_;rgE{q3WKQMcEzVHLL_=Vu<?)gFO% zFO=B7T?<G~KN$Agj3e{Sa@2NUICnuWVpsdxW83FXEI2k0-^piD%W7#*+Ym}26R$!_ zKtA|)xZ_@ff!LmI&&FRdg{XtW@PJu3eF<DF>K<~%ZxKAF(amR>x@kCdbsf}<I0stO zWN>w$B9-PGf?JC-K=W$}Gg<KxZu>3+-v|+|EFH%guIq%?_m6_aWdXO}o{i<ahwc{3 zc|qqNgHp&AF~@2hyVasbZBO#Bbz?H9ba3vh<P&yrN(xq%Wsy8-qwJ9(+&du!{>IjH zxxWgoZ1<+vaPATulFtSYm;#3f2BO~i<)XCq1M%mlpW?dNb7AqPfi9g^+l6-}+t|pu zGI5og8tR3Nr_rz8z@ShCJpU%0ZZFSe>tY=6<F{a}<L}i%`^)fdyR%E5!OqyuIRKA@ z^Q?ctS@2L_fLDzN(B|%u=%PIXUEi)1*!(f5-8+`7QXSy?tP!|;sRNxjVuqKzm1xx3 zxuoGYj=Ckh$$y4G@pVs`*LDZCwqh3UylIR2of}|>R;|l9Sx2ls=0yX7vgz9`HS}(M z$M%{JC*#pSVf?mulmi`5l0@7Zo`gmhB2aU}P9ZL5CaO&}gN~<<*v6-FRO;;pqKUOo z^<O=l-Vg^<RQ0e>Vmmk&r{X$2CA8gh3YOVri8DVf<T-tjh1K@MN{0BOJp(6;(wq&P zN*d8;MNr7acP2Ts-q)564S6Q!{>y{o!{qRo5$~!+cCv%pXXB5PD_Qn;Yy8*uj<8Eb z9XEX7PUs6mabDU`{MqeF0mdPyv+N^#dv~a4Gx9fZYC0rT_V7%=W=2tov_L)&3$-+H 
z#E=<qzIP%`9}z?6Zl(%PW-3vi;c>Xx!vl?Uhfz<(X;2LhCcDX@=)~Od($PZrKGD_X zrBVp?%I{(64+fx~-YYg?cA#h>=S345Qqe8O9c;#VxU}7LAmeOvG*eWepK0?+K_#`S zx!px5ydHtA#zw+H$FD9EU;hzbO{rt~+Sggm;}gPSo*zE=aSOXPT$K{r>qP6YU|M0? z$(qlF;NDNZ%wtPC+zxZc*8T2iGJ6W|@<ig(H6i4z)C)HQgIK_Wk#r+d3tzvN!_i9> zX&1kc{1`tOj^4?{34AxZOQDm=x9E|sOEh_?DC67GI9jAN1T!!eUmje`=JzJRweFX& zdQB4ccAL|I(JKT&Y8#Uu<b}mU4uM0~AV`a!gZFmwz1>4E9O<1-oA*VdOW{?qpdy!S zDty?_2Y+DRBzZPkvWQ)Y3#GZbHSF@nK!V12+}1i64_%LhXDt;%S;`d5m)i-Nx?4s4 z0B6{oWx=!4HsJdHG#szjg=1?L(DaTzlqUC6%y06hdqd}w!H*O=@*#yZLKm|sKa|LG ztP|RAe#qa8_r%{#M_FC}p|EuA75J>bK)fzhB}_Pcj*($Ctc?rDzdkeY9?x&;SblTS zInKQ<IuXLtCB~HaVF*|mOkpd1W8t<u{|w(9K$%-pvC6uPjhdJO=|{UomUN!Yh`JBI z`$uBeG7FUI31m{9eE0r@cb}2(44>a;`%2yE@%C|eCa(Z;Z(kC>D)By;>;gP+=?|-f z98?M11@Hdp;LcC#WMB|Smn&w_y#F&R<!e~e*k<?;)d#h&YvL#U+4%Oz1T4{f3;%72 zVu2kG#iO6h(es84Rn)q|rO)NC$=4qT58DMP>kV=2a(-6pt6_Hb1=##ZgzJv+R5UvR z*DDUh+njaUb3Gc2NBE=knhMse=}U($dePQ_7r|Cjm7>4<<4dpg&~$5nV3<)3n{N+b zC*`AEZiV~tGtLkT+dWXYz6UIxXX5_IeN6Fk5ZmuDg<ja4sa)m4zn53Mp*@p32$LVO zhJeRxP1tJgF6HO&Vf89u<{YeFI+aeJvL}u1Qg*f?K+rjBi#A!0tI~}guoZ<q&e5aP zF(&UQMB0oc+lf=L^k6#0-!Xwrzg9z*l?hXNr-u9Glu=<^Rh7Nr0(NDK4u-W)qoxwR z)1DE4_UQpQ<8v%|j_0m4ug`GdWroY;MNZ<9w~NJ!+x@Vjp$8V3k0;A!GqHNeOE|8o zge$L~fZ~ij>}+O*V;Rrmzqcrb=9rz(a(&OG&2L@kl-P$Po+hD!wGEw?$irD90?Dgb znQ~)1vCb%uN|vaQw)$OG>L*X}@4e|v&2W76FCDL1|A2d$ikL4WDf&D$B#!{jyyCmr zEpwCcUGq0)zI+UBUao>^|2fg8=ebpHG>H8wlw?zK74Y%qIOw0Kj=0MZr!-E*IX`)C zcwQj8Y?E8%Jt>^E#AQ+6hkJ!v29s!LSSI~(4!~SGD*7xu$Fkmavf7~=0B<_ssP{(b zIXIq1WaWV{cS6<PXdfD^;RucQs#&zo2-s*)3u%wXqoHpw^*yzKdJ;{r_td@0g4EUQ zM8#wF$L5Z3>Wm%ltb3F4n-<o1*q;?i1mf}7Jj$%&ojAD&8qw|qLA;N!&)&XjUtJ&e z<k=a9>$%XvwjAbIcZvrpDq+5dBV7FNrC9&@61#aemyQ+HGH-=w+|w9`3tIii!FMLD zGB^mA%zdag`8qp%ZW@h@8bj-*KWD$se-NVro-oN1x!7c2j<zRHLrke5l*ujv<(rc1 zYPbU0TpmduXX`@lvJaJM(MLf6t?1W7M||a(3RbOsale8IW%0~jtJ(p!hiBP3lfv27 zGHVjkKCrhPiX`=hvv00Vq$at1)@VN-YxO6QmBA3qn*5Pn4U<5PlS62Y&ku2&-AA@1 zUkYvCWnoTcKQjBRk3Fd&!oM>SRJhQQJbnyB?bL7Vzh5J8%H=OrHnAm8X?_(J<nTF$ zk}VyyRYmirZ7gMQ4CWajovc44{+y_Y2~Y!1YF4wG9XFZH8AW;=CyVogqA9IzGuY3Q zr0Dt`?Bmw}B%YrOS>}LnUSz*4&1o5TkgeOLjooGjG&cF9cv|XC<=~CgqNZaos=)+0 z7@|&BwRf}cyfZg<iX?4a{*?7O9YLB+hR%PAjo3|%R>6MDDxq6RmIi$^L(kT|Z0qA5 zzK1SkwH6YXllTgLzL|~wJNYj7h7T=#k%>CUXWiGv($;$_E~hSs(<}Z4da^8;jEn+> z->Lp+yLT=PP>6y2Yg*V5HB6Wh8t$yyCy0z1&NI!0`OLp^GuyQB1I(n~kUn~~csC}M z?HFUh?ibF+b>IAG$<YW*i`dCN6&)8Fmk-90(3Yw&7eCrO<q?}cGz*pY01TS&iqXb; zm>gn-k=>uG3d)~CMd`CDrzivV=e<AXao*n!n_`x}Nd@i=^uVjWjcksYIjvtB3PVrt z7IWP*G0EN?y`zPyW<?u%moWsq-8O<&Ll9Zq=j=<~)6&`LMW>!kgjTZ^!j!u!K`HWt zh)3?TKS$Px-wl;eGv7s+tvwHAe2nmjuMrFFErV^(?z%+sy;7{~2^P=uQ_p8|){NbB zDi7rSldIWiT)mU+ksgWO?>@5ZhRIdCVx9|M&T5Mt=cO>`qCS}qI?Zmx`P1ygAL68M zZnTOm6$e|kvS%$1+1<!>>{%7(;T7Lw(lc&}LQ5R_jCEwHGMbo^-pM{);hDZ^a)RE- zHn#Qd6;WbM7^#<>6Cz|=g@E|E7~&cPoon*pca$jV@E)whwFPMXr~uS{YT|jlO`x*b zgf>5FfiE5_#oh_pwC(O}d>tf58eIk~Wr#N2($ru*51S!xH{Ufk4#a1?YqFukhUw%_ zq4)aB*sZ?b*i!D9Rb82i!~2;~;@Df@c78lvDe8h(nbu?(6)$XiT_84SG{cem{n6Y| z5#<K1W9c(DLR8~XFuuC6s?XUU&IemmVUKxVnwD22WH)KD;qOOdy{{^r3BAD*X*i|p z#p8mc-IbCBZ{S2~4h@Q~<u3Jni1hKr&&3K*7nwqG5yzk;Y!OSb+ry@|9c2eDNTSiP z+3ZA=F(fn<!H=@FELbNSF1@%@@nY*xhHYhV<H-}a`6mlY`etCMc^C7X8-VI9f#kN~ zHPnt9i)(thSFkjk+9wylrUT>XX1ph<8>!KrHWS+aQxf$wEKos58=v0LA+tBnMU9Id zxVA2k;yJS~u_=y<o6KBRok$W35`5@xh$`Q6hf(9(@fhjIbCAEx@pai-;X_*_$`lWx zHiO5oWo8OGtc)S6E=hW(6;FYZ`e?7DfM0IULyI_PTs+5^`WB8N9VG*Jc>GM&-IBqy zV%;5=XXbNYb#Jloyg7`%6i3p^EjOX=WD7IZbj7;vJhIEu1Esb;bk68FthMTgg+{KZ z|NAWrFPKX6UzbDX&K}lBc5~G}+a+w|5f$!)y)CTQAow&Q1(&ED0>!D(?8t&Z^lq$T z14kv$mqzX{8J^C}xszN=_X{lf{ugGymmw_nqleN>Y|BPN)N5~pqccMB=$2xkXh|3y z{rgy~;hCl>FEhyPbpqZ`{R4JGeem_-s>+=w%}}4SIWIQc<lX)sE_=@C;mLohwCF<? 
zdTTvqe&006XwGHk*QCJ;uU}&y)nm!l#!uApI1U=FAK3BS>9|i}Ed5yLOyPHvnf)(K za;>w4z<Ulbu|b|bs?5enxdP^F;6y6C6Fb=8BYfIzjyHEZBiJ>wC)!z9<!KI0kK?GS zHWhPT8)3d*KHJY{m?xuLp+&C+`foHMrHl7i=eAeyU3MD({p&+;b`blU*e!~lNQ3V7 zrG8-ntnGmk+1KRY<9FILV$3>WLqaHf>mbJ#W|y;x7){zyxfr_L4@b`wndbKw>`p1t zxPm4))43MXhTao~&yQy7gtf4}@hJp9l7V|t^U%#Cl3B!QV8_z{I_r8FHfhIEwnYNk zZ<0Yt-t)G%vW3^vRheSxO>h;z2!dM`d-mBAqwLnREzVb<A+btaF?R@5e9gg*YeTRn zqn0K6EhN)3Dm1`;Fe!EyS8ab;a<TG+3~hR{h_w$?qTOHnfWzRK?D6Mq5EW#Lk2?CZ z1N&dG_cu1MpTR{e&ApCYX*8i&T~kz8%v}{LCR4|oA~>@znC5>=B$KUH!cv_Ke6@cH z{eANp4p|yw(%I{x-ux7tW1L2P_s&Im6DO=#`3@X*nlZ;S<>HZ|2^8%njoK<h!AX}p zO!S9fyE4xUU0{M%d|&ui^OJq~6GV$w{)HXClt`Ezisds7!PeeEnA4&o-d((lecfe_ zW2Kft!i-!JD=LMTzr4`HIvREa_GL=@ZC%O(Mlp?o4mM`%Qg+Yp4!hYHNBc)|kMpYO zP^6lG7i=GZ{ufvN?=8fK`-%k@zv;N<zgN)L&J13f1X5RA8_3M&@4&!8G?H`ehhO!= z)D1f!cGGOSZ#tAbtimZah-W|Q-MPOo4qtnzlIy()7`0UzPb90+G8YHjTbKk_?wjD= zvx?NM3uroZ7~Tv#1FKaQ!;`YUY~ckbNbfYo_Q?_SO;wRJ_8w=Q`R~97N0BAJch0tI z7n+pDqNAf3CfC@|+1(1T6US27=M++C;@pfwWsvos4$k`T3iJ+{N~OIO%;8A{ZcTB= z7t1D-&+7FoSIb;Hdnpwa7mcR+55746X&T;Ik<T(J?dbTMnOLc5i5JB}I2%?8UP}|I zR+Sslm}C91>EA^V6eg1S_W;T}`-Sl&li;Y;#74*DP><tP=JPQaZ#1{Eq2_Ml?<6%W z37-islOou_=rla`R*H->hSP8D5In6sj<#`UZp|}KI=1T#+hlYQp5<neWsD^m`YB^W zel0Y0<nbIG&ocdVhm-wouzzVUg&|9`sa#?zcJB?M<EP&+txfZAdj1f~IQ<hc`MY|# zSsbqIZh<YcGimTV9r_a-%YyWe3&YQ6Vg7It=J4OWS8v>)g(O(O0ZB}4zXUzS%UE6W zOI9+~h`uX-XS1}Gu}}3{D0?7K^=vhI#9e7P=>*fB?^QV>NCiXt2GPFN2Oz4$jjUEx zfu-6+sLgg2=G|E&9G03zn>r?u&T~Cnt~VHm{vAZ^27M^<c{aI+C{tl!B|PaFgR8Bd z3Mw<Uvx^GT&?L#8uI$OAuE*)9ddL~e`kT|c!=YH_8$_G;E@kJ(ZDt<^G(gLoTJ}cW z38$>s4tZ_-93Ec-zE_`tbIJ(peCo!EmP|oOiO1aK7DMwJCegmbr`VA*jm~o~=0euR zK6r8GA;I_6B-RtOf+-!@AWZ7`0LP_I3&V1PQK9W63;wf)?NQgq--d3&yYg(9Y(Ei8 zzH(Q0(lpAx%J(GSN7GsPzSKD-4RUTgWBVuUVGo9d;@+ED)Ds%OCLDdj9v0`K%ws3k z`R@`euJ2#*qt4Lfpiv2&z9frv2{SNQ=R$>!S31h|IRR<UI4`}Yij6YqLuwK^@b-H- zcqAv2FOJ0Yn|gS^W~lILW*Aky{tvveU$S&O!wSW-tk<TVy}P;q2RrLiXZ%1szU>|x z*x`t8Y!}ej57xB0a5y!usAiI#1F6=o2CgLa!pzW%EW2wUH4DLHu|uoMjAyQ;w2X)b zWYOD0&eSkp1-I0UVTaxqu=R;ics^E!DwPu0mke2|@N%Y4=bPajXR&E5R>I;vA3%`G zBagKsF!tj@cIl%flPDO7ttXpc{$)efy+DEvt~F(c*56_6zbxs+C4IJ5w*~U<O=q`C zRb7sa;ePBHZy+hpkgi82(c;?c&MiYeuon|%QEy+M6Wo2atMCy_nCC_Zt?V$ThOxC) z7uf;R;k3(FnwpPrCj8FV;<8`E@Z-@95EF$}?MHmsJ~9!`4N{|C|3Ua@Uo2@`FqgT@ z4zl~Z&u}-wWG0#MiK)C#BgNAK&Ns?pYvf+=4$^wq{*Lph<uYl>b8pmb&7hz$Ge}KK zky7=1$-Bk`Yd)W2R`rr}#>WFwk0{ZX5l5M#Qwtm)*TDS8O(kVNdsMIB*?I+UlFXb( z<921R)MOJpf8PiH@mxnw^h}s?%N|vh+tb0sS=hY}u#b%f8ka4HRLj{kd|fm<Toa1E zr!;8B#62#F(hr35&Od~}kb0)KGa04pA2Qc5OW>U5Vfc3OJ3Bn-qtN8#NWFZY=vg)r zQxZZU<dQQQDtw1?<=WJ=LWPPlHo&Ye9`rdqg^KQuq|5)A(#@NXp}MCR#;%x8iMzwm z{6;nz^fU{fLQK%?8iQ8O;GC$GP6LN-g!U8ng}6u7Xg5TLX*OihB_nByFC2)LJr<;) z)CUv%YuK>C1@JLQpp4IxY2NO|;ye8s@!^6H&dOg0S;5;_{lI$I_PGU$r2|>^x)^$O zJsu<T4~X|Sr&rDU&kC2iC*wlBlS0$B0vCmc46OY(3d*MA$e?c#3%Al^(>J8!+=Tfw zea>{sJ$@W!JFgO3^pA*_GjwQ_Z8ePTa>KCyxO@9W0J|3CkBJ`ota-D@tTtV&>M;yw zUuT!YvcKg}(%zq{eMjN-f>g-7`wlKgN>Jnh9okgpPE%7H*pW0B&IDO0CiuT*nkUY) zdDA|yB}p5FwH{x^lD7-#$^$7}$+@MK$6Lj+SQ#|jnnlV9%Is~@Xu7xi2Ec#ZrSVHu z+|3zyvwj?cV!;j1L^@H*GFP_miUYMu-4+cZOCaRKK9+OC56Ad<f~?<YvhJCPXZl)l zFF`Kuu3rM4&&#3V`F8ldLV{$wezUd5X1n;>aFxQuY;xdv^fgl4fwp=G7JaqHslU#! 
zM;4<gD_0%&&aVMQo~O#$Z_C<e?P5Mdr{U0Bg)ES>TFeSd*iOzp&h#9?&NRMae^z9O z$3FJKyX&W5&%U#OGFyZkCr>hEE)>36i_|_1!1Xm_>3Z-3_R{PrTlIY}>j?0t)Uhhq z77&ONZiJ$@oGQ&b)5*RRM3D?|_h;!M5k_5LwrL)8u%->VbaQCS1AW@u@D;u#oaH{> zMNG4J9)=FM16JqLv7YmNe>ih~O1UB4a=6Q?I*zc1D<hGrREQneQEA6ELr9Vn)=kQw z+}h3Tr>#Avl}$i19X)WmIFFw8HK&8q6LGU?A(ZX826Id$sB@Y#%?xj6r@i!8Ty8Aq z@PB27E1lTlfb$HRzB2D7cTBi*NEm*Q&n(A<P}P7qY?WzYduOc|7ggP5{nKBG?RL2o zyJ9)}+Ak0#ey@Vzb61Hfy8GD6lMO6xKBC(beY&uA7TP}H^R{(|#c7^y_=<B%@9g&i z&rAuH;JRE4erJJ7Q?<oAez&3i>?kS-Y=jZXPgqF(0*cOiDjsd>$DMoQnd890tRpZI zBPUIvMl%=68ySb^rgbr`*K*jWG@otRvyl2p8`0q1X%xL$f$rZ9#daoz<(oNdC1o`@ zde(rn#0i&0@Ab&M#~-Kflf_}%?toumIdj>XgBBW9;{Ib*%%<s-XpmJPmh|(da$6nT z{cj?k{vMAnYGx9Xn?k2X9$~+9_OoSo`oT=+6k6eaR+I|KrCXKG_|kL{8&oCW3*TvY zRbCYG@4o^oi#RM>?|@M`i$GYINAHvk@!P0B;Ie-fK3ZUhDsLt-pRKD{<`PLd^!yLI z^o`F3GCs4r2g_Jr-bH*LJAy16M^i5UU5wG5MFmo7ki+M!kGB72eVnz~{n9b$zABRX zG9UV_mQ4$;O~qf11EFYwIc4b%!3`^G*ma&4I&pIp+!A-Q@vpP6X4fzFdX1JaX1)v! zVP5pW=@i?(I{=@pmuA0ObJ5#*GmPIJOLZ2LFs?EWe)}ha|HoN4#b}XBXhmPz8q70M z_y34vVz04zLq9Xcj%=*s_vJR@X#hS`u;uj)c(`Q}&D!Nbd%9Hdp?NO7?YIxUR@Lmw zs~C3m-Bb)u1nl%)$-4EqW2?}aCY1B<#W-DhQ?3CcejgOqKbwpfB)rIH&1{?#t4t@i zj02bKtE|v-07W<a5CpC5!txF)w&(b0y430l*H``!7iceoJzDqSja3~i-RgpKM#_-k zC}q5n96)QgtK(m3eRyW{mYwWxhb4Za;YocCsc#f;^jH-zHnAp|)8lCB>_O<Is*8Oe zM&Q}rT+E3}gmH(mX`22GVaE66Y(a!1?zfyn%VcxWZ)q&5J<f#n<F2s}8&<m<{4)xF zK6}Fa{8kH6PNC$vO&ayT@Xx-64q5JuMepGWD9B`?ffdg;R+-bbzZq1ZI1L}|&}LDS z4u}Je-xaPMai=HG<mj)5lsKdM7yP|Cmu|Eh(%n*j^4TO$>0u@CbL>v`P&tg|<rpz} z<3?!xt3Z!KH@Xy*K4w45M$q70s-QV#1b*BWgHOllK(I*(vmC^CZ(rQ-(x_xy^+X#F zuo%cvG^KrGq%rBt6buX)jJ@mloxIbShCfingC<Ehi)W#Z8CF7iw5c#Hx1Wp5ry4=0 z_LI|wjBD_470(@AxWNpLnz8+6gQzJ4uwgZ4q4C|CbCe!?mA#B5ukVHU*8+Q<_6Fkn z9|OpJ09RkgVbtL+?sXG{8PgouriTrz>}!?F;(3-Z*wKO}J~gL(Hs7Fje*(FzXk|AS zSkVGqB)e`)#s+i$F!$|Uu8u=7&4N1Kb+MIl4?xD)8?!BTit;A;qNaZgeaxx^xv*xY zr7)hn`u=9$c34scXJ|Br-hoL|`+)TO9v97Yp2f>76KuQoiTmGQf!lTol-Q!d`tK_i ztJmtf%(xjzfjAG9%gew^szj)HIF0r$;7p@2x)@zK2Tn&x(9R;!`A4re&bmE@=7k?) 
z*LlY)ejx9SD&J=PHmqPz_<3M|ObIele5y`o^+&ZtEi_pri-RuThUrI>Vbx2{a@t^v z5)&OLKQWfV9AxQ1r>EHFk<P4K!m#eXIwmghpkxzM^4Hr6*_=r{XS^$>doK}M{E78y zOW>n=XDm7CB0ku_=SUA;ii;$&{zuVy$7A_^aXf|0WR;askr^tLxX+P@C@D&4QMLxM zBB^LE(%z*#w6wU-X-j*jXb)d$la}~hzrXt@&+B>a`?}8geBN(fdo+$66i18wiy8NM z=L9|%cTmfc3GnH{7Fu{Eo0k@x5&6C?a{szLw2#{It)~8vt2mm&?i--<k!ao`y54(p z+i{hZJ0^VY0n1JAk<OI_ek<-u@8^oXLS1K`ZT6mudb!}-RBM*MU6;p2yah8Ccb=7Z z38s89K(**@ye9VteIF_EOs1x|Qp=MUpDl!{amd}5D5LFhJ9w^8OA5n+amT$s^ja{< zF7KVi!zy>cR`ovE688yIiuSqh`ml;7wIpGyuQE!89npAHEH5#$M(x|e*_+o*N)g_k zB_$L1^~mGY9VE`(u>mXx7eLbWYRbE>&ih8!OCepv^GVYJ=jiD3o4cvp`c@H!F7n0v zvTnHHpfMKDH{isWY&<OT3bwblK-K+Qlxbmr4=s~f(z4=|50;=VoN&Ju7l4XxF+9eb z^jJs2RUJm~sTYILDyfn>v`rxGH3#9<{xKY~Ie@yxRFl$v@p%*{<K^lEY|L?F)&14< z^n$&#=k*l6)b2fb#$1DLSDc`i$}3nz?RahPai~=kC7)Lvj+@GJ;k8azoVai*{&_MR zys~G|<ICFow6iW9@Caa~TfyKktvybgmo2+R9HKFvX3~W3$>_W51Z5V9e8lT!S@l9Z z9u#xD)-w`sZMDYb4~Gh0XkT0~;3my;vPR>560cM2!(ojlY2Lkxa#P0-baVDKSTQya z>~ALU^3Ka?^NkFAoIi~Jea%7T0HnqkBebyC07KWVqtD)@w0!AoJYc&T25eUl9=qrA zg3&+3zqxYPpTV@!bq{$gOooenDqzm+G<Y>&2A8Xxq`9FYt6buWUz*yW`v`B;I4b5P z+kNG0-6gg-ZNtOA`=C=$Fxri+0t204(7T`)9SxsP7bXmcj~Xic(lrn6uK6Gz&~2q% ze+Bb&suG;OGElUi_sYh3;nLQxf23P!?P<yH3K&wD2#y`=X=c4yxt3WPwb|f;mlg@W z%T|$(Yio~FcT8cU&LhjyXW4VUZY!vnrQ?74#T0DN8J_zYP>Z<3g*V25=a!zZcjs_c zjmcp9UxE|Xei+0@J&?P7-AEZ3ig@_PQ`lwJ4K{om22Y1Xp`E@Xo4Txpm!|4`Ft#Tj zcZruC9Z=>zSGPl(>z!F+uZ<L?`AO=tyese9)=WFr7ee8vJbLBwhHekl;{02W;M&k} z+}z^9se&IlGj0hi{xgdQZB52gw@2Wju2CF0Pj-K>wGzUgC!zC)W$rg?jCi}f;DU5( zi#b6SXs77JhvV$&bQe|b*I_IV%en=M3qI1`nH#Cg&*^yLnW<cI?;(|~%?F*R5^A;a z<awPplag=>K5os25wE9l+iz9$;MH1k``ZIYMM&jib05IkEeGZ71@(e!^NlJD^zqmH z?ogDWiz`+Q!NN6GxH4=IS{xjRm+Y<yo~$N^RokH7)4t#mo6biTDsY8qAgSz+XRBd` ze4|?&792bWZ^XSd|3Dw?)6GUwZivSh*UTxdu|2Ncpen8YZ!j-*TmiccM(`8&KpbqK z!-r1U3QtBSzVcTIPpR3`uF6o1D-(RezhR7eBXKyl$Aj8u<>BYe_&*&JR`M;U@>etP zPQ)?Ni_Iqa_E5Z8vRHn;UF-_H@}%Ek<6wH!UE2Qf01Tbs$E!VhoZZ~go<IC;h8D+> zkbNkE;yXq0W-|#lwshh2H|rt($qekje<Pf;6g&ox&*il`Y1mk3%bnIa^X|qF3U};I z->bUIZ`JhphD9h2%e^5Te6P(-$#MK(mpz7Std*~N_25~HgD^*AT9QURlWh`2x9gh^ zy^a~oxa|-1I<ZA`0xF<y$rISCo=Z))+jB^rD=y9LCdKzGpfzIeeB05BK6k$$)g|h2 zC#gF(Z`I|#g5mS$`EZWW+D6M0iwHH4@7x!8X+6>FJUkN4k5|IA&4aM>*8aS%K?$N< zJf*FQ6Zq%vavFQVm$rNyje1*Mx!HCY<xdE9kBU>^)3*o0*WwED(nQ{OL&EtE8{xc? 
zE52Mk3BTD$xM*D;aQ0dN9=*l6b8<DrJEZV^O?3`(na$gFIzwpw05mWl`g_h5wWdqB z#PWxf{-!`y9O=dDUwh+o4Rt<i`-)zfpQ5TUr(oIpLRt}{#sORX`E}M=`t7os+Si5Q zm#{FtQ!oH6S1o~pp;2<XE53Ylo--JHPzGLPh~aP3d3uQv*-Y`3V@gKx#7Q~4cxex8 znA(rESLnmNvOv_alX(5mj?`p72w7xVKkg3X2kKK%@7s0Y)=<1&{0c7RL}ABJV_fi7 z59jWyBp0=-;Ir8abN?KH4W6a2X1*$>{?`#7-}1-DgSW_!4mhFXA_qJgI#h0JIFL^i zc=5>)D?X$?9J^Vqp#ABi1-7+<9$p^BX4UV(r_^1{uIAI~+tGadsxwW!It^pr#=(Z} z&iG&Yd&)gKkcyQjK+c>3a7jacK(kQUq7FVpdW!dy2IoGo<9^KqyA_9FfPx05*a{w= z@lRT=s)K3Uv&l>8FigwJ#)lt=p!)tPuzk7%ee5wEUQUbP0dJ$BWw4{vtgMRue;-l( z_bbw)aXS2JqZwaV63;tkO~Z|5$Kjj*FfO-3{(brmU5MHP?K{tf*5;WUeqkGEI_!}( z^1s6S{k9^<(-+TOjfduhA-v{^8#J~Lr3D}JXn4X<*<RsIdB>o<v-e6(c;F)&Y`6c4 zTzgUn^~>rY%D@Np|M_51kq@6&3+9;JnR4xtH>Ce_Fn(Us4n2yZuydO{X}EC3#GguK zqma!oY<SUG)HQ|J$qiuEajN{=F<p8;_76PMHsmcv#k4?WgRHzXkw3Rs;*iUm=%~vR zi15uI?J0ktZ2T5DDp*M>#vA1RuhsDT{#^N<c@n*k>@K<J1@OVTsVEI_$1hKQ0C*0O zf8;gMkv_YnA-6WbuSOj_F+UygL8>?t$79LvY)sp;1Fq)RQT2y-u30;RSLvvu_9{mX zEbD{OiuzcxKc1e5{%TT55PVjRW%Zg~93XbZ7grcT`jMH~?A?iL){E?^ttoEn+z03F z>O{w##Lj-&J@<kmMyNA)1+D8;>8`yxgn#+YK=&SQSUmU(ta6T%c;N`lopK9)?iz-U z31iS^feYVMi^mS9hp<s`06(~_N-h@PiT)_?sK4jr6PDR<DYjC$5`+2T%}3B<gC{9> zKL-nzI<fJuM9vZ4i%X6YHiUl#tLHn&y1yY;S$lC^&|Uf9wFJ>Sa7IYZ!nCCpvWZ0w z58pNkTi!YF;KWbx%PooVP&;TT55xw$PPp@v;HJM8zP_*~$u2#RjPDLat5ZQRd3ho1 zdZWzOYc#Q9+IYNJ`kbsrd*aXu!g?HRfuH8i<OszMeDPFI?j7&Q>7zn$)ifV=c%H~( z%5~sXURTt$86cf{c@p0I3q;)=lh8YP6NHuam5ZHY_~Dv~Y%83-MU{K#cdj+PYaU0c z#mNx)XCV8C-mIp1C~8kk!!FV<h`l;fQol1BO=iB9+Vo4r=a;<r_<~HFQM-bM^^KwF zk@-;7L%593?k6+V-%_B#J}6XvPE*F}g0c5>%&0yhXB~9q<n`^zKh+Aibuq&8`9EoR zqY9_3x8T$xrl_ct!OzcSu%x_>-USQK)MLPB=`;D}3(-rN&<sJD3#j)MKdiKJ;WHOw zx$)dGDqmoW-<~;OmO^)oP?!jl=8xx9pPf14p@*FFZXaA#Q{sE^_T12rz$z!h`R%V5 zJo{U4)Uxz>^{WO_vAjzLj}k<G<}o$Rj^ZZ~ouRtT4Y;BD4BCceqEVowJglXazGM`V z!Xe;EI}d?|Ycg+l6JCjJvp{$IKvX>^m>FH3!f-plqw#;Ffp7lOp-;W}bm~CDV9`b2 zs);ugoZ#T01hgBlojwL9;kh02DYn@OHy`N0E4EeBqm@Sd<IOm3J5d`OcX-N!BF5mX z0}5RJ=nxIP_L6=JM!JouU=S=z!@RxIp~}>i{qu^!{aP9;-Oj{SW8AR*oiQJ9*+rTU zZn-DK-iF#2o2hrPKd;p9LN9jpmG+;oFK^ru!VV&nxBbOY>b@cq-;|5u{Q77-6`qPW z)t14{*O|~7cn>tTT`#}B?4Denl+BL!u2E${JK7bYh6=O0%l~?3OGgSc;dT62`gTc= zJI=78t>Knf<~@p)zfZ@WGoHc8?`9C7ZOq5qC*zSq6Rztqg`I4Bp?$?3_spdu_{#P1 z7~0lE3K*4vMIFR0H!O<xXy1pRd0WZGP#3k1#z40edvcYVDXq(BKDj9Z!rMjQ)^TEP zTP)5znd|7<vn5awV#))&-h+itN3K7k1fh>DaAdtdZ@PIAs`~;y>NuL|xg+h+SRs4V z59T(tbL2X87i@~xV&$uGJmrC#m^D{P`!mMjw38R0T3zJ9KMaw5S6nE+GIlVo>ivsM zJcX}h*738G)pMw3UlQw0_r$r971b7s&co9-Xxh&JuRe8PE6*QL5$BISIW{<B@nxE$ zq=NSYG+6a<jF@`@c319>T_?++QP37wMF<vJ`_*8+A%lL6^aJxZUHGbzHtVzz96N{i z)OV^E#->>F5B0^=Q&$6*H=dw(@d|vy><$gR*9?}2A~AQv9C_yEENt(rB0S?6v}3in z2Sr9>^`aPl68cN}T_k*B*LAShj}Y$p@FtX=n1ObukHXUESbifo?ptSyeGmz@wA*Fr z(54vLutAj`_S*|*$4ul%x3;)!p(Xz^GUK!-+u;2Bzc4d=D6Lv2(b(ifs9HFO-pMk} z6&ZzcF{g3Os+D>g7MCY({0eK|mw~}WZ`{7klG`m=CA|&41y7F&zG|*6+lNoX$Rms4 za9SGgco2_19!{{fE*;l;IdJk=#IJs7IPH-KM^+v>+xb%p>K`2kzmDni)=49IkJCY^ z!;>N0tsr04blph`t~q!ucZrnrRS9<u5%~x25tOcAk8M0dF?Cr1Ii!p4p2h&$y-ak` zG`&z~tO}0J_zDqE_rRC@AiS@=00O67mvU--G4EV$dD<ICZ0^<wJM?sftMD3_4L0EJ zNlKjIJDoc&?u>30YUt(V%|6y*?pME??u(aw({2a4__G~{xE8{Ywkfo0mmfDCu5j0C zalzuh56QL5FWD^NJ}Fs?cd^J)&C%Tp$CirUTUZ6`EBC?z^(XLQZWNm>_2QMg?c}{5 z4Eaf|2QI3O!{6?s*<ihJ9)9(}tVNbQQ@m2(@&rD9e?EL`?ZNh*PPD)yox9BZLskQ9 zd41PIP@vw1r;Uzxzmzi&Kk1Z{=ceV*E6p5U8`)s)Z*@Gi_y{d&bYf_U!+*~PaW~lu za`K}8&o~f$flS{1#hNR!u9HroaGr*Z=WQc?(LlEeTzPy3_6!<`g(uB$`|M`wpXDO? 
zy!s@aPJReY&G}$BsaA^JI|bI+Jd?T_P3Fi%8+?CsJ}hq?#UIQKv2Nu(iq7xDKfYzd zQgvqzUm#I)tvgnBdrx*bqcB|6kH;r1l3q{MlMlah#Px^FQEnH+8#0q<sn!H6zdi?i z-X-(tai!Gh+EvLS=%AGMv<J%}$7SuUJ?NW5e@uPRiTv*#D4(3OlzdZ@aM0uTvR=e+ z9C4%uJazP;#?OPh>dl7^&+5oYIDND)55g7IS7@+tEMAVCfd3x$f$d@swnb?+k9<Fg zvfX|8mbn>e^u9x%R?nqAv$|uumIi7#V1Q>WzEHO4qiz&E$=G@~+7M!dbCrkk%fDHI zb#;%z^)&h4tRQrE?nJq_!{E)7CU>K2(b!F^kMIGBJ)f9^+w^Oo+qS8=?Yx-(O?yX0 z*UM?yhN-C3(-pmsuaJT>Us7qPCibeBN*?!~(eg!a;j-YFdzSWtk8Z>8=dsb`=`<PR z2CbG#^o{wNst0C@v)0u0HWb&jq1^0oA}9C@ww$dol}_!4Hus!xQk4PUeJQxig3*{C z`o63yTJWn!S@_Ll4a}eG&y_BMp<-)EyK4wm3AXx&rjO+pQ>VZgpJ@6xe=L<@U(B6u zh;E@3kaaFZn%K!4=lqUkzrka9Oz>LBJ#i9bV{7*Rtt5xcFM&yss%R4#0B6*@^Vdrq zS>uNT&s|}UYc?9=pNT_ZpT%Mbk5=GYw*5Knx;iFD|37=tD)kOC!h}6@p!V`SSe?}t zH7efFPsc83`$A+Qm;3RzWLHubS*g{1EQqT*@w$VGSY0@u;xjTis!lj9L{6d4K6@IQ zC7!#j%JipwBBxE!=g)8YK<B1p-h4EHjXs)TR|nBGuPlWodllSfJWF)$8J0=<^f9p% zo}P)r*yp`j(U0lEf0gjZqCy&0Al`M=3H+y=D0Z!c8o5?{?pG{6w;h4=3smvON0ICO zG8sdC+sI7;4&@&->%gzA8P)FHAemejGu4Zp{ApWf%orYo?Y;za!9Gp=zIO{9Y#T?l z<um!%<ug+5=1o+gFZP}qE2Y_`)99mZ7yLNAGppYpiO*-QhKk#7Y3P+9yl{mc+g*%c z`I8RqS~0~f=<6JcIku-<Oq0N@q%B%bd?}rD@5+PriMhhME@<}qsQc+0;lj3f0{Mpx z@yWKEQ0^6v`$D{U_69e2_C1b6jB91T)C7^C)dN``!@biU%E=N-Bd2Duc0&lq9)ByX z>1T!ARO6WZA|O$4C2t+I=In3pY57Z$b*SDg_cJwwg6&u2=+gUiTO~x?Y2U+^5iZ!~ za22hXSu8vK&Ed91x8b5x=056H558~dgC}hb`TgoElKK&4YFw0z$IGkB=T3YfUG(UQ zGmor<%~1o<q1GEc6$~ML#uT2PcokwkwUM4Rt)mfY?&$N^6~~HBLF2-K+}~vu`nNv; z+2VWU>TnXw%lh$%S25WBL|YtH9wlcM_r$Q&V(L5O0e!UjM!srx_#j*#9laDG{c$+1 z*K3jg*5t#@;KlI#jx9&t%9rB`1#3su3jf@=0HvuB{4>Izy^Cu=$@&<a^boGQ06T15 z>w+eq74Xw)KaAEi<&3^#_*}##nBiaw)Ay#Kz27H#wm5<w;Bbom^$~QxZGuZ(CgI*U zZnz^sg&TZ3W4_&S@?9!;!FN1JBSTGg&d!xm<`2Z_FLueli!%7i6<1E(7tNP@DB_Kc zo#FFid)TrulrGq*QNvGt+|<qse+@{+)57~?5PlsB0|()Yh*+M#e;=Gzs{qe5bL=)q z4SP7d;-4SyC~3k5_;k?#%O4z;W{#M_^)+u`+CU$cuWyF;&0pX_el2WI4uK9!F4Ey^ z&!ubaM&QRQzoh4>o$%dmTW%JPiKgZQRIbpGyU!8+>oi*))l>!f(-Qdg+!@rY?7^d7 z8cWKG8|hcvciDKHu6uC@6PnsT9`i@-B#kZZY*u1|yT9t<I`c(jCAuLi^E2g+50*e; zsSmW)q@iD8B<@STMakxB@NCv7emJ#1Ef~~D)mI1N!L=7DZ|+4f8D@d=k2>H_hd|yK zFbWhy(%I`@G+wm(Tduh7qx@l$;E3hSqkW;H*wUj7HoSU8FKg_uY^6CDG<4yZd7Y?b zlP14dTL`Ih55d!}22}fH1r?ofg}?86;J9sO)LLnTUITwolW>0AxEaaLnIa?iyq;G4 z?MGie^+&g>he+%2U&u9EK<zR<Qn1cTnlM2T&X=8q(UYgh*}qeyJ>KSs>yn|O5HYih z28_vdluJeqm+m`c(TSsp9Cd9se4kZ8ON{}B+T=lW&uOT7Y&%51SVu3mcZ3DA?P$^S zAnNcU8EU+Uo=gv5J%4vjDVu?+A1v_Tl0du~WKCD*X0yRMHOw2A!I8a2@$YFG?Cdle zSM(eX_Zz0*RMEGKziy972jVbN%p4b)r%<~Mf!xy}lWUYs@p|VdKDoONf}bj(R>*tu zbx7b%*^TnhRr~1biiOf@*L>;aXiZGBETZmj&p`OS2>vl!1}EJKI51G`g$8utMzJf~ zUH6ErDs<TIv^{=5-HCo*O2(7h+X+UR$cVPo!=$}~x!<}VoK&!xmUYU)$v?ZYbfB5) zx~pSCWjkCDd!bDCiWU1;bfY$}6w$F#7Zr=sSySW#%+_|qYl83N8tni#U**t_DFevi z$O{?+QM~)H1_l`22A8QuxLfrM#dmlFM=A&K`O;FsSV}=RV{>k+DEj>9$~)36u>1Xb zsaEt)BLycbO!PXdr%&Zs75?b0Vu1ad@6+~cJKT*g2ho==mOS@kclz~23!Sd+gVU~F zEDcQOgw2ac_wZzryT`G?J#9337Riq8soYj61QxxS#8E?*L8pch{C1>7uT*>B@yWjI zlqYufUe-9ezb9&DFQovRm1GR@s9!f-s(vy6zol5=QD4EY*z1p-^1AV($~mB?+J>~E z*2oWz8^PXyKpwMI6_@;O&ziD7mP}s^SKD^Sh5Ib{-<??W40=ReHHboVhhx)=U(&P8 z-mI{6JTL2c9<B;z;ogawm>@Fh!|r78{7H>)=FB!|^VoxOn*%wpR)b2`+w!$4e|#U4 z2X9R4Nn=_H<e7-~pva9?1+<Z`T|XsnxDm`JEpEex)>G88*oMsXJ$cd+Uo55oblL3$ z$4bI!aZVGJ%5Jc|R}${c+eSZ<`mkO1PB{KfJob$0%6g9TAn=_H&l!4~p6{PbLo$lV zX5kkqn&FMcz1Bj3#b`XftsDNgcPSn8100{3j6-`bB~7p2<y%L(3ukyJyldJ{J)^GE zZo@rPnmU(qcRiq^o8xKXlrk6+)f<x*8{ln4MOGWS2-FI$(xq#{qcW^V`OTzgHr)CS z%*+zlFvX6)7OTLNSF!S~(;HwyUvpF#@Dj|{$8cPi({jXt!*tW%isjNW$}D>+zn&Du z;iCU*JI+<Uc_s|6Pp}qQ=TZ1^>2Q8JB%3bgIB;8w2$An{l$Wg(IVYVw0@3wY^{W@( zir+}{PUP^HyV^J@Ns}kk-xK_uXM|D&N;jwB<f8v1U-emNt#Mu2Qm~5Zx9);#rFpVJ zLoFqJ=*?G`nn=zs(;(;GG@jM|FP!|RkJA(~+_yekNf*rpH)q&+!dgR~xA_nFYK-P# 
zSJ#$1>{}zveKC`(Jfk`Kt6(L+iR7JKdZY8k$vk0U8czt@Bbnq+<EdA!!S8=%q9gc` ziVw#N{)j#%Se~Vg?z^E*OL!=*h2pFh1$O$cKc8wpT(C-tY4&dmKEI-gt`@I=#F9XE zJlBCbr+Q=8!wsO2G+i3yqshVJI&r?uetEq8Fs!;8#`pJ*M&*qmc-l}&TE1Zbo5UGN zp6&^tuN8!c`?bZ@RuY~Y=pi3zHw~()r@Gyq>C1829XV4TCGm?n?qfwy|8#b5Sk<yh zHqOZ5Aw9Bjpx<<Ng&zMXT0Icw)o%x<@JkS}vYEQeec2;Eg(vR%2JdTn;gzFiJa+VH z`P-g6$!Yx{F0SrHxt6y;#d<ux>KMUu`^0e8NH_Y|KM^;7QpF>tepqi}5560!<gj^W z!ablRxojMSFSpLbah8RY)GeBGhW5Y(C!3-0L?q_Ks!LbRy>YVZPAC><k1JL2{Oo=l ze*b%noGVOm&%#rrZ8sHL9S>1(Nl)QU&|(9NbF@}XOJr-$(Ad8<vX-g=PSR|WACDU; zx^d@VUV?x5oGY@tY1|!{_%{$=Jay-XY03OkvnwvSIu)(B6YHHEK#KdGgUzH49Q$!N zn&-u0yy_G%+AjW{<HqcALU@9fA0elrXjb{Ai#yc6!d9&(;F)TRF0cN<t_kl1PiZV~ z5m}cmS%Kxg%UkGi#U0YP9frMZWGMYNi_7;2=0ovySUKkbof00L`+K*Oso>{bvRF;0 zmaFpMt$-S%L{4AJ4bP5JCRw(__Pv{E$dB)2*<~>#nstN=Vm5lnCl%u!_)tQD4JKU` z%z`U3(5rF+EEv8{o_q|j{$w(Ab^l7bZT(@$;Wm8stRXt}%ivbk9R5-2Kz`<t?h(mt z&~CS#@H}1sXWwg3=org^rWv^M_Y=6SJ)hR=6J-RYu($PRi2phsTc%xvyH2m=ucOvb zp~e*Ycy2HT-ZJBfQ5ihcuRkvgQ^guJSDJj%1U-`8z~v>mq+=%7Be{sBfp_V7hAMw6 z(L?pvFbwhU#ij|n=-Jit;8j)w=eDn*JL&VGD)O`B+;%P~ydH-CHI>1bF5dieZV&z( zvdw*9Qasu$4dboP^hAd(6sP=nP8suj*s?JJ{e_!cZP!#Zbm-1!<|EoUous230|_7L zVxGPycJh7#jw@ri>G}vf9_R&8?>15WpeK}{as}F2FQ*2>LaNlbO!-aYc(kMhwHJlg zpu!imJ}Sb)*<D%J(uFq8?8_EKu~gBfK_0c^4lGMl#}C)kaEWn8{IhEu|9fQy`o%W* zd*@fWGH*1l9%O<O{!GT6d8a8y{0)spH(^N0Br3_6gzM+}k=^-8IIcArJ$f#JzobrH zh6m;2!w2BJ7uKlMC6i4K*TCx~X}oB;g>3xbBHb+iCWXa%mycKYLbF~j0_)CQaln=W z==x0)Hy&t#QzdihzXM%SYS$k%Z=R!^8YAo+o+z!?kH9%TY0@8a9}4K`jq;9>tdr7- z_qo;plwF{A{}TAv_DoFrYX<^a$e(ZRk>AFx0evq&ns;C<8T?k|@__XeQST2eEt%Zl zmo8k;2kFK653rG<P?R)a*(C?Qa%TqW-2k3Hy)6v6;m@I;yg6smADTMJ7K<7SAtOzV zpN^S=KV@_7(sT`uRb=wx4u9cs?;hMyC6!ADsc~LY4CyNhW~EXTeotx3maR2ZdG`+K zp8Ai1uUc~WfewN(=ZH`K{ebr~GBAJs5lO+$4OLrv;|?1YtQjyE++P`R<JQ6GCF`;A z%1!ibYYJ}`cbd`H2E&{7Zg_Q(5&rw|02aSwI5)|k)h`U8gEM3CmC1tgy{^V+dEhK9 zJmJdk2d)KL^}PJ|@~hw<H<nX_bn%K|Fm^E82qXT}!OOSilJcghWV*+biZ3mrJuRXu z8rF*kznac!@22y@mO9A2FqLl<{Qw6&3qE_Qom=*mYc#Eo*iA37<Iv4hFecy*O-Y{% z)g>!ozUV~{?!A}RZd@pj%+w%_<9&Esj{&^7-(}eOpdTL4?9VImyOPt9X>f3iuRN(d z9$oc&a8cz{22ch#)P`MBoq7Ivb+n3hribtLkjgnDX-&QfFS=So!%jzWeVJgN_80`p zL)wWM%{lP1Er18{*%(zdn&&-eq<bzR%Omb*cWRe{+TuDY*Xhc(`sd||y@zp$W<8lN zj6#3hOTnwB^Q^s0<D(U@>4qf^_3DrF!>y@<<^UXac^eD~-zH&sBOFNSix~%M;d<Xu zoc3-azD$wjSGOnQ(AJ;uL1cfttrWOM+5@+>5;1vKSIqg5Lrbr=N>l3NG2QqJoG><$ zN1ltvl}53Yqc)h&M9rh<b1hIW^4$iP_3+T9ofPCThC1(4<f#?95bL><3cl1qn0+fX z^^>p*PR8lYDd;}CS=y5t#%uJ_aK**_;J9utY&Ov%-TikZ_7;2C#VYhNa=p9N%SUj+ z$w^9!KT1C0`)0d&J?y`}Rj??Y%D2KY`Cgaj(uvU>_|hw)d7AFL_Hhm_->-uUa(%e< zr6vCA*o({}hf%dt9kkJO#i$@7)I4K~=P&6%Z*X8uXYuS>{g4z#et~u4MsxQ|FQB?Z z0UXUz<tfjiSa<ylE);Y9jI3ky+9n*v9UO;aI~)gV<xo~vOM;l(6SVD}8?JE`^F$*% zR7@5=fu&QaXM2C%a=C`A7pg)*mNoudW{VADM{`l{bexiUA8s`N2S2BuffXGl<J(JO z&UEselx*^kZs<AlzIpcey2OHKhLh-7ETUac-Ox8^7VI^);v?@;xgfhQHYja|Kh6#C zue_3WwjGaV?Vr(})_ln~NdqzCEj6YWmbJ@nk5lfildhkN!)QGpoFS#~%da0PwoN>{ zWY5O;BJ=W4u;h#Gb;Rch^W=~jvrw_yeH!DDNLxB(fkvkVq?Dz@4V%02oJ%t>WA{d> z)qfH`*b%{=D?`yQ%n>WEE8v^1`si+QPtvWfhl?ueoOwGP46Gcv%cN5I_)!V(X;nap zb*vPA(G?9A0amDX#d?=cv^Re;_6rNa_T!Z}Zs!E<bVHN6bgH8z<<l{Iu_MpE5`m_E z)6nU%FAfMInl?xDX+~&s&fS^((7O#bw+zBnCp7W#C_VR3k!udUo5{<w0t7#?nkJZ@ zk(_S-fSV(-Inz&va|U+DPik#>{?iofT&d6FYn@=@7G-P{%&yNJ{aDGT9_D>F<IXQj zD8W%5b6Rq+?1-BC!0A@--G2sz47&sQ716Y9y)G}g<AU}*bouk)aiXW-O3G^1XcKmx zVh68-tL@)Hc-K6b^G=gO!e(K&9riRg4$<|G65UxR*bauN*sH2VPFBAvG7Sc(-*pj{ z74^r)tgrI0vr8oVhZjg|T}M3AvIcP6OX+CY6h5=qiEG}c^MQRK*gtC(Y_$^mmLG3q zlPXn~UBmd5e<_%rpD$0kC&A%aX*fC4o(yi)$yH&ypq*ANje7YCeyCXT(_@p_%+r_K zgmrX3-(Q>Orq_Uj@Hu!+sfCixzPMJ*dFvFj_?ed>j=vU&3SAs=*W^y+l{2a#sLeqN z=`jK8rVhp7Hv367HpD1J!F$-O#!YL7l0w=f`fHTJ#-mZ>-aE4C1ywPB6u;?4YjUb^ 
z<u8-Nu*-B8bPDdnA2WB7Q^);q*z`4gdOQ?{oaoBia^BFRgfuvkWX$W$yR$Gl;JP6} zXc;N7yUHb~3+m2MkpZ+av4j*3IPk%{6|g+wm(1x&T$|y8^EaAFj|+D|gPSG??2N%E z%S3)=8HLfgCRl5v#ebaop=ZAwQf~j2vICA&l}j*w=#$0?zu!>1rN;cT^H6YAu%t~J zgLt{7BN=OT!dAfnd*66K?)^@1<c>+axB58gm1R-&Y-`*+X@=|(8_sU)#-eSKF7>kA z<L=Xcjg+3<iL*5|%lFoniOi-U1^-z?{Z{DVEsyh5u{9MRCmP|6Va|NJl7tgb4M!cR zhAjzRSp9kyOAaHrWbiC5RVj1}SZ$A)<BV}i?nk(+v`0QF?z_vH7D2}SiJW&=pY})l zV&K9pFu@QgHKjkt4fN&-g?ZAM$H0&824njLityvN0k-qIK{FN=QU3Zc9uQa$LG_nm z+U!@*KBo=&g^A9_nNs<s#}TQ<T<jj-sJc73Hc)|Y7q0p9z1((hE|^}~PwGQe@nN1a zwW|}(tp$#>y<<4qE<FjYJE~~d3o}0CBD(%>B)IeFCH?YHl;^CNikB=e$VN}vQs`n& z^m!}%k!y|Vq}A=S-X^NJVo*5kpE!gD_*!9A>Q`_ox=Zbbb!8<3@l08FSMGhs5v|{) z;-I@{V3EE#S$VD@5FL~_sg!g*%%nNvcY)8HTV#B6Hs`l#hc5<=pe1XZ*ngK9?W?#5 z9byks_QY~BE_8th;{335$5~lb8jW(NO#F0d6-}Hs8;b_ag6?_V^l?akTzPK{|2Oi0 z6tu@02mLgqKFa4r?rS7IQk~7ys&lx*m>u#;O9#P(=_UWn7rwmg{Sfm{0W4l=V`9^A z9-R9gaPJ5CZzmI~c$h(T&oub()<D)a+AJq7%;cxPF2dK2CV0zXG@n{Dll?7&W$m!A z@<#)wqPlgWU~+ciMGu-qZYGA;T$w3ynZE2&6@-1fy7LFsjvQWDBX=7f$>rs4sIpiQ zTtpZ5ZSPk1(%AQ8;yM>pSG!8yR{NkbLJ|GuIP)h<b1bVEPHi7pV(}u8KifQ;6O*2i z92A6izFP60-_F>)M~7<5g2C>(16oaY;hj5|z!7a1$UCdSy9>v|T%R1aO4h^Xoel2& z-_C%TxhC?8q*Jtb^J@9@p`LhAX{BVl;36f5h>qsbZ&JQl0Cy<u&2652gEf!EtVhWP z4+bwNPdlcD2euEuosY-CnfMS4+kXl=UY|-%b^|%6nx%@Ld2skv3svmCLqpn3rIJCK zY>;+^;@joJ3;STsm_3q@`Htb&-v@wm?^5aX3>VyysK}@EoT$h(AD$??(~@gy^iAiH z)c^epI&$6+uPhYzuz%fgOqa28yDX7Q=o5(#H@qZ`C4(?^js;Hqro`im2I0AM8_w(= z%l-FW0^O!m+|<_tqb=7-n^#W4ut%FIC*v=@vo)3ugs+3CzU!&5-DvFS)t@K7$bz1? zF2I}2SR`jVG?Xkr`&?hvv!8<Fw+~~V)Oz7nor1f6>GA^2v%+`T4kI7iV1CL@!I+Ik zsI-#;&KmLtI~R11G)CjtR3`J!vMsyvCSx=C-w_l3X!ioH2d;-jk)c?mH<1(0uYi^R z?SNMH;tyoY4{z5%7wwx=Fib_BHhvPnT`5YdpQEUBmIZHDP?Xg-=F(#I3>;z8ldjbl zNP2mT;LG!CG!@jq=J;J&v8)?74*v!<tpjjq_DMNSd5)wo<(0IxH;Y}a8@1#+bE&J) zD>WzzKi53@R`y8TxgZq>3`iDPoAEqf?BY89>Bk4_ZLu(T3YOfF@M4Q8Z{G5yY{#QN zbX>uRLx&B;%xOv3t2T)%64SXtdjfY0)j)-p2{=Nt5B^80^l78L{Ii!9!j@$AuX-r^ zM&{GDL{t2jEat}do|gYyZiW}n+G0y<S2SF|Qcij~kgEa_m)#aQqxgQDwD66rc0L}j z1g<LAS~iIy4I_Dzo$yBG4RRlM^KW_C^hiD~1z_}4;X8Rg87*=;^OAYJxN3<9o*wj2 z%+t0?y*_l|ONDWmZ_poKj1S}wC-1|Kgs1S3)S&B#=Tb&^64jm%cQ=dKteBdHvu0#K zALsW}uvBoHx|m|SgFr6U^M#xKgtTQ*B&u~a!<tuvMgM3Xycm2?&Uu}~UcU-uxra%4 zQu-w9tKrRGW>mVX{8hlAhlX=XwILcWTMlu@4e;HH`84J6A~2s|NG94&m^^9*YtJ5! 
[... remainder of binary patch data omitted ...]
diff --git a/mindspore/lite/micro/example/mobilenetv2_quant/Reshape-64.out b/mindspore/lite/micro/example/mobilenetv2_quant/Reshape-64.out
index cc05ebffa8..49a744194c 100644
--- a/mindspore/lite/micro/example/mobilenetv2_quant/Reshape-64.out
+++ b/mindspore/lite/micro/example/mobilenetv2_quant/Reshape-64.out
@@ -1,55 +1,55 @@
 Node:Reshape-64
-input Tensor:mobilenetv2_quant_B+1280
+input Tensor: (int8_t *)(mobilenetv2_quant_B+1280)
 input 1, 1, 1, 1001,
 input type:DT_INT8, format:NHWC, elementSize: 1001
 input Data:
- -77, -76, -60, -50, -50, -40, -53, -41, -60, -67, -77, -72, -65, -73, -62, -67, -72, -57, -71, -55, -74, -69, -79, -72, -63, -75, -55, -65, -62, -69, -68, -59, -59, -79, -64, -48, -63, -55, -71, -67, -56, -64, -64, -56, -71,
- -59, -72, -36, -81, -63, -73, -61, -84, -64, -68, -75, -61, -77, -71, -64, -61, -46, -82, -73, -83, -85, -62, -75, -68, -54, -61, -56, -45, -60, -50, -68, -53, -72, -52, -67, -49, -73, -57, -71, -79, -51, -61, -72, -81, -60,
- -60, -68, -79, -56, -60, -48, -79, -69, -59, -71, -54, -58, -67, -75, -76, -82, -75, -75, -41, -45, -49, -60, -37, -53, -36, -49, -65, -76, -58, -73, -66, -58, -75, -76, -77, -78, -60, -74, -66, -76, -60, -72, -62, -58, -64,
- -65, -66, -54, -60, -74, -69, -75, -58, -71, -60, -77, -65, -72, -78, -67, -69, -77, -77, -87, -68, -75, -63, -76, -73, -69, -61, -93, -70, -64, -76, -61, -73, -67, -76, -77, -75, -73, -66, -59, -71, -76, -71, -83, -75, -55,
- -67, -69, -75, -69, -66, -58, -63, -74, -72, -77, -72, -59, -69, -74, -65, -76, -61, -64, -80, -83, -58, -64, -61, -65, -49, -65, -77, -72, -66, -59, -65, -70, -69, -69, -72, -91, -77, -80, -76, -72, -53, -82, -76, -80, -76,
- -63, -78, -92, -79, -82, -53, -58, -70, -72, -78, -63, -72, -56, -81, -77, -70, -70, -71, -51, -73, -90, -65, -68, -78, -59, -70, -72, -61, -80, -69, -73, -81, -81, -70, -74, -76, -56, -62, -91, -71, -64, -69, -71, -63, -84,
- -80, -74, -87, -75, -80, -71, -82, -71, -74, -73, -76, -64, -55, -67, -71, -68, -56, -90, -76, -82, -71, -81, -77, -78, -70, -71, -66, -50, -71, -75, -69, -67, -36, -60, -79, -58, -45, -64, -55, -41, -66, -28, -60, -64, -55,
- -63, -49, -53, -49, -55, -65, -60, -75, -69, -76, -63, -74, -69, -44, -49, -65, -62, -59, -78, -66, -76, -69, -76, -76, -75, -80, -68, -80, -86, -86, -68, -75, -77, -77, -79, -76, -84, -85, -72, -67, -71, -89, -70, -69, -75,
- -70, -85, -65, -62, -59, -81, -73, -56, -54, -79, -85, -70, -77, -72, -62, -74, -75, -87, -87, -69, -80, -75, -71, -79, -80, -85, -64, -66, -83, -84, -66, -60, -56, -75, -67, -85, -61, -60, -62, -79, -57, -73, -62, -54, -72,
- -85, -74, -92, -71, -82, -55, -86, -69, -57, -65, -71, -86, -59, -54, -44, -69, -73, -35, -76, -85, -89, -57, -53, -61, -55, -50, -63, -81, -71, -67, -45, -70, -65, -44, -89, -82, -36, -44, -96, -68, -89, -71, -57, -65, -65,
- -75, -98, -89, -78, -68, -74, -48, -78, -59, -55, -62, -54, -73, -53, -37, -80, -79, -79, -76, -64, -70, -68, -82, -57, -73, -53, -58, -88, -96, -59, -52, -58, -59, -67, -58, -59, -58, -76, -86, -36, -56, -42, -72, -70, -78,
- -67, -69, -63, -59, -63, -68, -78, -56, -61, -68, -66, -56, -45, -59, -61, -88, -70, -88, -72, -75, -72, -73, -85, -60, -82, -62, -74, -85, -64, -75, -55, -75, -85, -73, -56, -67, -43, -66, -83, -65, -70, -79, -66, -70, -57,
- -24, -77, -71, -66, -66, -86, -57, -81, -77, -87, -44, -85, -67, -55, -76, -76, -78, -75, -74, -83, -49, -61, -70, -25, -52, -74, -77, -64, -72, -73, -73, -53, -83, -69, -68, -22, -79, -80, -73, -64, -61, -58, -61, -79, -62,
- -98, -55, -91, -52, -74, -74, -82, -56, -70, -70, -73, -60, -53, -84, -76, -51, -51, -78, -62, -82, -48, -68, -57, -58, -59, -73, -71, -68, -92, -72, -67, -72, -38, -69, -62, -32, -63, -63, -37, -65, -76, -84, -28, -88, -67,
- -58, -58, -73, -61, -66, -91, -64, -76, -57, -74, -55, -46, -78, -71, -43, -22, -37, -32, -70, -72, -43, -51, -68, -69, -71, -83, -66, -82, -65, -94, -68, -72, -83, -76, -89, -63, -85, -69, -86, -81, -64, -72, -80, -64, -62,
- -54, -94, -84, -44, -79, -65, -71, -72, -59, -62, -87, -62, -76, -93, -62, -69, -66, -84, -66, -64, -62, -46, -50, -85, -78, -79, -54, -70, -69, -57, -53, -80, -85, -65, -68, -80, -62, -32, -71, -76, -58, -76, -44, -88, -60,
- -68, -58, -57, -49, -53, -75, -63, -98, -72, -53, -82, -50, -63, -72, -53, -68, -49, -57, -45, -63, -92, -73, -53, -74, -82, -64, -69, -57, -53, -72, -43, -56, -92, -71, -62, -91, -74, -64, -75, -84, -65, -58, -69, -91, -63,
- -66, -70, -90, -31, -59, -50, -58, -65, -73, -43, -72, -60, -58, -64, -74, -85, -80, -68, -64, -53, -60, -74, -83, -85, -86, -48, -67, -58, -37, -78, -27, -60, -59, -69, -73, -51, -87, -66, -85, -75, -34, -35, -71, -86, -66,
- -91, -62, -58, -79, -84, -70, -57, -89, -85, -34, -49, -84, -77, -75, -90, -50, -51, -65, -72, -52, -89, -78, -60, -81, -84, -65, -69, -67, -58, -67, -75, -57, -52, -68, -54, -16, -51, -49, -77, -64, -65, -56, -55, -38, -48,
- -62, -63, -79, -80, -51, -72, -73, -57, -71, -59, -86, -86, -67, -86, -63, -67, -54, -83, -47, -70, -76, -77, -68, -89, -70, -12, -76, -70, -59, -44, -67, -16, -80, -89, -67, -69, -90, -76, -31, -49, -70, -89, -57, -58, -31,
- -48, -59, -86, -83, -84, -31, -40, -88, -30, -43, -84, -59, -40, -68, -59, -80, -85, -67, -69, -73, -65, -57, -78, -80, -96, -82, -82, -71, -87, -76, -67, -85, -79, -74, -72, -87, -78, -58, -36, -64, -75, -97, -75, -90, -77,
- -76, -56, -63, -72, -57, -56, -62, -64, -57, -60, -60, -68, -71, -84, -43, -71, -86, -65, -79, -84, -81, -100, -48, -63, -48, -68, -84, -15, -47, -49, -62, -65, -61, -49, -46, -71, -45, -46, -80, -48, -39, -50, -71, -67, -63,
- -65, -74, -62, -72, -72, -88, -82, -69, -85, -73, -31
-output Tensor:mobilenetv2_quant_B+0
+-77, -76, -60, -50, -50, -40, -53, -41, -60, -67, -77, -72, -65, -73, -62, -67, -72, -57, -71, -55, -74, -69, -79, -72, -63, -75, -55, -65, -62, -69, -68, -59, -59, -79, -64, -48, -63, -55, -71, -67, -56, -64, -64, -56, -71,
+-59, -72, -36, -81, -63, -73, -61, -84, -64, -68, -75, -61, -77, -71, -64, -61, -46, -82, -73, -83, -85, -62, -75, -68, -54, -61, -56, -45, -60, -50, -68, -53, -72, -52, -67, -49, -73, -57, -71, -79, -51, -61, -72, -81, -60,
+-60, -68, -79, -56, -60, -48, -79, -69, -59, -71, -54, -58, -67, -75, -76, -82, -75, -75, -41, -45, -49, -60, -37, -53, -36, -49, -65, -76, -58, -73, -66, -58, -75, -76, -77, -78, -60, -74, -66, -76, -60, -72, -62, -58, -64,
+-65, -66, -54, -60, -74, -69, -75, -58, -71, -60, -77, -65, -72, -78, -67, -69, -77, -77, -87, -68, -75, -63, -76, -73, -69, -61, -93, -70, -64, -76, -61, -73, -67, -76, -77, -75, -73, -66, -59, -71, -76, -71, -83, -75, -55,
+-67, -69, -75, -69, -66, -58, -63, -74, -72, -77, -72, -59, -69, -74, -65, -76, -61, -64, -80, -83, -58, -64, -61, -65, -49, -65, -77, -72, -66, -59, -65, -70, -69, -69, -72, -91, -77, -80, -76, -72, -53, -82, -76, -80, -76,
+-63, -78, -92, -79, -82, -53, -58, -70, -72, -78, -63, -72, -56, -81, -77, -70, -70, -71, -51, -73, -90, -65, -68, -78, -59, -70, -72, -61, -80, -69, -73, -81, -81, -70, -74, -76, -56, -62, -91, -71, -64, -69, -71, -63, -84,
+-80, -74, -87, -75, -80, -71, -82, -71, -74, -73, -76, -64, -55, -67, -71, -68, -56, -90, -76, -82, -71, -81, -77, -78, -70, -71, -66, -50, -71, -75, -69, -67, -36, -60, -79, -58, -45, -64, -55, -41, -66, -28, -60, -64, -55,
+-63, -49, -53, -49, -55, -65, -60, -75, -69, -76, -63, -74, -69, -44, -49, -65, -62, -59, -78, -66, -76, -69, -76, -76, -75, -80, -68, -80, -86, -86, -68, -75, -77, -77, -79, -76, -84, -85, -72, -67, -71, -89, -70, -69, -75,
+-70, -85, -65, -62, -59, -81, -73, -56, -54, -79, -85, -70, -77, -72, -62, -74, -75, -87, -87, -69, -80, -75, -71, -79, -80, -85, -64, -66, -83, -84, -66, -60, -56, -75, -67, -85, -61, -60, -62, -79, -57, -73, -62, -54, -72,
+-85, -74, -92, -71, -82, -55, -86, -69, -57, -65, -71, -86, -59, -54, -44, -69, -73, -35, -76, -85, -89, -57, -53, -61, -55, -50, -63, -81, -71, -67, -45, -70, -65, -44, -89, -82, -36, -44, -96, -68, -89, -71, -57, -65, -65,
+-75, -98, -89, -78, -68, -74, -48, -78, -59, -55, -62, -54, -73, -53, -37, -80, -79, -79, -76, -64, -70, -68, -82, -57, -73, -53, -58, -88, -96, -59, -52, -58, -59, -67, -58, -59, -58, -76, -86, -36, -56, -42, -72, -70, -78,
+-67, -69, -63, -59, -63, -68, -78, -56, -61, -68, -66, -56, -45, -59, -61, -88, -70, -88, -72, -75, -72, -73, -85, -60, -82, -62, -74, -85, -64, -75, -55, -75, -85, -73, -56, -67, -43, -66, -83, -65, -70, -79, -66, -70, -57,
+-24, -77, -71, -66, -66, -86, -57, -81, -77, -87, -44, -85, -67, -55, -76, -76, -78, -75, -74, -83, -49, -61, -70, -25, -52, -74, -77, -64, -72, -73, -73, -53, -83, -69, -68, -22, -79, -80, -73, -64, -61, -58, -61, -79, -62,
+-98, -55, -91, -52, -74, -74, -82, -56, -70, -70, -73, -60, -53, -84, -76, -51, -51, -78, -62, -82, -48, -68, -57, -58, -59, -73, -71, -68, -92, -72, -67, -72, -38, -69, -62, -32, -63, -63, -37, -65, -76, -84, -28, -88, -67,
+-58, -58, -73, -61, -66, -91, -64, -76, -57, -74, -55, -46, -78, -71, -43, -22, -37, -32, -70, -72, -43, -51, -68, -69, -71, -83, -66, -82, -65, -94, -68, -72, -83, -76, -89, -63, -85, -69, -86, -81, -64, -72, -80, -64, -62,
+-54, -94, -84, -44, -79, -65, -71, -72, -59, -62, -87, -62, -76, -93, -62, -69, -66, -84, -66, -64, -62, -46, -50, -85, -78, -79, -54, -70, -69, -57, -53, -80, -85, -65, -68, -80, -62, -32, -71, -76, -58, -76, -44, -88, -60,
+-68, -58, -57, -49, -53, -75, -63, -98, -72, -53, -82, -50, -63, -72, -53, -68, -49, -57, -45, -63, -92, -73, -53, -74, -82, -64, -69, -57, -53, -72, -43, -56, -92, -71, -62, -91, -74, -64, -75, -84, -65, -58, -69, -91, -63,
+-66, -70, -90, -31, -59, -50, -58, -65, -73, -43, -72, -60, -58, -64, -74, -85, -80, -68, -64, -53, -60, -74, -83, -85, -86, -48, -67, -58, -37, -78, -27, -60, -59, -69, -73, -51, -87, -66, -85, -75, -34, -35, -71, -86, -66,
+-91, -62, -58, -79, -84, -70, -57, -89, -85, -34, -49, -84, -77, -75, -90, -50, -51, -65, -72, -52, -89, -78, -60, -81, -84, -65, -69, -67, -58, -67, -75, -57, -52, -68, -54, -16, -51, -49, -77, -64, -65, -56, -55, -38, -48,
+-62, -63, -79, -80, -51, -72, -73, -57, -71, -59, -86, -86, -67, -86, -63, -67, -54, -83, -47, -70, -76, -77, -68, -89, -70, -12, -76, -70, -59, -44, -67, -16, -80, -89, -67, -69, -90, -76, -31, -49, -70, -89, -57, -58, -31,
+-48, -59, -86, -83, -84, -31, -40, -88, -30, -43, -84, -59, -40, -68, -59, -80, -85, -67, -69, -73, -65, -57, -78, -80, -96, -82, -82, -71, -87, -76, -67, -85, -79, -74, -72, -87, -78, -58, -36, -64, -75, -97, -75, -90, -77,
+-76, -56, -63, -72, -57, -56, -62, -64, -57, -60, -60, -68, -71, -84, -43, -71, -86, -65, -79, -84, -81, -100, -48, -63, -48, -68, -84, -15, -47, -49, -62, -65, -61, -49, -46, -71, -45, -46, -80, -48, -39, -50, -71, -67, -63,
+-65, -74, -62, -72, -72, -88, -82, -69, -85, -73, -31,
+output Tensor: (int8_t *)(mobilenetv2_quant_B+0)
 output 1, 1001,
 output type:DT_INT8, format:NHWC, elementSize: 1001
 output Data:
- -77, -76, -60, -50, -50, -40, -53, -41, -60, -67, -77, -72, -65, -73, -62, -67, -72, -57, -71, -55, -74, -69, -79, -72, -63, -75, -55, -65, -62, -69, -68, -59, -59, -79, -64, -48, -63, -55, -71, -67, -56, -64, -64, -56, -71,
- -59, -72, -36, -81, -63, -73, -61, -84, -64, -68, -75, -61, -77, -71, -64, -61, -46, -82, -73, -83, -85, -62, -75, -68, -54, -61, -56, -45, -60, -50, -68, -53, -72, -52, -67, -49, -73, -57, -71, -79, -51, -61, -72, -81, -60,
- -60, -68, -79, -56, -60, -48, -79, -69, -59, -71, -54, -58, -67, -75, -76, -82, -75, -75, -41, -45, -49, -60, -37, -53, -36, -49, -65, -76, -58, -73, -66, -58, -75, -76, -77, -78, -60, -74, -66, -76, -60, -72, -62, -58, -64,
- -65, -66, -54, -60, -74, -69, -75, -58, -71, -60, -77, -65, -72, -78, -67, -69, -77, -77, -87, -68, -75, -63, -76, -73, -69, -61, -93, -70, -64, -76, -61, -73, -67, -76, -77, -75, -73, -66, -59, -71, -76, -71, -83, -75, -55,
- -67, -69, -75, -69, -66, -58, -63, -74, -72, -77, -72, -59, -69, -74, -65, -76, -61, -64, -80, -83, -58, -64, -61, -65, -49, -65, -77, -72, -66, -59, -65, -70, -69, -69, -72, -91, -77, -80, -76, -72, -53, -82, -76, -80, -76,
- -63, -78, -92, -79, -82, -53, -58, -70, -72, -78, -63, -72, -56, -81, -77, -70, -70, -71, -51, -73, -90, -65, -68, -78, -59, -70, -72, -61, -80, -69, -73, -81, -81, -70, -74, -76, -56, -62, -91, -71, -64, -69, -71, -63, -84,
- -80, -74, -87, -75, -80, -71, -82, -71, -74, -73, -76, -64, -55, -67, -71, -68, -56, -90, -76, -82, -71, -81, -77, -78, -70, -71, -66, -50, -71, -75, -69, -67, -36, -60, -79, -58, -45, -64, -55, -41, -66, -28, -60, -64, -55,
- -63, -49, -53, -49, -55, -65, -60, -75, -69, -76, -63, -74, -69, -44, -49, -65, -62, -59, -78, -66, -76, -69, -76, -76, -75, -80, -68, -80, -86, -86, -68, -75, -77, -77, -79, -76, -84, -85, -72, -67, -71, -89, -70, -69, -75,
- -70, -85, -65, -62, -59, -81, -73, -56, -54, -79, -85, -70, -77, -72, -62, -74, -75, -87, -87, -69, -80, -75, -71, -79, -80, -85, -64, -66, -83, -84, -66, -60, -56, -75, -67, -85, -61, -60, -62, -79, -57, -73, -62, -54, -72,
- -85, -74, -92, -71, -82, -55, -86, -69, -57, -65, -71, -86, -59, -54, -44, -69, -73, -35, -76, -85, -89, -57, -53, -61, -55, -50, -63, -81, -71, -67, -45, -70, -65, -44, -89, -82, -36, -44, -96, -68, -89, -71, -57, -65, -65,
- -75, -98, -89, -78, -68, -74, -48, -78, -59, -55, -62, -54, -73, -53, -37, -80, -79, -79, -76, -64, -70, -68, -82, -57, -73, -53, -58, -88, -96, -59, -52, -58, -59, -67, -58, -59, -58, -76, -86, -36, -56, -42, -72, -70, -78,
- -67, -69, -63, -59, -63, -68, -78, -56, -61, -68, -66, -56, -45, -59, -61, -88, -70, -88, -72, -75, -72, -73, -85, -60, -82, -62, -74, -85, -64, -75, -55, -75, -85, -73, -56, -67, -43, -66, -83, -65, -70, -79, -66, -70, -57,
- -24, -77, -71, -66, -66, -86, -57, -81, -77, -87, -44, -85, -67, -55, -76, -76, -78, -75, -74, -83, -49, -61, -70, -25, -52, -74, -77, -64, -72, -73, -73, -53, -83, -69, -68, -22, -79, -80, -73, -64, -61, -58, -61, -79, -62,
- -98, -55, -91, -52, -74, -74, -82, -56, -70, -70, -73, -60, -53, -84, -76, -51, -51, -78, -62, -82, -48, -68, -57, -58, -59, -73, -71, -68, -92, -72, -67, -72, -38, -69, -62, -32, -63, -63, -37, -65, -76, -84, -28, -88, -67,
- -58, -58, -73, -61, -66, -91, -64, -76, -57, -74, -55, -46, -78, -71, -43, -22, -37, -32, -70, -72, -43, -51, -68, -69, -71, -83, -66, -82, -65, -94, -68, -72, -83, -76, -89, -63, -85, -69, -86, -81, -64, -72, -80, -64, -62,
- -54, -94, -84, -44, -79, -65, -71, -72, -59, -62, -87, -62, -76, -93, -62, -69, -66, -84, -66, -64, -62, -46, -50, -85, -78, -79, -54, -70, -69, -57, -53, -80, -85, -65, -68, -80, -62, -32, -71, -76, -58, -76, -44, -88, -60,
- -68, -58, -57, -49, -53, -75, -63, -98, -72, -53, -82, -50, -63, -72, -53, -68, -49, -57, -45, -63, -92, -73, -53, -74, -82, -64, -69, -57, -53, -72, -43, -56, -92, -71, -62, -91, -74, -64, -75, -84, -65, -58, -69, -91, -63,
- -66, -70, -90, -31, -59, -50, -58, -65, -73, -43, -72, -60, -58, -64, -74, -85, -80, -68, -64, -53, -60, -74, -83, -85, -86, -48, -67, -58, -37, -78, -27, -60, -59, -69, -73, -51, -87, -66, -85, -75, -34, -35, -71, -86, -66,
- -91, -62, -58, -79, -84, -70, -57, -89, -85, -34, -49, -84, -77, -75, -90, -50, -51, -65, -72, -52, -89, -78, -60, -81, -84, -65, -69, -67, -58, -67, -75, -57, -52, -68, -54, -16, -51, -49, -77, -64, -65, -56, -55, -38, -48,
- -62, -63, -79, -80, -51, -72, -73, -57, -71, -59, -86, -86, -67, -86, -63, -67, -54, -83, -47, -70, -76, -77, -68, -89, -70, -12, -76, -70, -59, -44, -67, -16, -80, -89, -67, -69, -90, -76, -31, -49, -70, -89, -57, -58, -31,
- -48, -59, -86, -83, -84, -31, -40, -88, -30, -43, -84, -59, -40, -68, -59, -80, -85, -67, -69, -73, -65, -57, -78, -80, -96, -82, -82, -71, -87, -76, -67, -85, -79, -74, -72, -87, -78, -58, -36, -64, -75, -97, -75, -90, -77,
- -76, -56, -63, -72, -57, -56, -62, -64, -57, -60, -60, -68, -71, -84, -43, -71, -86, -65, -79, -84, -81, -100, -48, -63, -48, -68, -84, -15, -47, -49, -62, -65, -61, -49, -46, -71, -45, -46, -80, -48, -39, -50, -71, -67, -63,
- -65, -74, -62, -72, -72, -88, -82, -69, -85, -73, -31
+-77, -76, -60, -50, -50, -40, -53, -41, -60, -67, -77, -72, -65, -73, -62, -67, -72, -57, -71, -55, -74, -69, -79, -72, -63, -75, -55, -65, -62, -69, -68, -59, -59, -79, -64, -48, -63, -55, -71, -67, -56, -64, -64, -56, -71,
+-59, -72, -36, -81, -63, -73, -61, -84, -64, -68, -75, -61, -77, -71, -64, -61, -46, -82, -73, -83, -85, -62, -75, -68, -54, -61, -56, -45, -60, -50, -68, -53, -72, -52, -67, -49, -73, -57, -71, -79, -51, -61, -72, -81, -60,
+-60, -68, -79, -56, -60, -48, -79, -69, -59, -71, -54, -58, -67, -75, -76, -82, -75, -75, -41, -45, -49, -60, -37, -53, -36, -49, -65, -76, -58, -73, -66, -58, -75, -76, -77, -78, -60, -74, -66, -76, -60, -72, -62, -58, -64,
+-65, -66, -54, -60, -74, -69, -75, -58, -71, -60, -77, -65, -72, -78, -67, -69, -77, -77, -87, -68, -75, -63, -76, -73, -69, -61, -93, -70, -64, -76, -61, -73, -67, -76, -77, -75, -73, -66, -59, -71, -76, -71, -83, -75, -55,
+-67, -69, -75, -69, -66, -58, -63, -74, -72, -77, -72, -59, -69, -74, -65, -76, -61, -64, -80, -83, -58, -64, -61, -65, -49, -65, -77, -72, -66, -59, -65, -70, -69, -69, -72, -91, -77, -80, -76, -72, -53, -82, -76, -80, -76,
+-63, -78, -92, -79, -82, -53, -58, -70, -72, -78, -63, -72, -56, -81, -77, -70, -70, -71, -51, -73, -90, -65, -68, -78, -59, -70, -72, -61, -80, -69, -73, -81, -81, -70, -74, -76, -56, -62, -91, -71, -64, -69, -71, -63, -84,
+-80, -74, -87, -75, -80, -71, -82, -71, -74, -73, -76, -64, -55, -67, -71, -68, -56, -90, -76, -82, -71, -81, -77, -78, -70, -71, -66, -50, -71, -75, -69, -67, -36, -60, -79, -58, -45, -64, -55, -41, -66, -28, -60, -64, -55,
+-63, -49, -53, -49, -55, -65, -60, -75, -69, -76, -63, -74, -69, -44, -49, -65, -62, -59, -78, -66, -76, -69, -76, -76, -75, -80, -68, -80, -86, -86, -68, -75, -77, -77, -79, -76, -84, -85, -72, -67, -71, -89, -70, -69, -75,
+-70, -85, -65, -62, -59, -81, -73, -56, -54, -79, -85, -70, -77, -72, -62, -74, -75, -87, -87, -69, -80, -75, -71, -79, -80, -85, -64, -66, -83, -84, -66, -60, -56, -75, -67, -85, -61, -60, -62, -79, -57, -73, -62, -54, -72,
+-85, -74, -92, -71, -82, -55, -86, -69, -57, -65, -71, -86, -59, -54, -44, -69, -73, -35, -76, -85, -89, -57, -53, -61, -55, -50, -63, -81, -71, -67, -45, -70, -65, -44, -89, -82, -36, -44, -96, -68, -89, -71, -57, -65, -65,
+-75, -98, -89, -78, -68, -74, -48, -78, -59, -55, -62, -54, -73, -53, -37, -80, -79, -79, -76, -64, -70, -68, -82, -57, -73, -53, -58, -88, -96, -59, -52, -58, -59, -67, -58, -59, -58, -76, -86, -36, -56, -42, -72, -70, -78,
+-67, -69, -63, -59, -63, -68, -78, -56, -61, -68, -66, -56, -45, -59, -61, -88, -70, -88, -72, -75, -72, -73, -85, -60, -82, -62, -74, -85, -64, -75, -55, -75, -85, -73, -56, -67, -43, -66, -83, -65, -70, -79, -66, -70, -57,
+-24, -77, -71, -66, -66, -86, -57, -81, -77, -87, -44, -85, -67, -55, -76, -76, -78, -75, -74, -83, -49, -61, -70, -25, -52, -74, -77, -64, -72, -73, -73, -53, -83, -69, -68, -22, -79, -80, -73, -64, -61, -58, -61, -79, -62,
+-98, -55, -91, -52, -74, -74, -82, -56, -70, -70, -73, -60, -53, -84, -76, -51, -51, -78, -62, -82, -48, -68, -57, -58, -59, -73, -71, -68, -92, -72, -67, -72, -38, -69, -62, -32, -63, -63, -37, -65, -76, -84, -28, -88, -67,
+-58, -58, -73, -61, -66, -91, -64, -76, -57, -74, -55, -46, -78, -71, -43, -22, -37, -32, -70, -72, -43, -51, -68, -69, -71, -83, -66, -82, -65, -94, -68, -72, -83, -76, -89, -63, -85, -69, -86, -81, -64, -72, -80, -64, -62,
+-54, -94, -84, -44, -79, -65, -71, -72, -59, -62, -87, -62, -76, -93, -62, -69, -66, -84, -66, -64, -62, -46, -50, -85, -78, -79, -54, -70, -69, -57, -53, -80, -85, -65, -68, -80, -62, -32, -71, -76, -58, -76, -44, -88, -60,
+-68, -58, -57, -49, -53, -75, -63, -98, -72, -53, -82, -50, -63, -72, -53, -68, -49, -57, -45, -63, -92, -73, -53, -74, -82, -64, -69, -57, -53, -72, -43, -56, -92, -71, -62, -91, -74, -64, -75, -84, -65, -58, -69, -91, -63,
+-66, -70, -90, -31, -59, -50, -58, -65, -73, -43, -72, -60, -58, -64, -74, -85, -80, -68, -64, -53, -60, -74, -83, -85, -86, -48, -67, -58, -37, -78, -27, -60, -59, -69, -73, -51, -87, -66, -85, -75, -34, -35, -71, -86, -66,
+-91, -62, -58, -79, -84, -70, -57, -89, -85, -34, -49, -84, -77, -75, -90, -50, -51, -65, -72, -52, -89, -78, -60, -81, -84, -65, -69, -67, -58, -67, -75, -57, -52, -68, -54, -16, -51, -49, -77, -64, -65, -56, -55, -38, -48,
+-62, -63, -79, -80, -51, -72, -73, -57, -71, -59, -86, -86, -67, -86, -63, -67, -54, -83, -47, -70, -76, -77, -68, -89, -70, -12, -76, -70, -59, -44, -67, -16, -80, -89, -67, -69, -90, -76, -31, -49, -70, -89, -57, -58, -31,
+-48, -59, -86, -83, -84, -31, -40, -88, -30, -43, -84, -59, -40, -68, -59, -80, -85, -67, -69, -73, -65, -57, -78, -80, -96, -82, -82, -71, -87, -76, -67, -85, -79, -74, -72, -87, -78, -58, -36, -64, -75, -97, -75, -90, -77,
+-76, -56, -63, -72, -57, -56, -62, -64, -57, -60, -60, -68, -71, -84, -43, -71, -86, -65, -79, -84, -81, -100, -48, -63, -48, -68, -84, -15, -47, -49, -62, -65, -61, -49, -46, -71, -45, -46, -80, -48, -39, -50, -71, -67, -63,
+-65, -74, -62, -72, -72, -88, -82, -69, -85, -73, -31,
diff --git a/mindspore/lite/micro/example/mobilenetv2_quant/input_1_224_224_3_uint8.bin b/mindspore/lite/micro/example/mobilenetv2_quant/input_1_224_224_3_uint8.bin
new file
index 0000000000000000000000000000000000000000..c451e38634a6b476a2fe1c2a23a6d48d730d346e
GIT binary patch
literal 150528
[... binary patch data omitted ...]
zFrwqN7V&59O9gG|5e_Ucd8Z`d0)5K{8YMR}_HG<3okz5@VA9eh-^Kefz@1Dl60G@A zYl1q+qJG&?lnnNG7Q%@#Xe!fB_=^2Uv<oHz&L=U4{5g(J$g?XW=+c^EUq#6W$i+<< z<)>|lLkCT0IawH}5Cb=MPXb%D9%jXC!<FnbWw5$cW-KTXY`a)-*S~~`pXyF+Q3e_N z5rV#)>8wsO3c<z@gHOK{g_kj9VvugB5gK4wPT{Pg4ec*k*F2d{q;Zawa&Zjrh}72d z_%pw-66|x}w1KWyfa6II*OSo+!JZJSO-hc!Df_`uicQi#Q(wOmic<r#4$JS9{dy*Q zJL$gEka3rYJB!K=JuM{Hd&P8iL$?u!kj+EIGMW?VEuL{#01UofkCnM>L4vAi6bU6% zt-g*^bv;9DW_zT*vQXYvOko|~6;XwL)vip~8P!E0Esff0V|gGE%Vo}+p={~AbwXEz z@`^HiS1XmO6JSMNK2X3$kd5I5-DX~9RVa{zf?fxzee0fDr^TXcG~f44Hm!;pq8dgu zy7^gfEOGn)<%9SOG4R~XCsOk)cp$ipD9eqtsd`C%f+Ms%G7p>BGc2-nQ!)^|3+1sn z^o0z;p1cI`z8kqtMuV|-$~_5drnGGFqx&6FATbKR(3Er=%HVn91lSz9)bBuRDMNO% zMfndR-3$f%@G9D7K1v}IsHOa3)WTL0LgZTYxD`L2Vk@-;9T?#M^BqcXI;$kV_6#oL z_jKe;+5v^IbDJVz?Y3DPe6jzfFlr+sb)^Pxlj^%#qleS9{gyE%kQIU>zUs2Dtzw0x ze<w?;`DLAaRV2#ssxUi-XJ|BWXFnrlEJUhC>`EiL<tcJ!@e*KZb3>{p-i(ORob@ET zD}6yh_5ZZzi^0otlr$){-iWB~==#IDK#EM71qDH0z0%+LTni7N192`F$)9B0Yny^t zzL3pii2F({`Xz)zbdYjFixEnNwj=z)bJomeO0X^R74A^m<C#7+Mk;ZyI^i_rii0E; z2>~qHYK)CYlheha6T|rNBKz28KAjc?R0^;4{4K~I9X+=X-RP-)u_5L>M?maM!(tM+ z3I}o8@^QtNjQLQ=d__&hLX?O%D3|I@C#Rl-QyR*d_bUI_AKomzABaIxDIhLH-^J$* zV@H6xT4ZVxm;-FAl7Ow)rTfc@3nLi~Lff<fajsY?!HTbnFZsc=Sj<E0z?7bwgSg(% z&c`9V`}b39@>zct!Oj$IUDgJ*$ZlmPU53{n%Mc?Cys7ItqO0IH_xpRGn6DA}`w1>6 z;?HQ^-ebDrnM-jfEOtu|38#<&;__u<Q?Y{Ez^Sg=)MO|prV4YqGfp2#F*$UMe!2^6 zP_I{^4ptls=%b)E=}EwsIzWy|=1lmrgR@_XolQS3t1zUu?5u{NOy$Pfox}wCgZj~) zd%Q&GM@fyTlEMQiWW9PfhlBp8k9-$=%sWJWEn9z(#ZWCA{_1iZ=}_JEET(44qV#XP z$eK}N4ttbAyZ%XI<cnYD00|*$a+4)4bQ|ttPY)tTwIAlUMqlg_78~KYLk8i?`T`IK zV`l;XRhDmBUd_)wr%;!^&RH+tUwpnUCktH4DZ4fyHiGt+0gD0Y9Y~Dhb5KO!cJvgP z4}F3@h0vZ~T{>3B`|KIabhmZR8H}-yQLl-Pb4-6CrsM3c{M&dhxcX5N?h3E3WF=Kq z9aN`1ARZ4!x*+gWVHh-+o9Q^AF<#iPMO2b@7XLPY0o`Caw<7Urufr|Mn+Dy>QC-|l zqXPvZ%RcQ9E^|4Vf+@|3*biRsl4_WQ{1D(&kX!;N^>|B|J^V6ePul77I!oGKNd=L^ zh{1rw;JhdBz@1W;<V18ZQ*Rt5kLd{2ki+AVF#*XU6;)u6U^i0^F+vSePX4|x*ht{R z#W`W>&GR6o1|0cwEsawTL$l{pX^Blh{6US$k98TJZREje4n85tY(PJM2;1X0vWGu4 z+gv$Y?6z;<$z^>OMs!09PF6-5U0jW=<)>yjCabXY+c(r=%Em<>o=un<C4qgS2*41J z*2(p2VP?oh--*BY`}l|x;BNWS7jR%CFETLM96Bxntc~T&-Uss)=rFQ9VPl$e&(YbU zE<z<+L{hbd@vY;Vr@te~VRJdxo%pKtzFLiD!GRJn;O(gm>Gol11Brus&JX1%dY|Uv zLU#tj*4%BgfR*Q7UMxrADHn4GB68a|+JkDZ9cRXo%`a81^!yHSCNh7%1B(ksg)k4- z9Wv5u#7TD9hJLl6;6V(WiG$m%sW7O!UYw@J1~m&$0kf()h|-;D*??{`iPbmeHS&i; zNt6As=*G>B$-jsk4J6uNDZ4CS1|{&R{S)!8RqpLAz%&g>{yxyoWopU+?pT;#>$jZS zJb_~%7EomD6%4?p)agfZ)F%JpO743*(1c?#q@^(aEjPWs2hPswv!HlNqbruxNtWn| zhRpV#E=2tJDmOfjSzzUcxJj);F1;F<UQUjaSltSQc_70MGd*BtSxo_qwi#i?g}`-O ze@5)NCCV+vVP0jku9?nXcobfZ+YQgZRP@f3+PvjPifkvZ+_i+E++@~4w@!<)v1+f{ zRrnXSoET382csO_X(G11&FS*=I$!SPY|uf)BEYCEoGs+MJCBQ%ITrGr)Y5a%+p{Ov ze^G2w6tntsXlDXs^?0o3Jh8O%$9{k!VYS{|1X<M*KrrAP*rlZiXWJUij;v1x<YOS- zrIQd<LjNsWtrb9$5T5n!Nf-zGDHikT@+@`n{IQo*)(a_<9-7&GMZ`N*ux~oFQn%u= zC_-qHoO>-Bj*ZT!X}v<IR8hds7)lCbWtTPNPRr>@SoT9bkqa~YNuHtJd*u`R(472u z1LKGE5#y7Bdi=(a7|}Uo+<*k(Gl>MwuLNbFC@Ex-gjWf$)96v)Bpk~zx4fS*4NHV8 zNZtGEN3GB(apsw4q%ayjgm_!hJr!m0RbQwO_3&n<s?+s|bRe1G71mj{2R+h6wQ|rA zX@nT=?i{sDB2trS`<<Y5_{VNypAz=BebR}|rq2uRVi1{$Gp~vVWqGsB^*Zf(O{GKI z8;$wu@!lViEFz6P@n&<IWriX<`e|M-IyPwgv#72Xf~r<~rXTZc*$5)Lc!M8C!<VF! 
z=r?r&BP4dJ0f!If+1s*Wfo6M_9X6t|Q?@IaKkfm)z1pQN+=80U%f8{ZC;h_L9}WR< znw#PL2wRKx*YJ_Q1Z~BFsiBY6I(XA-fKtdCKRD|M76vw9jD@1T7XFJ9ANKU#M5F`D zcgv!;Zblh07>t*}1%~sw32y<jxuXO;V$9wdQC4Myp<RxAY^rYl{{qI*@`E6%4z{=P zBP7i84m~+E-wkGNn<s8L5@Av^Zl0L7{u%}06-X}=EbH~h;~f_w^w=W|euwrvTwN?H zen%T_){&%#{HSZtMu=P=_Cg3tlW<PT6js5mFSXH;EG9eFIFJYmkR?vSnxW2Iyl78L zFx74&DcP!b^Dh|GbgzV=_w*m%rDjMvam?ig?Lo1-<@XGBlW5ycuiEZmKm-g+rZ)_3 zLJ#{eCbKQt6>Q;&$pfgtBRvK5``zHLqH#w25YmX<mZ6Oa^EHV@`6$+ox+nRYy~5k) zk!KN)bAOVADk2d8v6X|1zyP6%^4i8%xYkDVPw#k%&9Dh?#Pc#(BIdqkcYG1rb8`g{ zZx}n9FTQ0Q0kgr^zNXo=q;wtV<)7Svx5<G~OOW#UGXXolBu<IjhqO22J>1q^luas7 z?9v(i-1`jcc<p4=f!I-X_)1*Nu-P+ro@(>g(G#8-1i;!0)SmB~KG~M}x2;E8SshsH z<vT4$6epZUhm7V&AL~G$68ws<!^@V?`2SkO##>GjU^Ctt;uU+_As}vN{t|MI=ff?& zE6<yIqP|B~7&GBjrh7b@wYPELDG=^n2%tXVh^s&eFWX<|8?nEYk01r*0k*=)4wg1w zl7dSw390fJI8VskgNL!`vdOHFZi*hibbGEmk&KV|#KcREG+|*2w%!XfV`K_<FwUm9 zQhqkfO>d@>e<LOw?kVBllH~cs3up^Sc+{awB!>;=cT1?Yz@c7GzL?n+R=Dr?U|I*z zpggG?+v!44;phtJN9BJwg|ru2cxnIZ3ib+h_r#ug$riIGiC$OP3ZnzZB1oYYlf(;E zz6v#oUmr5byyaF%fGR-JC5ZP^kFg5D{K5IZYah(a$^8arN;MMFH1ubkM&&pwP$-yV z#qdePs8#^qw|l6UIEUSzMTAAzsTtm1c@u#j&p^+np!Z8Me)T!>+*t)FDaQ!br#fZd zEf=i5Amhj0F-#4f+nxh@H49Tu_0u_7HbbGB<!`_m7@MZkxOW6O!{LX=qS_B+F<BqT zOED*6m3*(WUXpefqK%IAsQ84T=C{{{5CXve=Mz|=MsefCA^n-#_vHivibAY<w`}!) zFe^E=+h7ojW^jE=p~WydkhQReJ0!cSH?_;;$j6+g<{u88pEHjvK2(Uc#4LhXoI;6e zM7vNCwh%cj;ObQ`ue++CSC1$T&>oE7R1h_j;cQT>_cnJ7HdDysCWFefb$v=oR@5^r zT^(Bu9r@uMk{(O>1oZBrNGJa=6l{-?Ep+=%`;qwzMDV@r(2H+ONS?`gZFV~NsisX| zeP*+BVNXFwp!4``DG{?E9}6_6$AmNaALi13ax@?o$C{H~Mz$RI+|rK;Y<^}}^_Uat z{L!l0mU7M<`^;Ay>gK}~hJ_3P9sbMjR~iIA$gRR%+>Yx`Mp5UNFl^R+^l;f(@Ez4# z1~;CW?9)@b{GtUo^FgnK|3qhQQ&11!FxZOd4z#-r5L1_Zla4}hKy=_GT3=y{-_BGJ zSYq|?w=p9u?B1qOP-yEOa)k~TPqcQkx0i*y#d~fO9EVf%e(q-v%$37dyd?mlx6`)= z*S3ltxeqqd?S&)7R4!b^Cvxe%O$L)3N0Lv&4IQF|IXBO<ZZVfj1u3s!MjYr5ax~(Y z%UMIlTgXvybg&XDlYpK&wxy;y$lK2hTMd>1?|n(-V=9sxex`3zqEPRs=_o)W+;)$z zUCspV79vpZC<kP)^~DNw3Tiz)yS09>jQgZ>DHPF(Een1erb>Q&7^9(a$NneT@2n7E z>7?bMHL+s2cCwCa&t{_`b}5WHb(N**r|Mqimd^Kr)kha4VmB)vw>ob~D^NRgLxX4O zMNW-T>8jq^Nq4#IAxXR<nJ{WVWw%H~o_!0%x(0Go)^Krlj_ym{``Sc|n$A3nI5x4s zgi`%aRiVp3wz?mRu$GpzMgwNUecfpii$p;sFPQ7|-ur4VSqG0>nt?$H(WZ3LyF>E~ zH=pMp<2bQRh_21Z2$U9cF<a+kI@L7MTF+N;30I}^VGpROhs?(mpkzSlArmDpDZ;E~ zO(+bIA2kLuaLijmt;<N}Fd)zQ67V9rjWAVdjZKmFo>|sQAwzT}pRInyUBXMLN=sW5 zhNm3#8!m#v&&u0OuffdV1#nY0KsxP#LHrF7M-{Cuyl>K_%M1;J*rWQPSL7b`m}59- zpmI5+E+lq?Q}H7wpU@~|{rmwaAsn~MdqGCM5HRFuKXKd_8Q2M<<=<MMvU^_o(SwS( zUj$~;#LpmDa6v36?pI};OL)KqE0BisZxs--TA2@J*W9IJiQjM|lfWTmHEz2fo($Y0 zWA1FwKo-2ZW#lP69pjKr!h{0W#>s=86n8oIdc1@$T>k6Kmf9#7Gheu>)Q%tw1|2@N zn=oyH)&{DZ<AE~U#3D@pHD9*Rl&7+B<qk8%;56ua@EFpS2eWH!wkWMQc^sX)DR<?k z{}Ea?26B>4@bw=PUA=mCSxc*a2j;rJSnE*BP4RlIhH>}$Hf&bU|G>RZe$DA7s;F(Y zR0gH}n6iM%`MdI7Yns6u?g}<f(wVld(O6^y64$sV!?126`vmt|1yK`pGgc!dz!d4r z_S8tKgcJlaJI7{6=%dFmQ}+>p3!Hv{KryMb05NOopDR3hLi+BTtfA_(v$yLo7M=H9 zAKD=q$NX3-QYfuNw@=CnR(5$&^K6oH$gCerH9B~}tP6t_iB-VL1Oa6upn=Dl=@(Ke zEX2E2!8)wv*MRM6&TOe(GrYmgl?~Ia66C9RZ1??8Q=GULK5_yn7&rVSl6YYMqVpqg zfOZ*=7f*G?VEbymRCODo3@j(GOY1%QP&Fc)*3ij^(uF*C91>BZ`?@7STl7liL|j9R z*9{>*VyVt*By1RTofR)P5BUjiCiEF5N4{U}582{klNAcv$R{St!(zHMAfm@+Lwk`| zda8M22*AIJ_SpxodkHyy2KauXH8*|9aW1Fa%RQL;hMie=?lokSA5w-;a|To^)?pRP zut~z{j#L$?T)E1FI<wsjz!@RwS&|w9`y7eDXr}D{v>Fgc9Of<Ju&`vg(>A~i5Mc7& zvJDMW!XE`&tZ^JJjJ3B@Oh#n`Qi>CXCyUozp5#x>q4G@Xh59J3)8OV!D!mOkQYrYD z3xVvnQ^5R;Io$#2rc0Qwl<5x2*}4s5Qy~bVM=Z}A(0hBGFi$~qJ}+l>n+N?AXabF< z`G|wMRsOLJxa!2vbn95eWGmY!YLywyGLV4d?9yhk_Pa`0A16NO-a&Q<hY<N-+JZe^ zlx1$?+2vtSGWx|N3<G@)T$N)Uqtt(|%2kO97ObuR9{UEBH*FlvfX&!l!XoTT8GtQe z?muT9O?~3e>lF--nI}`-gDH{Mo|kazclZh}z(|poCu)F<tleb)p!0h}=x07Azupi8 
z+EDvU=uJRAKODhhk}@;C0AhNn{A5r5(;ChXSq4>SX;JY!<h1?2-wKb^t?gz5PC;bx zY&lBtPSFf`{`EGXA^~PpdyHVETs!?uwJFU}7((l5qac+0?0qG3D?-V@Aa?GVXb7v{ zKl-8ksrRZH+q)v$jE|RWYEw-X9vvI{6B$fUVY<ag=rT_Uu;*mgksU#FQaI@#yEHuT z#p~>#3qH+6!!>=DAPg(qBjRd+7~9S-bAsrd<)uO}mwDgz`TW5xxL0ye2J`|in>{g( zgG}4JmZMw1HSDP4;V?i&aqe*%w*RN0E_znvL~=@V|4VZf!ev_bilcYlzQ#91t4}1C z%YZlE)VWppCQTOr>~+NozcwwNaps+%OkWWYuS**$0yjA92URX|s&)q}#u#RaOSjVw zp5=@;6p51RyTM$%d8Z1-Ay9iK>5Bp}dU)Gtnf%oiEbn9w+Aw;9$cJ7+Rg@%n?LB^@ z^nYyYzz28qKS~SjC&1v>QnH)nb&L{yEpKZeopRjTY$N=wuuC@Z|2Ca|s1BRthCuR( z=RT4DN2UaT`qEBEP7{o_6<h3i-p$X!Ba1?%Yo2K|pAU@|g`VWVC34_5uo|kHfw)FX zjQlyhhiJ)LFeULfodI9^l%K7TZQ?!q`$(0QG?sbqg=*mo!yVUpTvmW4HKkdX)vQ|B z664OA!*EslrZs~i^<*s%1fD^ycSr^+r<ru+_L|{h_&c)XB&Wu&b(P@?7ov`0kh)10 zP6-ujK<%4S6^dx3fFAGh`3^+c0k#A>g*n1?x-=oPTJ((m(4ixW9eotm(%TzJ7hm;9 zT;r4jRs%jHL;q)dW=8K#pu#Y%l84C`o64z7CMb+gG(y9nBN(#ou2lm4OTuY>3E!`Y z!Q2_W+SBNg{k#bbt<GT0gzl;)zcntgJifz9p1GF%A7%gE3!_%~<lYjWkqV3k@oGTP zmEBgmjkVv{ffHY|^rxH|k#&?<uRDFRNI=tSx3w>K+*I<zJYCtB>F5h?Q~~4YyJ1D8 zLj9?$hY8@Qyuy7H;R9^LZRGweYRvXqXokzpm(qx;U)5tJA*VQd^wb3G3o(OCWaIiz z`c_Ufya$j2a=K90jpKj#4S0v_hbow1ZfV-A2r6=gf=e9nQN{*5+RpLDiHKFqzj&u3 zM@^H%v22um)p*A1gsB0e0fUnJu2ZT84<~=dW9a)y&#S!!gZwyNtQ;{v7i)jmUC}W< z?3q7QHX<V)2>b+k6QU)>Rtt2;RvL^DCylT_4tAj-qaRjTC-Dcu-#Ez$jQk^z;of1C zTZaGfyiJ;Bfz^0^^;fM$thf8&mrd74V!Gk<o8pJr47(vxIm`d(7y3xomM1sn`l<R^ z$slO@so2lw&iXFGu@)?iZ#5)1Tdk;wi4pSNw^88uf5pIgXEyA1NVttc>n;tG=w$iG zFcL=}!ou4}EuZc^6@d?Q&GZ9cKD7rMulm<S*xpw=o&et~1xb&BsbRrXne5O`FfEAs zIK{IMeAM^`VKP;VoyH)bpZIs@6GSt<AhViuE@4FP%)U3>rSao_YJ<pn3pD4yx)b8L zTH^@7t90A-T|cx^={aiZScwu_!;4}g%3v2Z-NrZnN@ZjCr7`VAe0+g6wcLm4GNC{* zz|z8EPb0h8JWS3{D)!~^If$Sc|Kt9K{~s+*RABM!qy9(wu*j8{Q8!V@`L6mw1Vzsv zf?GG<U@x%sx`QM9oW5n|;ZldFuKN})-~RpvK`QqmFeGEbu(BZl?mx;T_fVK=7*XIK zAIqNdbk(eImHM`r<2}W7K5xd9vY+b=ivOk$)7j<HhRiJ}mw`SK6KG%F%L%c*_q1@C zPr3S3kx3Z22Wy{?cJEY;z3r#rVs%CnliMI#Ap;7a>MCYr=f_VV3I_$^TA4YbLeKDZ z6$v2s!_=07G4)dCgTa49ei8I}!qjO8s2@KasWY1(anjoLXb7&q(>XQa($B0QY*gxN z=HcrxpJMr|e^*7**0z7)r1GdDuoQd7Tf=7`F2)AQW^m)iDJ15B4lQ1p8y&#HQkM~% zJatHs`A4Kk!=5F8P*ezBFl1f5v=4!6R^|>se9*1h$%5DF>Qs!eUye(q22_`7cCKA- z0@7?Yca0~!&M|lk;j=W&t7fx{y*$G%oFf>hk#`i90vtq}K3ou`LRy+$mt1fRYip)| z`qj!S^?ezF=P;|csH1n<EgTOIr_9iB?VLXx!oBa7@Ad|d5^3fSJmRk0vFYlf7o_W~ zF+K%*=<(7=q>&Rtkm%O=KA>5joymzGkTRpK?lk<yP=4hNNmIxmd})o!&%MQGktUCh z-I{;jo#;DC><b(+Sfd|tc_ZC>g>LEfl?bk&b8Cry@@)Z#AlRa&D3uw|C&tG8n_8PS zdbD8Eq(ok35JB54fuo;QW?2&zib>6)&XGYMxlnMAFXr}({}Ae`%sv&U*}c+QRU<%d zJ2|YZKdr3a8y5+v_`ZM2)<bvTh)+=pomU*wUcg{3W>OPhpS<Bh7vIq6FJQR$kQ;?M zXRa-t2E`Jdi&6h{_^qQAjd9p%*Ma}y2=)y%#C6MwD3~q-rQB(%(^62U|0!EgHaAqk zBirhP4)yRPBHHh38m;FUG^jkjvQ-8m(V~@TaqtfjcX3>W1m``cVVL9#8kKtNgtR-x z-UQJi5kB6FCR5sTQ%S~ArNvD8oz;5fMQ`7g5M@H9&OAPj^+%jhex*aUt}5A@%7(%< z##0=yIax_x`1qi8v8Z#jbI-pq66+bg%9(onsAPvhjM8o$VMRa4G9mD{7#WYWAu*+! z<3<Y!oIqx#^eB`KRYBIKXRaES+PJ+E#pwAH1x@ZuerFs4i20L%0*^-+x`ijUg<})3 zNx7E4Vgex!ksM^2P9&vztWoF$gk+Lwhyx&${4a6^+{=#jxG()e_*R?Nwg2z9pA>aA z%nUg;S}emp|A>t(Y|XZT^1Pjkch^S<#}c~7c9FJN--hv0D`w6j*WTS*+<TGPb#t&K zE}3(tIVal-BV(g=*s;1}Qwv8AEpiY{eoqx$1ooupyaMnzxMk2Xrr-x`XQ?vs6Toex zcPjc{5~Ii_#0D9H6U`lHe#Tp9Y-qTLIZj3h8-wcU1fdbws?^9geB5II)1GSWGkxht zBb6Y-)Xwwk!KeoTL=_yLKrnPfk<w7!En@@9B_I~Z(A<4{fJ?iagDu}u4V*$0X@#Kt zNRLv$&DP3z*CZZU1}A_I(4I<??HM>3sgwMea)7)@kDl4s>;E_>xQanNbGCw<CQ;fN z{}<gDv-IsVpkF3SMWl6W3F_G`bEQbGsH1K}<9ozuMzB5?rm{~S=|<FTdJc?1t0_sM zQx_~K!w)@%h)~0<Y8@c@PvdkcYw(@-#iGY3yz-25U)ddWu428A4dGpe>*#i7>23@! 
zF=kxJ3I;$Zq$x@?I+Dc*q2I5-fy9OluIEHYR&v63!UEU7<F_Nro!n|n*AueM8o<q* zLJ-z!g@Y<oEwh_2c3@u<gN8M4tTq@dU&?ASRu;BDO0T0AEd7xNgaQ63{(%LHf%-{a zLCOb9AFwgXD9wu$5wLd3m&J|s4JzQeFhhM|z}nEd*fZ%HlJENC2S@v$aEaJlasq6& zQ*}ML3z`vzkQZgu?!z6<Xxp+ZNV-)WDCZ$*qoi+3kORr24=3A(dpBS(qdS@CrHOE3 zH49zViJS}Q1|IEWi6B?^&C9ccyJ#x!nMAD5GVJd#K;U~i84H&4&*GH4^Hj(F{QG;` z<V(pt_$4^!fk|3#b_^fUezPRkmeC?swuM6uV>wE{yr=LtiyQjGF@KaY`+7WI0oBC! zgqV0XX3oEn&l7`5%>TS|dz1%)afND!)_si1srI&sOclwTuG*87j%}?NcK(j^{F2cS z{@Z7vR0i`wU*N~v&$XA?vk*jF$XwJdA$RC=7s$jE5Ze4Ehe>Hhs!(uha&Andb_-*M z6Qpo3H&Y|T@TLHmFz4oe(_MG0SN^!>W!If;*BGlx1dK-itoYE9k_02)?f+CF!VnDe zHMP^@t$wTjr>TtVk?yt|i!nHPxTlBz7$@~dj+DCVlX;~zLP`Xi=-bFj1MkT~XLe+b z$geaR2X~APcc7fO#=h?AeA-mnO(r#_ZR#ansdMHY_2VYthXEczi^3I{1G8!*yCHBG z#hIMoYh}w!SL1K-|0Bw%X&dop?%aNL;hZk4H1g&hQOttEufZs`5rMK3h~&H<hiAHR zlQ!i72=<Ar$;1><<@qtud_ueMhic|}i=AHl3hp<P`%YJiJ!hrH>NDf=5(Ccj`H=Un zUjcRaB$34bGw$2j3SzQIyC0a&YUz^Z9O%2ceA1WB)B%f0eT1oj!uFZSESA`&=%_@p z0qg(VjCk8a*No&Q^|aXQtF`L-bQ#0q+~!p|*TM?)HsO&JTB`V-#|?{*NV>IHN?9hB z98u}13Qvts6|#{bfA2lECA??PX|?SqMX}5M8s?jkk1!H33viN?$NgwsznI2CXfbQf zsMpQ0ETG5-L5v0RVu+}GDvk-r0c%easd8ll#&OL89gHFgGueBhJ|Ra4Perm-7H<tG zh67FIp^flthKR2Q%B>>2#1k8l?GGnk_Jr*4+2`)E`fI(vt{{}P3;D<VX@;K@Th_s< z>Ja=$$M(U(c5AtZs?gL+TXp8LYb*!<f%jzAoN@jJ+t_iMHI&x}V?4kK33rL0wFk9` z0lJq*@F(-YCHdx>v^HN~%{mhRpvPqg9mC#m98tUKtmIx550j<nl5gdYq!*qC*Yc~G z=@|wo+#)x5V<{tAwxtQ~qDVNcuQB%9EF3wFNA8O?`#IOFG=nSEM!R|mwmIe9dm*Mq zAD~2z;}b=K6f_uQy2}|cqN<YyjXE%0u)o#5#4pgTC$QTn^g!bM+Y9<X!kXnFN0oQ$ zT&UGj5MS#Pq?7LmIK0sMf^z!x5EzyjoP6nh;$z)?>^_eMLT|Q4o<VUL4>9r^CLL<A za@|nmELJakBa}Oi3RWUw5?+<Db$jr7s%bww$=gV1Z-nMuUHGDs#DGqN&5|5vZ9O0% zYHIk4{PNH)ENmJWghS8Rup2BM_}!%v3o@ct!Q4&CS}o)Xg+^jin7Z->Hd`Hi$hPcb z03DRnjUbhe9F9!I=@&C-J?k7f;Wo()H4!}bPAl81$J+hWH-pCq%NSlE6Q1kS)uosU zoZfU@)1CoyC3&ksee@Nw0h~2sg~|ynZnP$q<#=B!2TF`~0cPx-Aedz`tR3WVE>#nW zft0V8d;=EY)_en1r@L{gj#{QdNo;i7PoIp@lq=5Jr+v0X{fGTBxF%zQRa52KT)Y=> z9ueH`Cx(gD%(ed<zN$v{(UL-kBl>*r|EgX9-e&b9v1uhF&BCs~8MW>^9kza6l5Dr5 zvKzkgDssMCvL2Zp>n2nUoV6`c8Q1X$xq#h#R&}AsSb-l55@~gbV9<1yyT0)daJR>! 
z${OG$rlr*f<Jv+>O@#>EUdmTmF_<%|&6RJ=9?R`f4p5kn8v$U~TV?p%l7X#Zq4^Qx zb5}}lliJ$2Qk<J$^tu$R84quz5WpI=X=;~yn|WUH8fZwr_XC#4k6^s-bOTzvJqc8( zQDVk6>UVa#72x_}H4-?I@^v~wGeaM1g>b;j^z%ud6pi9im~>|`t3}kwFZJFf2p%y~ z+N-zG|8y<?qGLB6ZJaw~`=5)`Bci$v4xy?ydlGMi&P((1j&)(A|16#AFkZ*Crt;Ry zU83_LP_^0`SdDlppxN6uDA^hd)rM41L|5iR1S^@EwZ4wD()hSbCBRF${l4;{#7V^( z+n<#gZ|RB0`khJy;~vqT_d_8;NEF^EEp13EDAGRR{{f=%IcnBi5o_D%9)v;-QcT=g zoQ`w;F%9+zMc`JxfOb-RHS;ZxQ>eqHXrtJXe^afoxg0sWN=1YgJ#x0EK14+Lb57Gt zD?nSfCM+mFTz8aL|Lzo*;58PP${4m&OfVvv4{mWX9~lqHqn5h?D&($-vG1e+GjJ~9 zQ}TG=Q#i5|t@j>>jz1@oTS2v151Z^JzSBnL>IvYaZr{L1n|rYnX&OtgBIzI|+O29> zs!N;zFUwMYwQ%&cXueY8!v7ei<~(Y{Tu!?Ts1n;*cNltVdcz)mlmAUH;)P`+)B5nU zu=ne0y`NL7L@jgl_Kj;j686Rw>=b*+PB=`2?K#{3Gu}W7$4yML2_c4tls+POgq8(8 zb}d=l6pJp{m*;Hw+>z`u;81!PpS<f)JEp7U$$zBuSDkBS#PC;IT_QQGR{sQn@#gaT z+=P1d(~SE93aLjbKvEp_!6uz>%v?g8pRkzD?7RsZCyyZG%rp$riHK~!M`AT6s{7U) z7_aw;!^UImtN|xn;(kG|K8sY+7tGdFg<75;EF1dtmJf{=wISBDWajP<<1~db9HF?U zxgDfALkAwj5S`G1B&n`tVV4m;!?=221>~u@x=gXE7u)$k?YmSs?p&Qf4*)9p4d=kt ziImlfi8F;N<qrI#oaIFha=jD?#uqzx8_KC=wEyqcUCXgE47c;9zNx50Z0|l-c;KJ| zxB*W4C9Zy>qGKSu#x??{!}M!v8qH1~Ue|>E+6Vm*oj^M;Q47Rfl<Y{$)#@bbZVp$2 zuA)ikiZHA>orf`>FwiWPxzFTkzIDv_P9CI#%!kZbQyTb=Lw{K^I?Z}$bij}@&!syc zKdSQ=2L7GiB4mEUZnq1xi_=wQHUXvGIU?DMl?h(ONHzY6Ob~uaI)nR3`V7S?`ubg5 z<OB>Qw8`ra#>f`_;E#`Eh>ar1U#U~*%Cc&wTe(aUu;a>g<aP($={J9A*S7jN3<*SN z12<NxcP#kn`SZ^pgSid4!gt3)7X0KTg3U&-0o>(NUDC6-N)1`nNsJ&?Z{0z&Iq8wB z`o!KlpR)s$&%y9=NQ*nsGOpq*ylxaN>DwkQM-PU%T>yv??Jt|36;f+RabYm%S@NN& zo)<vV={XZow3kilI1M3U0Zw~_Gew4R_~u$;^{a-r;RgZxlh|bm$}Qym2{0h#2u6FY zhs3;6Z|0N=a7jF_G6oG8*KwOeIv2yX3vPo#c^+-%`(YnO=&N|fbBI@C<G$GL(T#AF znQDW?dY7$U*E&!^t9w5g`CSLMBwd<Zf8QBuf-m(IwPw+&rp&B+5{J>LEhg|-Am<9V zuAE|Vlz@pJa@%nEc8=6DO68Wl;+xu|9J7tA7BMXWZ5A<pk-P<9+5;7N>h$s|7(JKV z)18n0)Pz#Df_Eal;%51X40?f-tT}!XRq}230eZ-2T3Jd|XF8!e_4#|s!cOb-tu+Xd zU@_1~C-ZW47!4X*e3SU5M^02M-q;>N`tV{l9sMN1fnxLTpw`zXcM}IipirEw%ZqcM z1})uWm3jEO*^ieZs<n-2UDo%dZTBHj4O{pUWZ%aV^kuf}h%~Iml$p86!KsaW1_gQZ zS7_%OV-!8#eDVaEY;k8@6~6BWcZi$^h!>MWNd<bfKU345iX{TEqkR~==jdTi7>)S{ z;@FFuiJ%r;*%sjG6YsRW1uQICgV@%ZsoLOdI(~S1D?zrxeGM<5skF!fG#d3MZ*@eO zDQ?zPrGVAn3HM6%v^NRU?K3bpShIr`uQ8B6cg4ZU--14Cw$T5;W9J}1V)0k)lIjWl zzl?Bw87utrutCh^r2i<h)d(y3Qb*lc1jz!6mus|EXOm-_i85Z`QKZP_a?oV76^QU- z9&ht_##arNJX<#S<1COYgDYAm^nhyN4Liqc;egmXdsOZPD#K3k=3zR6SGU7(U20=0 zgz-Q(Ym*AgC!5Uy|7lL=JJ;h`X5Lf_sqFG<A$@dz_Y{ZNCDfA57y9q*EqFA(l)sW( z@dCm{;*s9k&frI^y|BlccGCFQ68h7Ji47XHnl^65j)>C{Hx9bQFG}iWal;=H{gtql zsC3~qnM<9juTv0ZBHsrL^~*|~OLB0z!x-$evGqn<u{FMmpL8LCKD<OZ=a~SVm<v+^ z;HngrVGC$9HJ7=dJUZpjZP8H+(74El=rT=Q5_?Jn9(A)>Qj*qQo*QLf?0k^29rLHF zU9?jfD<*6y?^pA3lb=i}bQ1!kA*4o%n@TR;3$h{Zw8b65*z?OhL_!cM{pSm_zWF#7 zgal;VaXl3-96>Z4k-1&%R8!B7G$@sX5~dUfAixM9Nyuf&Sd;$Ci^{2xn0740*VRvg zWMm?s!;{Ic$-X}(!cE=&U=xvJ*q#N+qWypp5`nPBFS{YFm)vU)v)fz9i{yu67_VU* zmjgb81<l?@_T(>f1dLf3Z-qg_<e$qozk@>yO62_S4AQ<N7lybz??T6F-0~kPNuJ(l zo&_X_-f-LJ{BLFbvgjoBbQeE3C+@@Cy$7>XZ7kRJ?1a-naZ<z*7n5^;-^MCK7=!q^ z3UDHREP3+yIAa@^Nu8av+L}1ANK!mrjZSzhEY??kiEU^82ODF0PEzi?wmi(j+RkEn zbTulx+Zc+IK_4>ZJjL+tP(IoLTI3L!hIU&d-)7ENb39C)(8PLe@5sJ+q(M?()SwYr zr|R^jxLaAqupKVorf;nD;Y`dYjdP2#loCGQP7?36>zpF^_MiU>tIl{Ej_XQ7p?9px zFpQffCfGTlRVm8R3R2!96eF?HPeYtLk9EuiE|YD#DHicCv?$8O-Hc=f{Ta;u=wB-W zJiV*kGR#zvfbTYQ(mhHo6z%EoVk=Ln_gKMR`yP18Vkd&KWy*9$tfj;qJ*rQx{zko6 zN=@;gtdHPq@{j||;-cB^F<OKmDF18u_Yb>Y(glM(n7HzgCIp@w;Y)T;N$7*qA`h=& zSeVgYYZIi@6=Tn;TjDR}$aHY4N=03ELXTnZrtBVg%N(WBBR|fVuU$sv(7{+NY&Ho_ zntGy%j<{eVeI5*{0d{~+yDoie7MQme8cgD=DdMDmf?WM@L#F}K_V;jY#=FzZcO0k# zFoJP4lLEx3K<6Ss;j*4~Zt(8Q1B%IKM$e1ay^jD&fy6Z}&)2sW*~QGmNDRtK1a-G{ zy*dSQ@zWIj{LUoj0d^@P)m}2fR^IdFY?**tN&wK^1>J-77-bJ>FiAzG#8)Xi5e0jg 
zE(5fBTFWzoNuI|(y7GLq1}uTzx{=eCM5bcE+MHg(X%-f97}cNK^zepjOkLDeBZ@rM z*T+jK`XBSpNLh6MFqtQ!_v@AAlnwM##8>dilu_1mP3@xE3O~>!t$D~x^yQQlPcg92 z<sG0lAdpwG_9FVz+)~+esflaTrp!B~Cw9Nhm$z#zBk!G?L=QHucS0mq-%ID}>D_#Y zdIO_$9_T?&REW4O#mtZnAKE?clt<R%#CPBo%2Jar&$7w3@G?_W%>I+9C{RqpBvf6o z_?kP&s`r7m76;rw_{u#OSt8rFhwiiTsh4$M`E#Jz>cO>5@G1snl%0fUTRQ%xs%O1v z#sW7sUyBT4$7oKmuAt>l6lb_k%g0*_(IT$CLbTs(PEeisOQ&sUcUu{>9ZfSqds$#W zh;y3T8#p_gp3B?}N!x9L(Zoj9Jv_ORZ%1Dkyl>AlBoggWX1H3O8r<`D{2M}-N8N1= z$u%uJgy@PxhHcp!UE&%?|8mGRRB{|hPY>0IntFOmb%*H?<DuI&TLXa>s!ybeskB$A z6y_7~F#3?vP5&dTLP1XXn|S}QX~Tlu(>-w0QAQwNM?$m?gg#-TeXqhbGV={PAFS9y zsO;5r{R9&;NJz@C3!$<IZiSBH!C4*s9;0zYe0S+{D+80vPNbN`VKwI{Lmu*05&5_F zT`#zo(Q-SBqK8jp9AoEWM*s4jS|c1(%fB6@ct>q$fF(85qK&-nagfWZMpy@B7tw&I z8lZIIqD3)gT-P5Is@r2ik)AMi4Hq4kj3cJpQ?fLps8jsJZ}yov_(_ZMK%|hieD2o0 z7Qjb7{rDd0%4E%KSD&vvtmy)o=NLeE0-evy@b}zi<ys6-Ft{O?=80a02GT?cNgHWS zjk^T@+|-%DOW+UtUrc&sY3sI+iXDRl^vW^qEA0<}D#n+gk+MwDd%+2Monkn_K4K*o zPUMC6rZhVvvj0emKF2ex#1MBuUIPltatvU<rH;n2*_SxA2H|3}_R+m;pnfRJkM?D? zn8GM!gB=ms90bmTxXYB>j5!c~lSFCt;nkPW%n2gdg0J$3sH04pQpK^nZPKZ$plsIo zm#FZc^(J?93(IvO!C77y$`jK7`U&#&a+NmZ=G5vzGD#g}F)!Z+TOS&$9HpYTdWR$$ zH+ao}<$qT4_P2xA4c0Hf{cDoH-Q+tWTu_<pWE=##ENJq#%FA7SrAKR74=2Ff|NNwN z()wUu*LIf&6Ws6F%v|X6t~2Wz`*~8l>u+Uo|1~w`305V-Lv-EYOP@k-7Im^USd8E` zc*nY>-TvWdPR36pmu<?Y@Tz-JkK84|{MCp^%1Qr8ft@R?dyGT#y3&;d&>^?sif)AN zBn-~cUwR~_a#?B|lbVn*HuEm|U@|qt^l_VV7OuWsUz1LQmo>_A>oR?&Pw&g`IRRAG z3cs;Qf6_lV{u5I}nke!Jgo<<bxZ<xUNdLI2Tg$ACGfKV;ob#=?X|*Uz<Xf{VWkru1 zkdMhpJGoWHAM+vRMY;*F$!L^g8~i?_NYEY2msV}g0H%B;854)W^lIF?PF7r<o6)pN zR*i$G=TwX7TvgQ4N0AxAt3%}lVyB(alm5d5NSzXqsj(eo)E_M;x?{a^RIE%ja&p+C zV~k8kxP+i-Dc?BJ#pUmzO6)DXpKED|l{qa|`K$*@tz`8Y*_Wx76u;MvAFY^Q9~Dhw z50wIiox6{PQI59@61$>Dzbfx<nU6rszjZhiU@YRLo45=lPI~R-xvHxb?H?@I;v8{t zM-Gd{FLvRSf#WWC^U~*WZOtid(|ZMKWK&DJ<WKx$5200PdXI2t!2*Vo#z^Lg0i2jx z_d#a(ZhrCJ-8JLSoSn~av@Az0N}Vw?%Deu_n(5!Z5C%%1WT6(P-_$}|9zz*`-t8#( z-Z6*{0)V4AJr!j7-pqGa!!$0puVW=oYse5~BtLWp0RMp_Q*#PR{-V>zGPHHp7@C?s zjOjgL8tsL0FUmGubd^y{ou%3Fuk7V_T*u*wkC!Gw(|0M$uYhgY&e7si3z8+8UO9$6 zDPYG+c|9B=5)Wcam6*2|FFo@?6zII$*kQw%V4WG`)6^uyaEG@B|3+t6l_<~K*Yu4A zO*2a{?*<pKOIeQp{%1!B{<>PedBsrc^*##yVAd#(KrFbKEpGOPQ9n4nJtG5rrn6C; zyKb1hZf2+OgTP$n2TxK=Qgkli{8O6DSyjY%<1S0_ZS41M+|pZ07;iCGO=yN__!?@K zep*gY)Y07Kl~EU4dXLFYR4Q#?3wL8q1wH3c2cI=SPF4+xn`E}n(~d*q9}Vjg^-NJU zNwZlE1~TCT|9J-Af!+9Hx(5LZ*hwp0Ql|*bsYy6Q`0bXnv_L8r*Y%Hpn5EHLL5kTa zF>h<YB15+<g)5t(qti|;va5bR=Be^(S&oLRovZ_QQF@XbZV{<#BRL1W!D+DOi-`gZ zpNt*vPxcO*@|&Ob93r*{6PsvDlWG)3aMv7LX7x1)3gG9p^xAokL@;!l$PXQkB6LH* z(b23m*uyZm%m2^FR?e^46qH_xtA)H7u%6|bfqjG4>w;$Bkq_vOb-2ZXexvc8{<9PV zW}N+sKruFDI5(MJ*B0-RqEvs0Lh=5oTdnxJNwPcRnIh{+<b#N8A`6EVWN-0=YZBe$ zw(DDo$wd0oNQ65>`HEcRvf`91nsOqzCT%vqSRNoyf4ML!0p+Q!xo_Ik<u2;2^0gt( ze6PlbTwcICK5f$qm<dtjr1O_!pb7Em$A%Dnmfv)^KD2_JcVjY5Zf7GgXGI@2kHs-p zWuswvdb-le-c*<#VwbUytsQfL9&a8m%DoI6F60L2AnVpE`e~G4@PZR29$+{4T$MVw zMA|&0HpfrRU`T@UqfN-F3@0jVOEpG%3$TKZOlIIS?eXDr4Xf2~80RwT*187N<R&7i zBfj1%;~wsnmliva9S2rsLgOlhmR4>*8*is1snA56Tlx#;51+Hy4W-dYcU%s&Q=XFs zpm75(OUXNM7)}r`1FjoHm<y!(VoYh5aqXwjgwP|t72A-;S}4|OyDmjZ9LB3POK*o& z<l;K}HQbaXweO8$b#>I;Q}I1{Wmzj`8u%*%Y&Kmdr0z+wtz~HE#v9L8rM(juVYy~M zo;A+(iZ;zye~^{MBo&9UkG?DM^1=M+PWGteZoPyDXWfAB^aY?uCku&by?77S05?F$ zzk@f3$vWIn=`(5Q1o$-jJiV6pRNNhyxK?7$X1N3=A5ES)eNGGo*yJ;v#LNrXf;&3n z$@inKQ<ZJ?pJtmyj@Qq0-n>4xxz}s|n;o38ZWe67s3a~-=+*;Uz}ZK{bJ14>J~*?s z;xqar_aiftleVZAH%rPJh)fu>2gt%n&2mnO@bnF6n~b8G+0QR&c~n6!gms~F)PD0k zbWMtXf|X_SK{!!&#i$^Mu3F{q?$oZjm@a4xw9EgeZxkzm!l~n$ocD)o(R*DWb7)e- zqe_2A13qj(<Y~!vELu>mnZv0{wR<k(wF@~+LB-s|t(gd2du|H(C>p$~NUj_6lt^b? 
z+X<*9Nbw4DgOI>k(NcutX*$Tx<jL!N(rvT@SvZ`i%i=K4t?L>pHe-fKHxi%Ht3JL# z@)jC}C3%q`%oGASaSrW_SSG|(>#QN6F!0$zVawCKRzdYZupru)HRi=$?eQ4;=j<l7 zdC(`|%>=~zzaV%&&5I3)@n|oWU5xo~U8Q8%=j&T|Pr;0b^jEl6R~%IGvi1x@t_*`8 zYJzC4sH72W?ok`uqEV_q!P?b7lK+*{Q9|zj%mS1YGrMgFDk#9!5~mejdW~9~FRK)c ztqG(PkJwiTSf=NFO4AY;n#?^Hue*alfhv!VLvIY(h^rmT=ls2|dbd0ID@L9@ziv4B zXN><rXfH|2=DsZLrGL5ac&t0yb_M_VhRY@8km;bQQR`lh2Q_(WBLNa%lLbIH8oukJ zUz7)MYy!DnaQrvf!;}kr1g@;oVBq-?0XsC6o=&-uNbKB%rU;0o{+)s+bW0SP5X>#O zy9F+clzQ0>>#i_tr8+88obh!?`B36EJHLMVUfcH?f@H7eg<A83B_`wJ@ITgPf2&D? z0eUs_*@r6-@>%D5C~ex`wM9sx$y=n+A^l{|R8%e9UH4eMHAdM3pcOYkZv4`)sSW2f zN$>d2N!}k3uaFk)c#T~J5eb%5fCdSa+vweeD^ciBu@v#oVU}0P#f8S7jQ_3--oC${ zot#N~VW+Gby#+4|GZls$GpwZ$M(_^{d%sqRAE&V_bLRHD28lW&ymXoW_Bxlf413X7 zb8G>;wam-5)~uZwbfSTDp<SuM42>4?OyGC<uiP?ROYxDIU_t*G<H?_W;nvSA<#qa8 z(U3|?&-4`;x+tpGIutI<a67yi3w`3KX9l?GbQ@08S7)}zeDPqZbKWbYe>;cJ3_eeE zNB3a&<caYg`9V!I+kE+y5+H}jqNDyD%g3N8f<~lXL-umKZ%zzd^_0Y(79n`TZ<?(w zG(N98N2nI@GWhN0b300@E$Vy6lUz(N(aScN*>DJK?z~D{lo^~mJhN=LQD68v-);lm zv9q5))^Ih_!uG#f`PS+B%d-^nfgbpVP4h=z@bR3DL#C*uZ%nM+?jZUrbjq1@(n-FS z!xNRos510S#~CDSJ0@@l)oYhU+o*Z(+{|K3{r&x42j$()(nK6mX?DB)gj%vYPKsg) zsveNDEIp2F@5`B3F^2$-3{KK-->N*ajRb=MmCpi_k4~z*j<iUjIrOuT4V@rf*zS34 zSUI_FKgPw+ML-qzh9Q?Rd8QkzOE`<wg)eGOO{w+vzqqPl0XpnaSNW&~o%NLvTCKNq z8z4@~Uu^!ppO=-Pd<R!)jwghKt&xks*Y#3t!;*LHVtYK+Z8e*)0Rf&0Ig{iB+41c6 zs0L1%QEDR51$JRStQF$MCGBgrLjO^Kp^(>Eis+dp@`#=NMb(_YsuK=UHWkvAGeXd7 zu3mGM0kOI9XM0df2r|KgqPTqwZk3E>Frq+d%Fsat>O-7x?o@>PpvjvNA$4Q$=>Pv> z(GJV+5uJX^uWTNM^f3AVI(p~wZWt5ts5!>77{U$L&nwjf;;iN`Z{`~44zPcv9dxB+ zj59pnPnIi2x!J3@qe_bRIb#AR^C($9u@ZznfswPzTxHMpZm(n`;3+b!{J>~8AmM@t zB2zfzOF)C&3tw<6jW;ggd{!2lhcNh<^`1^BTLeyU2d3Ic|8D{8o#7QbL~rB5Vbc|? zjWUI(QSbgJP%{~}RkqtxLM_Gd9-Todg_F_+s$cdDJO5VxJPA~SLQFFM@$_`}4m?TF zDGul<S!p&*eH8wdTgyLCKOeJPs`t{?Fo~AYvF|<=dUe!)je^jFRkpt2o#}a$hE8b` zyS(#H)j~8EN&%H?<(3SM$Yrv~I>dP7dsf`5>?#rli|8nsiq!38=?Pix+C~5*^yGe7 zay?tCBy}r!=7s;xLJ@o9lS~k8S$!)S^3|~JDmqN9tb{lwd<Ab|Fs0FW#y>^HoUK~( zZ!mB_`Wx&dY?8|d^!iPN9w!m6)d_0^l<t->U&20~kOvj`B1X2}@~F^_7L@B;^8k@a z-cBEz4{Q3M_1G}eo{%j*_O*$<K*kBy?bK>_-OPLUF&ke#*rXZ{2@$8}!}5uV@4|R; zqCaH&`)=VRbf;K*hAU++LWERsVb9Y_XR6*7M{blo!cpVsOuD?GrnNL^k#()UVO&DP zsUT)C_*6}0B7t1UX8<Lo#*xtYl8zLew`kQdsXH~+-SAio^V;-eN{eQEL00vum~Rpa zyy&P+5%{|-;F+%cVZjFWRizywlXZd6<HEO@a{tXB$Q6&J|MP{@`@5UK-x~Cj_X=jY zcj~7Ng{H&qh)+5Jzrhb=oD@<7Ic+N)W8!NvBj^JI3J`}pT@^&j0_{BLLHT8PUO=eP ziN+RTJP`R1s!=TpjdWQl5EzNlBuhUt`XH4w5mpcZ^@|BfXlT=7cde(zU&#3LJom>M zu=5?}?;>OSFK1j8b?Q~8afC+t$m^}Ac?{{_DHFeb3M`ZcrUo=v><^~&T5HqXONwrR z(j(V`9^~Hc&51>6?k-d2)@5CFlXy5ADw&^YFe(`Sr~h0|&eT(QMHf*8vq)=v>ti77 zy_6tY?qas)T=Wvpu>&PH*wQEHMLv04PNTVdje_-<rieCb(33pw*H?5<Rj8K}WnRG` z&?Z;sA+8PLi;M$|#DSgU-#ye^Ro5g<`;?TKuZz7@@@eJ@RhTq`ls^ix7~3eVPUP=s zP*YelI_$tbCSuXTSI%q3g}d>$R?-&;{7~-s;T}9GKV_Q*ImW0Z6VUHTzbma97*?@+ zzh8cK#mUWZ>q>b%-Kb}9+v>-sfs+NGX9($nQ96k}J~B}$U=b!B0*awfGUF<xh=vm{ zKh9xQE5a+rxRdYiT+e!@Amx&P!qO-sXuj>Un>+DZ#E0|YA~q(MufIE}F&J5yFKPAA z?04Mqt5q5VY!L7UwQh&SQ6(*T2ItwXTZK;p1*%qXYC5Y!Q)SePF8<-aspdNeKBARG zL3xV}&niCTO0UK!+5$wb@gItrFVRiiL`@lVMtj`{a>TLj8xcn<UiMJz>A6Od8~4fD zD}L(VJCKJjot}!&T2~inYHmTy%tA2v4QO2DJ0%9SRk0ittkC+#N*~$pkf*6}bej$d z`h?)RaS!m^vOQHt_B$nWH8c~$ijBpNvlIyObM&EQGm_#I%=1epvi9sY@gSafZ3ls% z@dK(0c>@;mNQ&M;FQcMZD1&T5U**`w@U7EobsN2i^HRHEhiXZNk^hs)HaQTfK4^sW z<KV{EEHpeIUf=F7FgqIr1P=1BgWkx@6ecutju4+4xI&ml%%BH6=!{Cd^vw+ZKX?~r zTv%9XSjO~SwM%IO^9=X1vA}|nTQxJ3mc{)OAsQYeTE1h93v+#9AAw3QQMRIkCe~;y zD}mo}y*NbNtWIo&<CSXwdo5D2#yGU<9GDAa*9G;UL-42dhO66_oZ&tD5erVHrD7Q> z!YF|6VNNLDcj$-G_f$!&km1v?wWc619X*e?=NTl<*rN!11<E%1Z5#5-oLPh&zPvO6 zb2}Deq5bOr&~>dCE$>#LW*nIXUF3$%Em?4`cV$&U75hwjTh^$fa8J0Z9fmipr5jFA 
zQX`rfe+cgIcp8|vd&dZz*Y6vn=y6b<%9H^Q(ToSby#Hj*_xQ+wVlcT-d;xElK~x1L zkjY$?7l?jA%iD|~>QB1nzuZt?fq)mhq0{HbR-z|@ZBl^Ce`oOWc@h@qa6*#}F{3tr zcGU;R;o6y6Spq;ZDYj_!yi*gmfot7%-Gf^{T~OrSjUqPLw6{Kedbn&kx-X~%3^Zw0 z$rPt{VO2EO`5#eM=hdx8`>4;ru-_QtJIwXZ{=Kxe4tpkHVx(WA$uhHBkWA<p%Lbhq zjf7mOUx)Y$mG>48=*yLC#H&c0IQ*`_)moJEhz4vs-LT6V%o^I#Y{|-rZbFn6&HB)~ zRM(CNi93Y&V5bA|8QdepeAV;?e&dyxKy)I%*FdiD7(|yq>Rz^mzC2nMcqK2V8qR~B zV?zUQ=U~{vePfrL1G_CO5HzWMW&zEy;wl|_AZA0lt}sPw^opl>ED*R`g^`ZIID$z= z^PgfMbV0^O#CK0n!z(BWRRIQ0Njps6aq6qpAjnfgHGE0<@mGeIbpk$Yn@I<c|4=lf zYpivmw%G9@4ETJvX@YuBdhrdV#NcJRKlexM)s*B3U*@I{{D<^?nj%5<1f_xlDz}p} zMCb1pI$i>^*I^$!%0-69Y^@_|I$`~X4!)1cmco53v#0v(yYHXl1xnTIk(1a$R+R4% zOQ=vS19lI;vf=1`abN=DheVd)r%Eozubfg4pP(W_(0ya(IF^n=EuV>V#>oBiB6v7o zDHXFFKFY?<+K$0EodNv+dV)K-Ms7bTL#&!A(xRqrFJCB=xUz=^Bg~QcY|=N^`B2q~ z!@1@Q$)WcEAZjx_P+HkSg73Q21#%ww7O3ZP--rqRs@}2emmmL?ETqEDEWC`K6sz=J zA?SPx4d^DqxW-U|*nSsp**8THf-~*yQCvQT?7I~wg`#>q7%|~TP?@c0)ou61dC87% zmL}Nwp1*nv_vj>g_qwT>$U&Z;EK5XHNbVExo7DAn|8sXWu#2w{`w~&`UPll|(3v3< z70^E?dqvJE#tGa%JpS9s9)BX%9b_oI?6UU7x4yc`AIxRluOG#rX6JdmX}YKE6cZZ? zrO9rjh$$?QIuH)Q{pm+0%>p<ATXu+uP<q=KK_2|4S6w}}>E*br_yh9$MG>-;QRM2N z1$SD5I&p$;t+5)-!|k8J6$@GFiqcpd=Z3O2>;B>iKDC>>=>)3(6^l?s{kkb0*c}9+ zxuh|e(-AAQ>xOCAs8&=-keT?!Z=iGe7*rlbzOr>&N=eYAT+C9jJL&p;<TJ2D0>~3} z)O8@|N+rq+yi%gmuue>fYjJQR1hV^TC>FYt9h|Ht{85*Ap8o1%EFU@&hv8#mLNe^$ zJr)PNBC;5BAbvTSQ$-&-Kb&e+jK^N=>JkmwY-Lj_%{3|Jgg@idK{>1(aA!eTIBGMG z<IZ~;n#6`bKw$Q&WRA^4F4_<8qFCgmD0wuICgEV_HZG!FCg)&`rXZjbRe_v0d0Y;! z5=<?po(|0aLEML8z-IY8Bwt!B67XdR1{Mh-vrCW-JLpDUz)#Fb_+M1Q=wsR3Q)GJJ zP_fl;Iz~|ypDqjmp@T9l!dPtoX9Y!M4y>9=H2uYQ`vL;pyMD>AA_NandA>_^EMuJ1 zX`%F&Yh(&|p6#=Blt-V~<%OuhWr|l^KEAMoW~4w230UI|L2)AO|A`%U)tOz3WIEID z=|~Sl#(PX1au^5Teu`p>+}~IyM5l9(m1ZlLE!U<R4$h#bj0BBEN(Ube7y1XvO6pop z))}(Z9zks+8_zL6$aM%=mGS6X9~iNs`uqz+a^0gSY3{9?)J8&#{@y^xJ;jXM5O)ZR zYV4}ECGKyJzC~7jQR{+-&35$*oBC$nJqTR9bRRBqXl^|(^1-e*rZE!cQkj|Te|2rz zd$&B)71#z_-#gs@!7dYdoA=8nmWY1t0cLIu^1n6T7C#s|vHrJ7h>Q;f63~^wsE-*h zzEpW{Fl|J-n+<rF`dqO#XDYBvm+}r%LL%iKqW=m3-?ays9kh}Dyn%E76+(?jb7DS| zydbdx+m=kF56bCxh&^uhzSIAPmfo$zPzkUWTK~tjdVI8?b+m;I>7@T8lCsF_=5+IT zvOl9(rv#;Uc!voZD%)1}L;q*xJ`YYY=4qdJ9j$hip*Ai02>JN?kC>{L83R7P#>iQE zh0WoA_i_wM2OtnW=1j~)Wh=}F(Xy$yF$kd1e79!vTD%M-xrUzk=g4uN-K55>z<tVR zJ}t-i7z!!>DcO)+D&#K!MYWpz4bdIMYFuGhvFZViS}ML{Ojzq(y<L>{6$=Jb?r_BR zhpBd4<<6xtZsKSr`osl8V7rFj$L+^v3UQyAta|0UJdvu7Ai*LC%vZI|GP2Zw%Q-mf zzfOgl{SK6G7=+=vO`Bv($r@ba0jCLANgM&5xnOX1;Is7tFi5p6b(LtYqvYizhf1j+ zl%Lg#TE&IMz}G(@^UOqzxt2%FGlY&`c=jTEkPAGE9uE4{3RX_X*3=cwS@Z`Q+otmI zVJ&U=;2vu+txvH3Ca6FR{nRMOm1*d*D7RZ`$!W(w-UpGrYJUnTsh1im+EGamqj##c z$**x4lpCZN5G+=q-1Oa~Yk8TXtPz;FwP^D-6Gz~<j~dgl+c@m9QRJ0RhsJ$lR$}=R zs~^t$JNbnIDOno7;3>1+z@}tzo4;?;9<j78s+oA+je1}^jT@Mf_r+X8EXZx4@?JZP za0#^hB^rnLnZ?oQxQrC?O;J$T(*No!^u>=g?!`ylWIt{)nrPj;G|t{OA-duRY&#W= zuXfPwlZnl=`UrxN`dcQHoYlUB#P8(YltG85kAJ$ruY9Fl@4^Ud(C?0#&d8h^n`j{G zCG;3<0wFWmk=JT=Y+CSy)}vL1;`0Akuk8)8M1Gk%@X^u0e>>~-b5f+Of4+jyBIPKa z@JfmOrtT!Ei)MTd@K((~c{aL8`p`};he?`$Koctt#<MOtPZ84bMiA1kaN{Z4{AoLt z?t$;G2$upTQj2ue|Ama<)8oC)AGsr;Hv>;o-hn~Eg{{O|VlkDZ#OQx1VpA@=V`dQZ zn^O-d`}N4K_QD+an2~L*YWB|%;jceyE&cQ7H=%j)l=XD<b=gYG5?_a-INTf7eat&K z#=+dEp!bW~(=JUFgcciAw5rp{vR!|&2x_|J{{-Q#QAa?%jd25PEaEvgFX3^n0MqlW z9wC|TDNrLq&mGtjvl!Q<0po}9F|s8^@BfyIEY#A>-CZVxxBuSssRh$LRxEO|GKt3i zB-3JdTTUq&Bp60)j_#QJ{M8L)kMK(gD9VXZY^Tn~|5*^$fjVKdmqs27mv9qyG>ZNn zqy=P$W<wi!z3@TTE%j{FmO6V{T#iu)IN{C!GEn>l=dk4N3m8%k*A_A<7e}y>JBT~$ z(t^^Bo@2o5uY;;61khvYxU982yETK69>|eG+_qogx4A$A0wQV&Ap9HpH%BBwv$PX! 
zE_T`7wP8|rhW&gWweO4xe)q)tUmyuWCS-h(c7_i$<6%s`YlhlWKH)zn2i$p#=0csD z0;a%QeMOL@HBe+8V)%`av2jGmiW|g<={SuLv5aX#stvN4KhBKEY>cUjR*jU(qNX6F zBB3@aq*hlO4K7q$Qehn{EA2qr8^e5qUX`ch3WJ!Z#=RKlddLlobl!@fCqP<JOwOP8 zfY(Ukiu))mDSm}1bF#?D*n>n7g`%0LTf|W-pbT7Kt!`_65Xi0Xwl!)OiYSoWR3nL% zZp`RS-`}0cODnG|;WX&~9hF%5pJ>BwoR`6whlYO1QmPBxuzk-<Km)_G3pVM~LPT|r zQ$YjQ8MUpVKooqdCuS;#;c`aAz!lRFT*GEC=yjIAv)V3{W~2<NVNI1T-G-~_pk_o} z!!xSSQFI5L)OTJ^kHZfBNVd6K<!G71=05X=DGCG62_e-XeFQ>+xDpyBnDWmlJTInL zTaLmT6sY%+YX|Qol4`)<ql!(r^Gj<9*IojNkupsIi3gdG7sctB>i1b7Qa^or_k{w+ ze|zdQLDjpxkuYM>_0=*R$D@{si>4(F=ylO1Tg6;*ndY#LFROTdJ`c$M5dV)mGq$JU zAIV;<hY?zn*_3__>gF7;sY-c;nQu;81_;BT2+&VK1nrOiFg<2R8$oh9iuB(g<`{H- zY=lSQWyQK#aT596Fjq2)ZC?L8pHe?>?}&v(*?GU_GC=9|6gnJB5L2~w<5ybRn_G_w z@(&ll1Q3#gm&q+8V(MuP$sasgcN@>y5L5+b@btD`w}qkjL&GdvVq>p~DqpFx_J-Kd zbHgg88Z~&?kL3YwaeGg9qHCaXzy?CVJE|TO7o;d~G9x&s8da>k(vSK)o$9R24_@by z`=5uees93ae6>{q3hy6sRDD*Y#rrXyGy;ojj~mG7*^iAkO3o<>nVlK|9e9A-QIIev z9!=bDW+f?OpBg=1EBN+<)(NHE{*?1e-(&y%`c;e)5J>t{N6IU$Kvj@OBIH7fa`U!g z<J{XYa-`bQ7?aB_Z^cWMNNNM1ojz$H8aOdqwPJ#g9xoaMeW_DO5Fe0VbkCPNiiO)( z2`C`#_%t5E`$&%E3-?>>RrppM!R=BOTI?PYzzKs&(}dfW)dzDD;mhqYIJo09?u|0? zsV3}m#~3yuu<}sZph~APx94;96dhf_*4z7qeWZ8MJNQuixZ5yEgofp0hs~i?DM>XH z<({i2<IET<a60lAAIl`kijvs0l6szeq7IfA7%)O5Asr<N;@6L*`X27{=0p0*9gw#) zmv?*fDcqNpeBI89-2Ep0oBv{Fk8DDkCEi#2+v3^twvb^=EYM;HJ(Ig%H0EJiFbuYd z^0&S38!$TtZfC58m1{x?E5N>i3X5$W&%DQyWtn!r|7YGO|3`n-vc1>fmw*zD#ee?< z!ClX7cU<}1-6roUP69l2(h&mzi02WUm|&W?Jo&u6_pQ_N!jR724X3}Dy%JH-J(waE zBazr@fa}j7>@TnX&9E?4dx?=K_Yy>bzO>5uI~mH~!zYDsf~aS`fybP*bnSSxdsu8? zUAQ7UWidE<2x{y!xpMf3EWmR30jxm&&Y91Rv$2J^2AyvoMA;~1e4jr%N*LXJPXe8w zDaU~#6!twokZaY%$e39s*^__>^pkndE`G?sNLO0gcdk=b_1}AJlHFCG653y*+?tZ` zwP#z7^4l#zisPhz%^8tyJQYUX0KGJY3<h+^j*VY_Q~9b2m_GDn5feV566Qdwl3u`7 z%<mk`LnfGOck-+`({IjPfhzi7NYu;H^u^9s#pzb>;uM57BL)}wSPc^XKL{`>$x|D0 zr60Rf)y?pgF>IusO@{7hnE8;T9ACbMVFhc{v-epI>2huh*cvtO;xvrD@GN>HS2@am zS3ouE;WAfO<VvhP*k_ugytVpr$3VMy385#+PVFqj`p4>{?fh^f=yy4qD}TQy5Y>;* z#y}6edVnxov`*+aZY<ZCQ4^{-Xd4}Lg^Ku%_xcKoJ4T%SSo^ODUk5MH*(~7{dh&H6 zI#PHG&>}VK;}<uaAPh-t#ysW=`@^&qWZOW)yn3JRTQc4qqf)>g<(q~Mbf_8~>TC6w zRicP8dHFB+B{i)6E^XOn9N3&5=LIx9Q1Qn#W2043W!C<_lm8v~+9evEXC|~DA-vFm zW5+yM;r!3fyUhM#np&h#%-CSijt_JF;O3eqM1rn);cJ?;*oD_g^3oc*WvpoG$xV|> zSZG++kUe}_N1;<r!14)Kmmlv;SSgu!MWLFrAAz2Z6)0s^#{F7w^;71of0X!{)e}F= zmwUlu>(tgMGQ{(LI%_(WHz8E3#3wY~b-n*smUjIc{_-;A1Mm6#BZbL!@xgSq10k(q z9oS<EIP+3xF+L`<E`OztHx1^)3=(uvq610e>{`j`w;&7kDZmGU<tz^BB|U{HOf#g$ z7^CE_Fh_(wUNdBX*&A8<AeQo*fwp>at4OYK<o8R&;w$W7KUKED*U0K;gz?i&69&YI z0}Y<xur{;y#yhWMX(Tp{1w$D$rSSiLjW>>}ys9M2-v$igYQh~C4V6I@cDo|{5%f@Z z+UT~j&Bm*5EgLkEp3ZcJ)VndpcJ<M)qawH9MRreQ-%Y^=44yhkf%53hc_cqqvMDn5 z)Mf%j8)2vm*|I8-n{${a*lG$JpJT2KppSz>U?;kZue1h7%a37%OGt?py(U3!DGx5C z0~elvCSbWZn{7KUKPN)%_>aBK1+!@AX#&6Th>C6qoU^ZuAM>M17U$Izv<i3m_-L&% z$ndmKUrQfKKns;nc=ius4^L)kFVrUGls@RFukI|xx?P+hxILt=gL{Qe5)S12pq|!> z=vV?c6kHKJz{U>Rt@}2SKnWAw@&9R^!joXIpVrv~vS&xP-PKohS?}r%sD?&LfC4h~ zHi3FA)Q;P7yYqQm4YcF@OMAVctyOFiOJ^xOE!F+{lNJSaMKZtY-u;aR-RUeNOQQja zk4<6f^pwe3dgdXUYS!!e@BOn@UIi!eM@gAgZ$HAgWv|;l>$Ocf2mg9}14NtexXx+z zf|ZhKDyLKlli+Pz)mP+wK--B~T;@c~$yzpu=9bTHOGx7#-(4=WFA}349FA$x7)1m) zyoaD|X-u7z-$@u?=kQELnB9-Ar3jFIk&+12yuw+3`a<{XHff>Xcm(F)BTeF@Ktf&k z^9Qph6Q8TX&Lg>6qXI2<+GC<Pgiv8}o)u~=e&CXky(3N~z0!E3#&(gS^!;3ZzvZR# zA!iAvUFqC>D-&WYV#f1XV$t5IUP|=_H*_%?sl!xwBASp0^`FLllb)Vf#{(AP6~Ijc z-t*b9+P~1zoA3_9Q2!@F)%iBy#t(@>&G2;?86OvAZ1Zfd#O9nZ>?Rkfb*T`HNu}Q0 z*^Ge;S}|4*Y<b<FZYY#_fP>%j0yNp%70n9@(6k*~V^}&6G|!kpw}RicQ+FD;X|Pln z_2bIHJ~bu1hBzTw<cwNa0ZE~Hd#96$<FzMH7LmqZ3nW7yClL6;4!G8Hr*pJTHh9IV zH?8I06DO`5cR+H%D`-|(5_)<h|0wu*Exi~GFbSWHA~v7&mnHbf8ExDgnL@wxeWcSh 
z6!ETHk$4-?C{VW+1FUG{IRPA2@-bt^uxqp(8Kc3MQnGetB-DD#2|*zgy23PaXjV3r z5bvL^2py@y(s~3PO+**8{Y-sq4bpZyno7)W`i%2#{4s>rZO?ScLWl!0M@#+4h-O&? zNPnV%fa^dsWyzN@W4{Cqd?AC}sKSA9NoHu6Gqx7jus!>|JI0OEMH3<KYwE~{{gYJr z$VLC+t#L_U6#}s&d?UaOA)?Fcdu)ypz|hc|39oX6&+q`ud3{h`pIABJ_&f&zudf6H zV$=tsB<5wxc2eK`XSH~Y8U5W{e}EMe$dWQ<`JYxuzbaz=mfy8EaHUsHVp-?I^_LmR zR;O1!y)vlsTu`LqLBM~z&{+j+CV(%PSWJnVwpaQ)wO#1T_hcrF5dT`2G4O3FSROLA zb#i2IaQ=akSVuB;?4j%7RgU=z>5q4NGAZj%o==}SJqx+xS?^3JxK~*JQ~sIoWv~|W z*N`90@y8+_y8xMh8qblA-7cx#5rbW}SJBxzRhL*k)MKNK$5kd`-}N5JV#W-mpwQ<d z4i3IskK7>F=2j@%gIdCTy2N5_vy%w$T?6YFA0;an)!cj(<c2LVI`Vj`9Pjps|Drh^ zc4L)zIY2a%q~;4%Cak1LRA~lY=EBdLnnu&QHdo^m<u^uwno}a&X^QJG90(E?BL>~c zG=e2&Nd8{XJn<lW=SAfUZlnix^%qu?PD#=L`mGP|txLm9llUZulf!%xyazfjP{Qh5 zDO$(R74sk%-FML*An5e!m+ku|HBqB&JZrh0@D@B~6;v|~P_N*YP|L+;G(SpOP_inA z=erM9wR=FoP_=$xxuc>6B9ujSwvN`GGsAuiREP%>xGs#yHLXM3hg%UX`yz<k<_m(C zrITV++e6^_zeNhEobQxC{H!li-mgB4RTIqfQc+cz<^-&<4t>TXw=tt}cgNJ8Kf)~| zF`m<9#8KVmJbPbj&Qu!b4{#XkF5cB=isz899g>aya+z6JBXQ4#I_DU0;;@llq$bUR zA;>I@ZYi;F-}~)W)wp~37YMT(I85%39<E0q%#w?SA5q?~MvHlL@xLu^%?fPScd=T7 zx$+%8E{n(4Q5augvYJR6tJ>UIHSTZYj2~;JryZ)MpwBlVLuv=_KmyXm@lEcz%72Te zJM2@nUZ~kk4|{U+l>^i;iW2hjyhaW%LZl&3NmI5xb`QCQw;p_xu$34dv})H0s=~6F zbXS=r)WLCfJ>B~Zlr@1p8P&9#AKL$<zj?{PcjyYm9I?&x#}IN4jppHD!pWNYXl!yE z;C%;gri!b*6@K*vH{&t<EOH@Cm+M4-M9CFh#SoA~7L0>8N|q!>c1KDL_F4%9>j7im zFLe1It<X4FrSiVp$!Teg$&4~Y{VS}*3@EAQ+0qMM6U6foq?=m1ih*o5me&S4ORK8r zCwUjD=Vg>{<V3z-(CKg3V740IC(@RG1A=SOqMUc)!P(hjp^5033;#F8PMxs?KcI$J z32W_1{GaLWs<B&bHNuNiJMI2F!4N3TjC6FV>>4lTzM4^8)t?yDsj2+>rQ4q&=it3C zxC4Ff80-TUTv4HvG^B5H9w@UEV3*D@x+mgke6F53I;>x@w@fq`PW&_pchZ<VSIrM~ zb+oB)v?`pHg$<G@tge*kdVRx?Dgw0DQBkGuCU!qvuHz#K(x{&Q5Meg39DN6H62Izp z^|*#wM3Sk4E>X3n2T)+`g%q?k>w3_;>bU8h{U)=MLrbs|>Ya1;dt^Uju*y+)HpKdM zC>7Z-<Y`Wx7S@Is*0aYJnwsE9hAzVui(Syc%Zvvbyi~mK<JFB$Jt6S*y$JXHX>ILV zUVDNZo!Qo1FO^Sb&srVpnW7{kE8L%xmDfLHQ2o@WKmL5^{i!wO4H*GhFQH!#5Mi98 zFGXK&p)B)qoiyJT7r}QIxm}NUugZkV1tXOB2G@iN>7vWoS!bGEArs2w;#Y}Rf2j>+ zME5S~KaZG_KPTEZ5B<PhYi`yyDA?pvh2*S<ib9jSl#!{}yHaEAD8VWo4r^ssFQ57s z&cydUJ(^>As=P<Y7^d>KU?e!m&^;ru>c*IIsP+tF1XG)r^v+i|RvNyA(0ky*RqjC~ zEz06Xkdksf)y0^hn#mAXOWY}>bn}WYt<GHbR<4ppl0^+8$vt)HY7JAPjU{W|$(9W^ z=li{DPe`~YX~{IyW&rX?XOwR~MzbBtSh5THRgII2@im`5k{AEb`g|a6T~QE7z(J*d zwVmF{0Syo@zles+TOgh9B-D0F1967dtJv#=m_|w4^JC0=R$_048VKMEBlPSf3Tc=D z=hH5sGQpZXij&_8zm5f;8kiE=Hv!nYK|!>r;in%8NFlZR<{J*d#gmK0nhRuGu^`$M z@oQBr4=`br15W<3Ryp?`%Dih0R#rQo0``_+m{1{P@(fr`e}avLzo6y*-n^yZbjSQO zqXOP0>aWLMxlX|zx6YR8YD4<*1CCnH>U>;lKK;M*%I+}#;)&H5<tgE$K-CJD3;kOV zHQ#POIRD~co$36VVF(yYI<RVa!pUlDB+-m&37)Tl#eV;icZ*jL<j9>Bh!dTgJHn@; zZKwp|teiH=v5o7u`_?*>yvf)G50oW<adV^qtwV&WCvSn3`w;SJ1bowhwY4^~wwSCe ze_vbSR(#m*O)9lROf;BI8^4wPHzo$o%tdx{x}5F4v`tq;=~ZX#5YsR~m#`Hs`%utG z6ff0`GG%O7+f>89PR<!z$>xfSy?N*n+UGA5O|uLT1f{mQLm}6RpDCK*Fr^3fDXZG4 z+mm4r*l`5Gc}@2%)T%V+{_mX6YKF;#ZiFb}c7<X!SeXD@t1{I%bgmu;>-YEQZq9u| zW7+=v84QKyA#g-7fHYkv0X6->TwgBOp$@oExwIlw6NZeXTuWEuz*c4FLUhVi6#*h4 zfzw$;=}|{P@S!!KD<zms7w0Rtbrs>tlK&qU=6I)RpQO_JGTYcD(+yZ(<xKqQ2lvbC zjw!~^uq235FfCOdq=CxH*<v&?^PLC2=?S!tQ2R(D1J??`D0J4;E~yylK8ykb$^jOl z1Eg%O7s1h~rym3u=sRGh&|8Y2JbDFWzGdu|nwh`iAHoft9R)HQJnRJ_bLAUV?29)F zbmb?mugdL&U&pw7O)w;;fi{a32-d4WDVsnVdzMHEdoUdblwLFNq#eXQ57WxtM;_Hj zQ#63DZIUTX8EPK8Xuz&^4YqWjZvV&?i{bfVK2ysW)I8|^_kt*nK2h4dNfUEzcenox zf>O=qiztyB|H;v(6geaaFyT(Cf}YcM@v1bH4!OV#b(-_;oTr0W>#1!jfYE8$Xx2~# zR_QKNLDJGmSKZ)utBS#yK+YNEuI`svz5Q{o*~(4bew57Z1lk26-x?=}aX?&i3Gab; z{P=x!Y0{`$$hXT__>F5*tUuwM_GqJRD>WLA_OlGeTKyI$<AR74|KwzA5cE|1zEt+Z z8W^q-%bdwTTE9<irWO41lr2BcI2*cF5N>NN8@9GeThdRkBM%Q3n@Q}<JFv@->IMcP z;T^>><L%KUMgaVKD(3ASrc)xs1|#R20LreH3-;A*Aj}xbtrCC9&wAbn4De`)X0u%P 
zX8+uJZF{M%8){%`ys4HCZr;y6^rA(&isg2VAe#XN5D}ea`P<H|uC2%McUL4?){cA6 zK{FvA?kE?y3xFBn0<9*tw`hhrP{2D=o%0(gVVE63()tikAk;ISi0+hgL*#Jad`&PA zbxFWPjHEOnF3S$SqC1BL(K;vO^GzEv)yV85b=5l<!w<%gDQh&OR?A#Ls>7nT&OgH@ z<lsUKVM+T6$=n9s#O(96nLmUbsO%cS2*iSVS0wII$Nv+OCUG!p<{o@!D0Ov5V{TL- zU7jw51srJNdO?PGH#J)~FVGVoijSkO(uO2ZL*0H63aF}^unl`QzjGo9`8fZ5fhwo8 z0CPC^M&n=A`6rYSUNSk=bKriyrm|~L^o2*I72=s$B#DQ@abew*W@RT92pHCvjpL4l zxt)V>kbOp~&g`DST@F>>S<IS)#Qg^LOQ(r$Z9udt<{M>A<f2I5Vt_n*g|V^DB0SSJ zq|lCyQe-b)+z<wnq9?+^>nDB7HUE=K1yZ*5)>e?rCkDq!%|ZV%KI_d597<QtS?%>q zE7pkh#y4F%yWx_2VriH()X`>xxRTuHVP!+tsfk;kp2A9u@~~(aizK($im5<73Pb_F z+6jqLu=nQ>yAL0RiOkQbBMRpwQNguF`r1bwV5pBF&$93<WC96x)MM0J@w}E`O%*vx zjk3cc|8RQv`4Zu8TEPcxcLdE8OKQJ&4vt$+&mVCYy05++cp$K{=g@BG|5r)^f^m^J z`uS&j!(NxWdC#Q6G%T4fpR3cd-G^H(aL6}%Xwg|BmU-7)wwdzsV3WH@(WvY7{mD{` zG_u*Y36VN%SW<41mP46#R3Y_Y1$sWU$1GP6^oQRECXG@Tuo&D=6=56gxx(tiy=Ker zMBB-Vm&jO~@U3`}y0n)-kk7xBUac@1bKm}+z^GoW4pb()INk&|CnUFabgD>)bInWn z@oK+KUt552Wl+~w>tF~{GKB53;<&`nuzULC5Q3(R6?iYX@@BuL2JATVSs^~Ozhc_q z`t?Sb_Hk3vNh=P@JuPx~J0IEod?sX>S9ve5sT8ceIGsYC8z`szLQ%hEE9k1MA7S(= z0fjT|MkP~51iY(vK!{?U;SNMnKy;alyBUIR1VJ4pJk+!~&@BzrV^s6u%MlT2VcVcv z7u=jE|8I5rHOWfbAJ@TJln+!_?|j&%o}`O7K4#ckX-<paCLR271q~m2KDLyutuors z_X2k9;=c(|AE*9V3~-k8uh2nnu%IX!X_?~`W4bc^eD}qj1%MWx2YiPWAtmpRktdCL zE+j=X%J%4b^nwj?L@MCqW6odTZ>eKLVKU5;&quk-Uo=mG?Fu*fUt#nGd_ZE)1>l7Z zVq6cW;6cW>3~~Gz;mWN15-A6&wG!Y{A)A24k786S2B582Vg~Ww$*Y=1Pg|FCSsO&w ztzsfKPAjcw=?BLK0k&o50n6O{uesoZNmsUJ_PP}{!(bjRFc-^jsjuWU5Pl>FrDLLm z=IXb+k@li^i+Q7IvBXsb_s;zKso*#YIgG*Vj?;U0ux5X~jQXf9o#~-Cr6z#4oy#aa zX&l2y7*pbJ(n%$?mziYoONVsryxp#8<fMsc;-4+3Q7B_Jmn{8%i|1CB7U@f;XTWZO z2@X>MQreStWMg8QZfK<5i9um;Qp6@-7*)c_BNR+fU79D7Nr|MJob=A_27pjHLL4}h z3E44~H2O<tu%}X#GRc^8MdsdZ0)VWbg<2*w_S*Eom@M-a)IjhZ<Fo;*N;B4$QWm<Z zz6!OxeYcbDJcRZeTm(0;efl(cu-xBpw$awP$(yJPl3G!Llv>9k-A!F@o6Mux!j)6g z@zPB1dn-*=ooy()q9AqcQgqLKn<Dz17vF(Y4%eU}Y$BI7yuCkmM@a<(EYRL)^I;f= zp2PSN=z2PFLA_#i3U(2k$0GZ4wwe+bR$j2!+lMa^)0c3b@jJPIe2gE|Qg-->@((34 zpzRyQ1N6lQFR05$XFWT9=>!MP(JE$dJ`OCf1^67^i(^nz-j%tEel*QJW4YU<3CLM^ z!B-SOfopB4T4uboI8_;iWktY7KdtG*a>Jt@hRuf<ty3rri~Rrgq`*I#rJ2;~96&U9 zR%gBgssAuM>$OvPAgqT+8<6$X`#ms?fH8Uhu~6~5mb_aH3j`LZnTY_>i|!0_8<Wl# z9Lu)m<w^K(cSv93<Dil(ljLdDmfsIbA-EA`QJqco0sl?Tqd`cZh1_~+wCY&$2vh%m z8iBxH(%U429bN_@hf8c$wj$bk0Id(uZ}z=v_|hu{nz8-W%ytFLwRr{ReD6x)`wi3# zH^$)$=%>@ZTFB#&ct;c%-UfZqNqdQJN*dPWhCQ#fnJ?!wDzRtD2qYBqO(U-npw5gD z0}7N!*g=AkbpSdUA1ZMytgL51`cT1q2jLpk|M~PGAE5Jcj<6{HPsyrUkcV0a)c?f8 zETsF8(I=zN*_AOT<wlgAT5sYz-akbV+2?OuYM*1ySr(I2JcTF?Na?Y6Ht=p6^HJox z`?e|5&i|lt!1B@S$aJJH)$22S$2Jy498ffos$~xr$hF1X(K)hWC+Jw=epk;LdleHy zjO`25YP=h^8d%5W{{1$ccB~>NxDUBg-d-H4;Ee%pF}M*5WFcC0gX#MVB(h@(2+vQk zfEImZxsk2OQumfz2Eec->MVE8;~fxaOW6*|$#tBOwcVzwaPSU<aZR<O$0W0-w)%ur zGjbeFAPFeAq<dW4m~P{gUCH}9Yh2XUvc|tY{Is+;h>r>HeLP2k;cV1H_M>Au<4ODX zUTD88$fQE3*n~aK?f;l|-wSd~I~Nrgy<I7kce5d-KQ4dJvQ%~F(%c-TET+7c8s61+ z>rYN$!hA|4#k{VM3fIAbtby?~q;!4L?C=}@V#-a)n}RFC5~iyBpKF)0&%+}3M30lW zqYtHYzR?6nTulx#vylO7pN7e`e&pxx?JyFt5`)?4aX&;0?yPl8nzbr<3DRQW&Et7q zJ70+=asQs73PO?zt?sMe2a+6U$WnEfWnm>uH#lf0n<msfKc$=F>)~FA);a-7H~!*< z;*6Ur$zTTfNS>-rYLZpo^$QPyvlje~!tx+ff%a0Rb}~Qs{S0~CK+L*D%9UWi6Sq#j zPjELnra!n@^rW2l5r#@e(NPOD=u=^g@cwf9UK_yXuV!p~mJj38b=Jj5;x@ivJ+_A# z_fstDI}y)wL6#{r`@=S=oetNCx-ZENLxL>Wg3}?iX@efsn+wupFBH10g#lP#UTmJQ z^qcI%U9}M-Lv`OcPr9RR-~xaH6G>7Z$`<gqSqV)~6_k-|^j{$+yFaynwU-qT4$9-h z#>IQK6XA{)%iz62h-b$+9D0OWNh`(`g*<*Cgi!|qx=5!juAyz6aO87_wJRgd?QXNG z=4oCq^=ku8<1w2Zh|WG=B`9)}hjx-opidj6JIDR9;L~{<2k?u34M{v7=d<jZSLR68 z9<xyNk%nG+inXCVtA<%3tY-o0$fTPZsJcnnj%Q)(XHLTWNIEVu{2**fW`@3JScO)T z*9^4wb|~a7!(n~<491RvD-+CogJ(toVQV+_IJ7-vJy4^2!m;Nn>F*{H?)Cr>Vg5qj 
zC32L=kL@WBPXXtXoH*4&31!f8Zi76NI9b<^TRcu1jH+K+QgusLHAs}U)kEhPN_Hly zWscy>K~Xa!u9-yTY(of5*mz>)kdH8G$)fukMA6baH{?2PUN#*Zt6R4whNr6syKO0A z!j-!C^4C1SJqQwXLe!IPBrUM6=t4B}6Dg1lh*rmOwl(49(ZE38N(<5{1w=8|L&)>q z8YuUY;%7c#%={?pElNH|8Ni^wT__qUOVX-%7VZcK9>w3|{aMe-Z5987Ts)cVe9w!D z{$>!k{0ix8L>w2zt0?j()EuY>{%?=3FfL{Tp_OU-WP3<Gpn=npX6(=vAvH+!Z}z~} zopq9ZkBWSm-lqPOuel&?p`Lx$66g54_ay5evUr6N@dw$AwM?_MhFHYq>_<@kgD*-@ z@VM`9B+QaT_p+J3;ET4qOejq(_8TC7*#4pw5#;kwH9bJ@TjzTBsH1MPGfw!5^HGp3 z?f6^AWU6?B8-*Qq46}Q}0;r%0dX{BklhV0VaGQ;J$o|?&+&Q&#x^X-d#sUa}u{w+o z{7n^L-esY7id@_=*}o&x4E0)PkN0OeEK%yEQ@iRX+GFHF5(1%|Brc}wYx$X$)MVPQ z4V<|FBt*6_!#J3xGb%M_jXFg;LRmo|l0ujp7-bN#&qeJI%wt1`3hWJjKQzKyGhsW~ zTW27sH_WMEz`o$Opm{42+TdR}0Y(cjo5!Su=Kz|xN10)|aW*v>-54!qo<*kw_XFs) zol%=~X%V`uKr%Y2R;PZIRq(Q(rgFV`2t0|!37an|!8>-K)18RoEun!gWMvKgu@6Y? zQHBp#=f1(|GTOPczptE5Mi+uSe_jjTDN?g?N(Ll<#L{Y;y2?=b)A)c@M9n$alKgPK zO$V+GA3H00wQ*2Mz!BmQfAl}z-pM6!p9^Cy%@5QseV~C43Ys|{?4EPqCi#Z>ay$6& zD$tV!07yg<XH*|!-a&SjyHal*W^g|ssj+k+@E!2IZ>YTfPRpr7Cs7+N#amaXYmdVi z2oqaoziLgp^t@wcSFc0ZEf1<YD9sqUDl8A#H^evT4`z>XAV~vR*1b|9B~v`Y2mTp> zht;2&ErP9j?V6qksN8t)%*HdnX3v^z*%pWfCEUtkftsxh8#_X~#31J_>S||RqP($g zrX>f_%+ru)L@ssdAq4fe_#*_hJ15<<-FJEL>rO;qz2xVqaH6JTKrHAMW>lm(MN}5a zr`f_}z>1Z9grUWES@{p5THoD}agI!3)YNz${jzsK!nc|MQ7{#Ghsu2dMD6DF99|LP zms~D+iD>9giEryPXkewif%*bL9IBZ^8`w^?RfmA^PrGCz*5LrxN7bXVr)_OgUUGv| z*ToCNl=COndl+^mrfl#KnpH}ItbVH>rVfgTQB*FGaYxA-TW4Kb5lfjs9B8aP=0NmF zp}@|57i{?tuS3h-8=wx+_j#?}ZD6BX5L=!waTDBOnP<J)VckeZDIZ2m?}94-UCC|a zHLlhDijjo}z*A!;7tvez5txIr3+t6q|0%@F`9l$y<|tzQKo!?X`K;^H-AIwNdLaEq zt3fRENB_Ul%v@q6gCEb{QsGBsAUyQ==tT2^d-CjTI5{__Db{#K;$$ZKDxzkS@$V-+ zy&V4<-F#a;*+FdslrZoJc(h)9>=v)eLJ&j@SpvN%87b_u&#{%*8qgb17!jh?Pes)r z_4PYC_oX}i=?mooz&<K-G`4jTer>ncYa{Hff9y2Lh*8;XdO8()?_%J+i)+UnBE=Dl zbfx8aDBAcdZ<Zf_(bGj!qjA}|z>PX#jT+{qD)sf0V<L^Yuyw_lBi02QuzV<3b;9Kt zw3YI@YpPWG>W>0#l`5+%&JD1iBlpwqJ#>r9tG_mMxMZUeH?QX{Hm@U70xA~e-8Lg! 
zWCa>53<zn@2V98V?RBu=W+enRt9ppn7(Rj7$OZk==b~SLw&BpJYccjECwVy)yAFwG zgV#b}0DgOaTyU!s?1ndz-9DE2apo(iYEG5^u6H67HIdsjTiU&VcYG(ku^I&kA;D9U z?R72rv!bEGu#Q`TcP-OwndiTREE5qMv=9pdv>w+RLRKfT;nezlnpcQhM{z_hmgRCI zF*m3sNlrFge@K$gw}15!!|}Z-;+D6MCdeS65aK=u<53Y{Qz4sqQ9(0Yvb)l3hvEvX zZC1m}eJlLv3USP}+{PM@tGbkctQIgVj+uU;n)0*;g5rA(VeW(f1F@{c)7o@*_iWFV zyzm;*EPG2&x5IxmOB}UzvrkMxxjUP6VvB^R<@Dp1;vrLPIreMVeOVmlglShL%p)7F zXHizwoeL!jn&E%Qc#WqFK05%4>qraJ#rC!)a(N2oV3jV3y<v2A+>x1$j8Ep{RPv!a z#AzOONAH)}E?E92OEmtuP_><1J7q-Oc@~hv1ezYEU#lZH5KqXE3sXMFgRsbeKaDiJ zN^Iw#<|Dk~m$mizP)YCFovzlpH6nURq+5%_P!DKckN;sx{^;B}SAdee|6MX=6r38Z z>O^%#-<h@LXt{n1i^WbGt2|j-H6*lvMrUfSL(Y!2Bij4%;4!l_&7(>4f(DqHUx=z` z+c!OrwFDd5vvWuYTKzwaodY`#d~+mDHgeq?;m`cYDxb)WNcxOvbV8wC=t4Z4LXF}W zUW}TaQOO;O$J6*HhR5KC5t(>~O70y#oe4l$J@4bHhh`c=5i%a)iDBg*Q3L4V^lS#* z=i!}R@Qj<LwNyvpIBhkC3Yz;GgrpO_u`S~Aq2g1Je1+LMhwItQe`uX_sY(L9z?1Gi zt;_$IkJ`E>um9PZdbAA6)!IE4bB8CMZg;INME3ZZ9)(e?&uPXX7Eax_AI*`x?n0`T zUvYh~1=GoTHuf|#b=gB?>8VD?GBN}-w%o4mfHAM~l`$EG<5h{WWiZGhWd%Y|(I@4^ z=S9!JRmQJyaVR3p(H6Q<(sOKV)|tj|&{T`HQ1EELS7-;1rDZ#Vz*;>1HSBrnFYK)I zY)|^%@)R>}jXbMZ0t(XJpz>pxf~=IA%YLBe7)EYzZcfp&P!=7ywI2cR!FYR>1rg<u zcdjE~{7u<C)igpm>&Tbta8N!xLAA{p?9y0C0>mB&Uc{r32qTjqJ^m~n4&qg{7|oq~ zus2|9QdF^0%J>D9;dWq)5a=gsuySN!pW-I{KvG#7kZ%h9Dy2|v(?A9d)=Oe>Sib&% z%9ef8|1pdB`u!_{z=Wtd9yb}W1LlMxi)IxTQv^8$8eAAMX&~>w?tscq0m4;Hcu)Uq z#@q>SH{+nr07XE$zmk_AM~h(55F5UcjMl$Xlq|WIDt@IV9wXFQ9;?rLdv(!a)5i12 z@MwmMr?7wu)rdbN7dy!#6Ix6(c(CTUd2oW-t{5GU)Dcdgm}wU;1r*VB`}YjgwkEoj z5wfS}+2nY};7Rnmx7({gN}{%-tE86&#|5m+)mul@J}0lUanQv=(e9^bMt6E`ThCfJ zxhQGJ0n+fI1&n@2JH-UJd@Mtv7ey3VOL3SSsC4#vPN`8!2K_04*#SKDkKt;_xIG36 zDq_{jb>%Qdpw;B?|0j%N%FY!wih~z$q%zOJ_ElobYwp7nRVS-P-)Md(x!Yyd5RHl< zfuh-m^*ZGa;<SZqV8K;crGRR;m06t<v9VS~uKkzXm$})cal5qweTvuOoK<-DoW!u- z>JY`L=zw$YB;+lHWdTd@D!^N`F?&}@+rrJV#JUK7lBliuiFEiD-voODS+^xlw!1um z(6WAcyHDp!c>}dsxqfJ4<*)Ih>)IcPP?X7m?m9%rH<+D7%vCqp>B(Rsnf9m+L}*ye z4ah9%ucPw{$M=JE9Nz?Q+Ipyu8D=r4D6M||bHq?-SRT%A3{3<fKt6FanZD@tcg%2B zz|WFuSLck`Tna)N1Bp{aCILh0%+*IWil~V)VZ!kOSkIO#Z?nYm2N2i^jGT0czDKd1 zGl72%YZ^j$l9vYucXm0&L;bMNpEC1@sTTdVaS|&i$Rk_g?Vy=*jQ3+WD?d*oqn)2* zu^*qsK+4IDC^S3OnDY92@0438EG+GspQ6FHph{3wTpfD~Vb}+B4mPIceIV-6-7FBy zrLL*Zs!eH;^Ss{4mLw`d2Wa;-YCcR}K-bpwi}YC!hw!UGUbcb<S~Lu%$LSd~;#_|D zHJx3f*TAmkC9?F`<ZuPEFym)I7X0IO5jKPZ^=!=lZalV^uu>kl_4~%YH<k%GVwTP# zeM2#|r}q<Z0~e@k1|`X(5;KPgr9WQLnp(r-*%&KAR8M78pij*Zj_A)9J|6?c4hLG_ zkLGMA#P@+_Gz^J}7@Cg&=naGH0+&OtWi3p?O>GUILaNx9s(6d~o*?U@fy!DQ^|o?$ zSDBHv{$<U5*&dss4*#_G2K|0TLS#Evw*9xqxpX6F)=PC!T#MdSy=%1}OT}3_7i!jy zXyp8a-Z~*R2F#h+mcIvG?JAz;+8yp_6@0Plv7!LaqTvuCnxzhH;Y7rPt39eRQbt-< zL8N3LX02T|8M^TS8Df6W)UZJSI9#Wy;JqYnqMnWav023Kn&*kcB(O+E5gV_<%e<JW z8I#B`%9y&wfcvDL=<$P`B?=^5LUHG!C*SpFN+Q4a^TLe;msS9(EAvC>Z5YhschJN| zZ83$3flwP==51;&`XRS@*+X~@L8&6k-(wIZGsog{_%xfx(9IfnY8MZ!&B<NU(q4Gi zM<cGS4ae~fVigy2Svh-vlW+muTF-pQvik2#D$cR(I%`)S?dxoX&Zs_faeWs9fcdY6 zp2rR&<N~X2@LQFJgY0dTJ46~85!litlJX*$`ZqXqY#nkPxqd5-BTAX`V-$B~Uhad! 
z;l&(a1?^uE-Q2g@kD2hCMq&R~8Pr9d(7Svb`7}Nz>+A1D6G`t*XZi8tOW2l!=XPmz z_N{Ec>~qhEJ?yV}kZ<{SmZ4#wt!js*yE22AVu~$yLLsG+jSq#;I1^CjQSQ9P@5jyM z-o=Nz0r0B!-bxl4In7`poa>JWE9VL8#8Yi#RwY!X{Y;BLA|_<SA%HfiXD}3&^BB=z zL!$PrDb-oaI4&_|8pPRE3t@)fr4_x?<u2nx6A$tpCAGDf6+j;ypga7E20L>|)wqM5 z#*K<ax5Y-+jl3$fZ?O~vH-{X893sb5t<G$l89O=af(6CFDB@NNg-0DX906<4#t=1* zPNtjQGyB&-QuN1(e)^yJ)*g5W==>yx*^5`UtIQmKV?Cwxo+T~?kQoIeM8&Hyk;3{H z2XL1Wq*k!A^Ig}qdJ9P|Ec#)oXiEs;2FXFB{$Ev$AB?&9>>`G|{cZepp(na#ri!p_ zZLrYfn@dE}^yo&TQ-7|2BZ71AmQ0|FiQZ$hKBcGmCH&dR=pihkqqxeJ96y$Dhnp4S zSKR3$ZH2qjQC`f!5AYG(BAQw+A!F^ohINBz_IeHY6D*CbYntjb>@0DL=KHc=AsA0m z8NcJNOL3OsGFZvWle^=i6_OP)NgG9ROs4@Lx<KOiPA!{Yqvz}E7nvG#NMBesqJKZv zS_FPEzxYWd^Z%pIx*Fm{Uh;#B2uLpFe*gn|@2(}VVE>Iq_Bg+ge`8xoe0+cXe}aOF z31Y{uNwamnEY)a`6K}5w;24g&5KJ^Soi-o52D8g5|7wWK;ZI%1Y9dE-Q#|+vt6u1~ z3b>5r0%l3YwBh}%gt@Swovk|MJFAl5yoyflbiMBO(iFZ~W!I56##aK=S@sRt<@^Zn z(?O?~#|uT4tE$pzkzmr#Ic{ap4ox!%LC2MlJGr%2YlBRJqjDqUdoZBx{eH%4V#(i} zXtpqD;}5KkK3FpJK`_1j_p5!^W;rUKnpD(XJ&~wruyZZNw-u>2yiV}P(Vvwt^P&19 zdpDwlCc&WYJwmScTy5mW-WU*cTgV;KGKb<H8=dei3aAlLcy!e!LnlJ79Ld%OG$8(% zi?*eBNJ<9_Ejig2lK;+iC^hwV>}=beRra+ND{Ee_bSGkzRxqWeyvw#Y*u7z#Y@Tmz z+`HR;j$J~Z<{yPec1>13<jAsnxgRikuBI*ta6CpFVk>{ZmZM99#dLzwhIIWmS!Qy4 z9<x^=l4%9GEn_T;Y43gkw2&8x2zz&v89%#rFI54ZgRu)W2g*J@4v=mSx#nw!3omr$ z(#6Jy_@OSG!Jziy(s_4)O(onpA}+veP;JJyXl<4y$AS^qCci9KDeyFR8UqsDHbBt5 z*5Ekia!^y)gW_DxJ6XeB7Y|P<UgvgnJz-(Xe0Z59Hwti|!^(^$UE5dD(TI+~GqdyC z`A>Dk--^$Rw-6^yx{Ei#epFa<NzW}W!X`mpA&o?%SRpPU#R`R@87j(r1Jcs?4~4?% zt=C7Z%<PD<WW|94#IbapX|~`!{)EJ63bMPmsVyvaAp~0!jxI@<_KHv|FvVNbW^I6? z+|lZ*aL~|ot#+;XzHc;~0ykVrHwe-3+(Uf$co3e#JH<!VL5<0?z4fr58_-ScjOxER z#(gOk^U4YWbrdKeay*;0&SDs<J7ozg-2012I-h4*Et{@EA@tB0GEk(~MmXMtA;n)= zp<|QN$cS_|_5M&P{bG!}{5<QO!2ZR&qwpB*j%VlO{A&pJKwdod8v^+eUkPCy2=fo4 zzhQp8T*4wwj1yQ4px*__w4z1)x>Q-$>M)(7Xv+%yIoPJph8k8u1-V*=<Krz&&*PQl zVItG=&4#!kP@^T;q?grEhb|QFk-?E0;$7-W6&7ow4zegRNv{JqrK@5rAJY9Tn_l7c zakPdm_>L~JR{!a3B4O>ma^W2jPOW1*lCKmuD9-a+3C=%*eCfIS7$Zqs*&{DTU=zsm zr2)uQxh&Ky<TX?>5$KdpGo}O7l!&V0Sb4?AsPP@!M8GHL8iNY-s6SOo<Dh(Ej0yrJ z87R4N#t^yeJoA#u%<K{f@qliO_62f7w>-;@K_>aKgja6n2D`wHHT?YM`{#1Fs>9=Z z1w()YiysEYv)Uaj3iV9iSXL<OT|6BZCuSu%Dm{PqS#B)-VeHSykD8qxkDXT^YQMUm zItKmc`(eub;kMlP+`o;GISR_!y@~5FPvKJvNptN!rYLQf5#3A{eK@H_j!UZ*Bs(${ zAiz%rFJPd$f3*_x@Q$UfPQUT4#eZm83I}-H5hQLipO6Z>|1u_dJG9u3Rd)kf5CN_} zf=`ItZT+VipZhgRVwA*>AzT%927M|@q~4J9`1PDoXx`v1U19!o3<vV|Rc>^)-~LB! 
[binary patch payload elided: base85-encoded data (git binary-patch style lines), not human-readable; no recoverable source content in this span]
zBg#RtY?FY{1lB;HP$#~Ni4Ykz^d^?uhFI+z-S~4DE@C=>th*hK*4)@521u!(pLZ!l z#^7~tJObNo;x&wzuYa>~-Y?H%%-`m8Q>YX}8e_)o+G>$zV;Uy(evJrNWu&LG!~<{> z%P+Dvymf6#uq*VXza`c2lq&yXM#|4{v%LZqF9OlubNqG_{`Ldadz0giSH26!KtDCb z2TF;4DPEOtpP9KOz5D?C7YCvd!K%{Z)VXrkYh6-xxCBgv)xd*iX6Vj#D63m#hQG0j z<q~>u^Y;B44brzVD?MqW@PLD@T7#5#U`;uTRgL@!m;*qG2(iwL7m<oDB4hheocf91 zrWzIwNdIjwhpM<(gY+T#Jepmo7BL|OA`H~1et8c~DC`9VBaj%exo+~QYh{Zx`>oaP z@R1^#-P|$@&b^sBiN5fj+*1leH-_=Af;x~1<(txVG)_hR0`$Eht4fa*>olwX`FB_8 zS_#E-(NR8q9>zYET&&+-Ik0TR&+7I8J@5RXb&fMbUFWE&DzRIU*E+XTO*fgpQYgDP zY|pcn9qEbB7XST2b70u6GNQm4sBOeJOBo^hKqPwwqa1KZ#C+dFjc3JRslbpx)Y)Q} z33|<I%}Wdk|AIBej<K``+93tNc;FDJw9uE0sowHDrIq~qyND;tj^Bp2i|#BAR&Vw{ zB-@q}Od3Dr9Z<k^*B?4}lvkb2l-=UVv?gP*0<H#kH#(keZ!Vht2}f6Ri;Q(XP^Hs9 z>t!{HmvK&gUsY0T3yfF!SoM33ixw^)ErX7TW#|36oksQ%o@y;f;6V#J?V`IKcL0lw zb4Y69XaS_1$&>pHCfN)NI!+7j&vr5ug>6E}h{t!gg+n3&TN;I<iOL|+7VhhlO%Pc) zs>Nfnuh_wCA?DN}o&p}}MYg1o^%ox0i(AA?K}CghnyU5^EQ=mBbaaD1qmK6*=i9IB zyK0x~R*X<T_mE0?L8<ScPjN>pxgz^(#Uxk^>R^GZQ%HJmG`z)Ci7cJaa3yR@9LW|> zECkkA+?o_JKcGQUV(cAq2e+Qz-%!}`=Acf>mKMn{RbRruyizR3Jg40Q!lR#cZy!!j zns)4vSWub+EPe1R<EB4R3p~bpXf0xM>k9kn)N;&KL9O+5T3}OSJL<da=seP{$13pD zyY2YrGpPbQZ@Qrx2z=y5;vXr-wa${wVBq!(;_d;+KK`cNeVUp4M$M+shu{-C+p(V2 zpF#NEvk%a;T9!fLY+V_>O!8Tzb*x?C;=6Yor*&2nBl}KujEXW1i&2-AcK4s$-iUq8 z*l-z}ief-|gSejzF-q3_kDZP{nYf~|3ey!VurALv;nXMdR1QX6=r~*@6wkyE8kw|~ z3;gwEJD!Vu^$h_+cAq%?=B;In<wqx!m;&Fmr8_%`*BROtWM{FTNvXsVbWxY0Tze2p zu+Q&$yl|tPXdr(sgPR_XtB>>`e#2*!x89JLL7n(@(_-#FmSgZPT$j(_^Zp@nT!DAd z(%QJbt#Ua81<+f=g5VfS)o)D*KUKtj_?l~m)QD(Qc<hZimz!QZXemmxdLwD9M5Ec! zyX}x?<bs^i?xq!ivhyB}pNmaeuFLDBD91F=oW!r4`lJpJutI>$k*fRC^Zf}_`_V4% zBe)^|8`(=fcER->T$IL*UssGVE~$}+I25K>{?PW7=NXAo)Q5@%>ZrHkwH?K;WJs`O z_MuqhIIavg@l|YMqwe8fLk1}YJmS9k$qQ1EvKXCPUMk4B!-|B8rnPRk#fJZ*;YjqG z?I}>cw{~TZIExT(?3h@Bqt6XOox!yH)13<KyO~rG-7nUVUV9BvjtNM|1bEP(Qctpc zHW}O@o>z!zJOnkiF0@hDp(;g6eR~lJ|H#?C|2yt*a|HCvTp0W61LP2eM>YGIr-1`~ zk^2qd3FjCbZscB~;y7~`WwqNfLKKlS9dDP`e539-3)VOs0(#Z2M#OYX9UyNX`?+Eo zL|tI{Z<d<E5k^^UL;IQdWLji|oetT)dSP%kITJOn=x1>Cpiu6)<Di_U=;H#z)(ZG{ z<l2MGhpo}?Q_tHUL^l(gh{pNz*iBD@3?$J;=pNy+)L>=GAS+EgN{@DVUy=ho+YN7a zRk(Mj+D9X)>Ajm){!3z<yDANSeFTG3lqkRPCHV!YK_@w0r}>d+%FCqG=E#Z3FVl6^ zqwc8)=vQejk1w&WS2>rk($D_v7k2Ag^!2!<Q3>z=d8hLa$1n2RoNBD24$xKaYN4s2 zl25QxM<%#(Z)<7P5GE4kwPfJ4OyCKPE99OEU02O}N}&o7?+XITMqrusyO7=Wf!LK) z65xu8k}{gs&|Z2126--vM{{W&YlDQ%B+(4<|IO4)>;>sPxX##y@?|9Yxw6&F>VL?i z=wkLi;8SZx!WJl+x&!dam6{LULS;p;S<R2mAsfO*sJ6#`Nr1=0`tMQVL!gHjO7FbJ z^fsU#Zy4UvSvRG<14mwo2+LE{PZEtG%97k*Dj{oNuT`oR0OdANo_h8EhjeCd3^NDI zTj^)%*JtI2p~y#o0)+uV>rV$Q+^RpbBUKMf58toiShF!}L=c|9VXx|n60{;5ZJR@M zw#l6qG79VMLSjU(C5$V!G}p4!d@}>-7SYV$V&?osW;v-<OoagyDNtvS$%062`Y!ce zA3Ve@Zr|EX=#4JO@;iS<A~FsKfA#Bb*@PjJXwD%ynNMbF;azLKQN`<9`9&oN-gZ_O zHJ>lgQ6!7!Yvb%kltaoS6;V#WTr49-g2X3BkB~Wr_`P{<r^!<CW+bqs#s!9leBgIZ zPC3JpAlVbDtd6*yqB7r4k*B2N77Ok_TPw%^%_X6%XhyaI^LDcb!~ROFE%NqzH~jE= z<$XP%yO*=fJ)SNiSd|XNt$7x9f?Gj@(%`@}zBx{u`u38*dmg2Gj9pfk@!xo%5e)0< zcxuATB$>B)&A-o4@Kycdo$3%>2maHS0p>*lAD!~BlpTrJT?3Kp>|&Vd&{uB|N8NJn z<)Tpt2urtJ62Ra>=$}W(|JIdhk<Sh-osr&#d)?s(f?BDhWRm?9G`YYjkIXo3j`V)T z+o+JEHBI5(D~MX4HZcuI0u`}?JN0Xb2p#4^K{uQK=%g@4<P>*V<QyhDF-B(`52OiK z)A2(|4~X)=pf?y<rjv{A4WqnFJ})mM{rK`74Mblvp5XH#oz{m)Q>YozrmiHq0L@Ci zI6i|*6)cYOQ;h$?G_KNCqJR`E<=?(XS~B_#JiA5YSo^|ktFtI)6Mqv49)ws1ulLb0 z1{yo@85<qTBIEKu_oeGEm^f`dQ)s`VcPQ$tt6!a$^<)C6MO@0J|BtSeTBZA|-u?N8 zC)kTouIeVO|G;JjO3Hq}-U(OVn=}?ifPx4H&#XzWMq`mnM?wwrytK%<MU#oiYqllB z0i6vGT%*B)P>kU;fwb7rQ*qL#kZf%5$>OYy`PY}<2Wh!2h|=0nE;L9azd;!W6E=bP zFr(-^VLB(Rc4B}$$e8=B>sO!6XXGnBbMt_wBEJN3sXez>{w!viVHKsWn)%+`UniTa z9DmoeF{>Z7B9L{y)h<$vgINrfAg(b6Tw@=;J}@zClAlTaI;)O?1JL4c*)^#*>1c0y 
zG-7R6)3P4*uOsX-wF_UQ<be7s0kw+yjEn(%gNZAwkM{#vAs(w)jt2B$+jef51f`*C z<^sOc<~Xk&(+@5pA$6duLe|b7bqAjr%^P}EwTUPcGF>zs3MaJJ_TGz0DTMj17P)HW z$+c4)csx(AP(iFB(WP;$GpTVJslmu)QTQ<h<e>{iN&hOqIjr|j*{!`k?^#tw=#Dih zG1b;7{K0*6Y>bXbsV=1G7`REH(nYTo(5a}-2fwi^8SLiLJ?0te7r!Ytv|mXU97S8y zuSpZ>_iDL|+g!zzc5!R)Iud<4Yw)m@p}^)X=`3L$FJ*zgzw>~Yw6@;f63kmIbPfW7 z{vtA{SO=V0bwoY}x@<sh^NtqEGqAZ^o^rHU#&41;(>KV4Ksq1bjQ{362D={^vgae{ z>BMew8m&ZzrR8Y(hhOVg6^-|5c2Tm)*b7<LJkDlHT{NT0I|Qk8kM(0yxsxjLV<dfm zSbpCrG17vZzbU_9Xh#SmVAnMgDPD5ULbBcRTTvxw4Go`y`M`oPM{>9f3nfnMJKMIb z?1E_u^#4qGKxd$|8H0()@X5@;`T-WOIj4H^!5NFe6aScU`&<{Ct~c}cuut;zU29iZ zZDk#->CBQuJr<SynAqwYBb!+U4^qNW6{dr5rW#jcr*%{ROVx`_{TsJmH@bT9z}O7( zbFvmrr<`wG4o%mgg))hx5nIIuPKl6x`bQF^`QD^5JZOeLj(*BmLJbETxlasgN|~Im z*Rf}>M$YC-<+HstFQjkIUyFKsbqX5`^Nm*1#@UazllE>f7pZ$Fd+zuR%XHpmkv4bO zM?5AFk!^Pk>hG`4H06>K_!lBwS9D3i{$cfnTO}ktz!{b_5#i;1pRQ&xajzQrerXcU zs87bYx%%Vm9xANyhiMq~xoWY^7aGSxK_@i(9@xJ+fXq?t{y$Iw&lr4-u?)}!P}57; zYYpx4PbWhk=RbSN5JmWIAAd`6lL@#%7x`D)6#a%>&trkG+=vd)kWEUO6fFQP-0_rN zh?UqHTpVg5dH>Sd?9>1|5VTS7U{o>3_EK!9oW0rnxJ=5!LKb^}{7tKlt@@NiERdzm z$C0Nyub-8Q<`u;b3kse~{yI_+=hL8VYos8<n#$({Hn_J<TJ$`n0iU6{kl;6dLU6pv zP%BUI(b94~>Z(FOdw}IHHo)!n;H*uVU7DFuSmOZbfl=#m2Vt>KPM9o9$+7B?upBRp zp6_@S=e&Yp*nOc8te2uLMD`u^+22z`T07JwjLE49c75i(VEBo*8c>1$4X&99>2Y;i zm5kDpw}8z#3L8DE9)vbVF};Zyg?Z3sgvOXliS^Kgsh{$<ghG5cF0U*eFNj6Rkw0T` zc1WO@27s}_WX)34*qsf}!P03Ux7tC-Mt$_eWm(wYQD!gDegiyH{{8(~F2Tkag%)t< zfhdk=xjB(zN0>hEBB6X*V7g=b$|rOgq$s>KX|-t<YP1K^Q60Z<o36eWsDALoIHkdh zLb9j|vhCrYaMVO!uj^@@hlvt_x4%+}Ic50TFY4#u7pzbdtMv$#d6R<qwTa81vta~9 z$vK!z9O^?qv)<kH%nzNVeP$-WhWDv`QH{r@P|Kg4a+p~eIeUEw=tFBBn{H6-wK=LA zPKdC3q>BUnlH?jPj>qSY=XuTEu6`ErKy2@#nYISdt|GpVwVNTw&VEOb?*uUu2Sv{% zZ`X0+{>Otbz(yaq7SMP~9kv1-*ZGks;A61DQ%7A>=LV(!^jwF&7?R6Ok^D3)$DI+R zh><GXh_ZSc4t1lImPR}Nr&@qaE9x~{uSB2fi%uZqa+TjBh1TzMR3wh9IB|CnT$7YN zDZwNI_G<tRzAj=I%uHv#9<vbR#w`zs18g7}Ci@A=c|3MVz$1bvY=W|%9V?d}Cu`5- zfA>ITUjj0PVU)DtLFE>e@sT{WET}TqWrCqP|CZ2)oHL3J01)IVG=kRO<gBBw4-5s} z>SB1x#Wu9ZFAWQBoz>uSaH0>aup<IKXK-~;+Fr#1yOA=4FHn}ziCmg)G*G(8AD&8Y zb75VL+V93$9vI$ti?(AwP#2@I2u=)?PA-~Rc}t~EcA*+~anxzm;QX%rmp)aL7)~Mg z$FtUrO)xXsIXkonSlHsDd{L;kt!RaA&jq;PdCy~i!_1XhFH`(+1YUT$@cOxa(BHaz z5W$e!h&j04r8xD0UM0Hw&<gwc&)?5;q2sW58R~MtA+Fq26b~$+;uHCEOOWBj8|&ow zrP9RfKj<qLP&r)M8}Hoyi2I9&$u-lsiV{)<BD)eBG+aYGK?nCZ6+JPF+%+iXyh$2l zRS8mK1kjF?k}$(a<z22{LfY=6vVg}^q3>oqKsY9gY$O{PoU8H>zVZsSQVhfYrz;d? zKl$6>ik{mdKJ89B(>ZY;9Asusv?c{t73-qsEhp5k14=YdTI<V-irQ*pzrgc0)dnf! 
zXyurAY5x-{E983%%`4Fy1+Vo*I_R!4?F<3+pzZ>;@GD6Xf(&g)F=(Pd8{VmkARla$ zi8=wz{kkyo?j3V)wbC0<53^~&<pr}dV}{m~pMOfwTKSPXITFiDTsido)bSFjgWtxR z|7T+VuJ@GzkcZ!<?kfNBZQAzGk+<bVa&829DlwrLT~&_VZ$+{!U&8OOv!3M|U95{$ zv1d1U(F(ldgc$mvNmk}AL>WT8xl$l_x_M{}IrGN4abgt>_}?;bBSi1(&C)Jbdu=!5 zBl9t4V37x;Ik|Au?M}B@^mv2mQsEw0;L~-TWe@TbKZ3HYtbh`U6%?}ekbeA5TI>hW zt@h_z2^er&{c}$83zDUicd8)1u=rKj?nR_)og(}cA=zOcmu(@UPvuN{o3kzpneF3D z$)Y@gU+zQ1!I{4iXX^dwYN;Cv3&NO+z+VEeW@yMAV=eGWBU$uJI{pMz7GDQ-LrUS4 z?t;bKca+v9nTljcxg0wD#BvhsmI*A~5I@IFTKtvbOvRu7=z-cbKm~VOPPdhF7x=u@ z@{Gi55@Pj_Jhr^7CQYCHPaRsdoxwE(W=vCbnlLAPNMYLQtRVd=)&F=v-&f}z#xBz< z8;#+~MTR`6X@orK(xHy&PxDAB{P*wX6YK`VPgnIaxZTEyuj;Cz1K*|r!%`c8;7l4Y z4pyYMY(AusWQx6JYc_(b2z#*Y+PV%}k$NANpO6=_bZ!2xm?k3MDEoK=f@H+aoLDl< z(4VcUbpa8aRk5bhZn)T-pIj%Werv?XLeoM1XdmjFwKtwQ+BgMR4sxK+kEq$m)p<H) zqruk8oagI1R3$xl-10iCl1+|fYse@zQzEI;=S|vm+jO*DgTw-!N#}f4qr+*~%n7z7 z4y7v&Gb`4Yg3aU;wAyu>Vl=6xdmvVj6b3ousqTJZI%e>u!oplpxF+IH>E^thyvz=T zKdTs<#rrh;&y4RBsRB-xkN>1PB}KQjdY-#h!yX&~?Vh3$b~(LPT+ML0--L`}#|*}r zRghfQ=i!mki{_aR`B#z<XSi~xAe~b2a~|*o+pB49JQq7z5l4jYI<qP!tM@uW8G8fI z<gP1x`-e^fD~MKjTK1T>vW^h>-x&%(&#zeKR%^-t%l;_sf?i%4Njt3E6d)0AoH-7@ z`&hrL@x|+mN)e?L363b?()Mi_+c3ypaaHwjr$96a`<gu~181<(rUCI^8!hTQ&NCQk z_lu<%&pPFM6pc0%;OCt!k$IneH!WG%cEG<+#|8z9a-K$mQn7I>{Ofr)Jki8L$%xcw zZjx~SBw3Z!FvMN(9hj3>AZc+&)MM9inAKx}64^)Byu8tW^x>zUZ6{e@NNQ;qlzE*c zM!}BMvMIhskz#!jKChwb940p~U`oXWI44}ck~|3=r-Rqugn}HVy+TIw`)L%=(BX!m zB=|7$iU$Zb4_S@)$dp~;wv$rtq7dL&Sc21275Sr__A+f>ikDmyfxwP{?s*zV&^4q< zO){D>{yb(xD$E7uU9rlvtCxx5s2@~07{Uay19jnD7ttyPJ!&i)zwAV?zPz!llrB)q zn8nWB0&#TX999$IcDo5wCtju~=5gBW2NZT%Zny%!E1H8dE`#q@x%@ER?PjXAcO{R* zyVqGObN=cN%4&tU99Am5aTX%_=7WH{qhd-r%l|L~nj>{##wV-|Mdf(RmB#%~cO97C zu8FRr0#upc3x#X&O{XzC{<19nr&wJWe5i&`M$JW;w0O%&r6*K}ofzAbb&vPr0ii|c zqyRilDxCSR-3%hdmRBHSdC{4gf)(!z*qiteTB`gtB{iw!Lt9x?$&_UrsGqQg-gL|r zOVasz`PfVQMa*v9F(RhPnb*`dKlmfsBgG1Ux?l+BE&|)|nTB1Lm?16R#1(RY&Sfn# z%@)V!HP)Gj(PSd@_wBj(k^fop^0G7+?b3D3tN%73pn|0Nf9^(}T)JJG1pogqSmf|> zTc6al12mzVXl}s-5XODQe9_A5Ka-90eAGkrQKS)g{}-mzcF9Ql>HuIS;b=HzxKzOs zhFmxa1;b&{mxOWz<pIX>P-^z?s5-j&)JWd;0QhxYn=*5%BW6L~gkm>r)=3eNpd-*! 
zh8raF^FJnR!FpSWSz~M#8Y+tK>3$t8K~`F6gdU4igmB^dKUeI^cWn(uHTaT!SlY@6 zHI;?zhIbTA0%RtG!kvvUKSFZ%_ATVXkCJ*qq6Ce45w+da!+7`{j!ec1Sj8G$M1pqN zR53cjX6kW|S5s}_iBO7zbN}J+Ucwg96I-G_mU`l>a4JPuv6ll$J!yBw)m0f?9H}_` z&)2s%og~57J5NwJCB&aP0&D9f*zNTs2YMl7Z`AD^$J&=Qw+Ac+(A1|p<4A6^fMh9J zffI0)oL*zzT8;-?Y?93O7KBmcRp-f031aUNWOGWd;xH(6YQEO44H%4i*Uf08Gh)-n z<$<$VqjiBGWjENRFo`I6!=kbS---}Exdl>ACc(@JueTX(MAS;vqbYiW1(}+ETqHBv zKLh_Q3Rkb}=pIPX`i@brgGBI^KV6}P>)m*p=c0F<ZH|d&TUHH~-8&_hj<&_y)MTBt zeUkTF!~+DL@=}d}WkIhPJ1Gbq*m6{lutQFBJ`q(wn`Ok(tkZhmL@+&PMw&9`p&vtf zObt~16Nr0i8xU56)_geWBSiPL-ks77zXTZAACWh&ToQSrEAEkKhhu6>2Xh?stWzrD z-eWp9?1ASEfd=^cU#rjFD&LJA8Ul1r`LqI{MQVb=ljEY{p=33^;*QtpinM<|Hb@!C zhrDL^;9@Nk2WBEc1XT9L{N@}|1?z0t;aCvB8)}C4=|?5aZ|K5?RC|qrL~;L{V=JK3 z%J!!>zm61naA@?WA&!O02AE)JXBJW#uE0f2ZrD`bCOv#N-+@#eVHcdjE%LSGf(fUb zefNe|eC235F0R8rI*!{9ZjTGSIv1r`WIBXpeoahjqphLy`2BJk>p{hjWvM4qa+z#t zVDSl01~b2V1Jx!YdPjI~Wz48%YfraYt7###uIoC{Lu;K@qd`$qu^B}ls)3-FvDW)K zFR8|;Ii5FR*)uB7^Hwq{KBIyh^zN>{St24^U4o@zh5lPmgYo{BYtfEaLUQS-^?ZfW z#IF_q5ct_pM)@)Fzb9{3c}MK=?OQ0<^!d1)hckd(E3XxZxY*VQCh;{$IQ}dDIaKM3 zWW+a?=tDpCV72w4U1`R(j|xmu)LBj*Pi@{G8VtWsw)}smgxx{yosJ~YAAfwGG2+Cd z(M|^P;(m4U+<f>_()=lzcRtmFFO86rB1m7vfxXocbW<`~W43J05B))k`ag;bkc}U- zZoU0kZ65$XK)}E0p)3;LZgS*=M&OBV?R=>sC!O)Z`k;3<`8%2F!}|^rv6T7NP3yn8 zi}}?az8d=D7ScG&!Ai4Z?U{DCP%!O5PZ-q3b<qJnY5A)u9E%5Uq8h7Hkqf9_Lrt-& ze8396MQ~pNcDSO$89tu!aE(W=SeE>99|Kl(d`1@R8F^v#pRHnWi3Usm9ZHvB9&;UT z|7GhZQEF_;86Pa`BU`vJgd%q<T4gA7>}xelIRM49aZdj)tM(h~R`0C=^}<$_pI@4< zKR7ioX#u-rU71zWKO+Y-wpUr*t$iB9Mvl3BevVu8eA3u{Tf(=2VI&E*gCZ)DD2-as zBN7u(ErB=$3zoav(;U8^6&_+ZOf9dHz-<g3Oh}=#@8TYEt3f2xTYq;87cu*H|GN|O z;A&*&E{5f@$IP&JRqJB3#=2cN9o25G!)?n`-=Pk+VxhX&1MYiJOKS0%XIsPd4mC(1 z7iCBYpRn>yps3R;dl%SvI>^`F;nz82nH4WhCQB7?`m^&>oWUIQ<@S_fh!EuMI;EQ# zmay^E1eZ?mwM)6wqT(pQVY$Vvjdu6j=0kCk)}P!e*h1VE0OLt4r2Q}APG$5zOw?Wc zLDDmK<UV7JnGp!a;(A+a`>Q#`?Em?kWc48mxudhwPl^`ohV;qfY^+SfR`?UX*-YcF zD6}Jl@5G>n&|Nl+{B}P~^o&-ePkN^!GL;5xYa0%@f8QP#nJmR3B_OU^l{mT*?vhJU ztFkD|*)ypr#xhKRo=bG&{o>L`TEIEv&m^j;PuyIHBu`eK4S%IqqoDi>HsE+Ah`52P z(twSH2k!up?<gKU8{%p-+lb501jl9`)_sjq?LD*JtVNWm4A%kO41||Zk+Lx9(;-!D ztq!z)K@FzIaeR+sPjvE4(*hII?Qs&iE7Fs~-MevJH1cJQcaBqv^Vk&L#`6Kbd$HKC zD+PGV5M!?6`5inGYZQkR`>P;ihNpFaZ?exPY%gokeM0(GP%NoYL7dHFUpXp8s+w1} zB-D`FmERc9OW<iDXSpj@^(F*+-kqrK0ZN1*!b=Vl!&gNXFEilaWx$;lYtFnhQgKyv za28Ck!Dd2!@+!BAdMXO#te^}3#cl$>hB$8>UwqkFyPhej{>(J9^;m90{6jvKtKD77 z9Pd1vPqgP0q|E%@S}!}A9PRQ_Pm5#c*Www&6*~}>bI}&tjCK3$xBJ~pHB|{_tl%xS zq#DcaH1B(=@tr=g#ywIDZam2tP$N1tT@)9_qU9-~9bB~Jr>cV@zj_W55=NE=K-W|n zA_(EVPzH*LFP~aO?w9g(U@!)Btlp_rn6EL=^)qES8$58Cfo4$aOa$=AZ<QJ7|C}4a zdRNTM4vjF~#~##i_-V2?IoRO{%m9|2fp+@fp{&Eoy~zTvI_&=R7f7*CK|e?tamZDn z!D2j|1CD=aT+%AN8!F{03ES`%Sdx%fNZRwn&NPwneUslo`0zB-;#c3!@7y)j9ePGe z#A^-?!CMl@yRz2ApL-x;_di3B(>T-^e2p-c?c*w$$Ia@Z&<pTb9DD-OR3f>{t5m18 zHt0}3uDc=DIZqH{_)^IOF@9#@9$LK*?7~mH8!cunAxyB~vogK^v1apvFoB^-pIZ58 zZ}BBwS)GwzGIrU!+ZSJd6S2FKQ&q&tWUY7{;SpoBw`@RyK#&(<eo&y|oOnY|oOWte z9&F(L|8pwmfh|ai3#(R~a9PEOghQ&$`7F-9eTNIrj|=9W9Rjus_QJShZq2MoD#&Sd z?vIKkgoax<EP|M8k*{Vi;}&${$z~XQl{r;jmj8utReLhRa#do8{kdXR@-<Irxx%*t z)+c^0pzica!`%=(l1xX&@p3Z1TXt^2Mk1jHVx>I`qHF>WY2Nz?K*Dv+Hu^53IU2zd z1AgS`q2&tIhbG87_W@oYrrXk^I;8&iKHKy9+t5AP>&!lLd&3iGD4@zs)QX1=Qd4$* zTmB8}x}kXZ)W8)9F-cSTq7Er@uRo(Ih4j*dy~>E8vec$12<}rrYgxIR0?-Yq%=5tk zUz*pD%hs<=4&9ve!y@pg3Xie)Chp+sR*E<1v+;kG3eK%j>s^dTz$npjGwD{RQ2~=v zhI=<?go1d~&PN^K+o_=U!H!<Q$(U_P0nx3xSpu*VTMwK|DBoqX9=td`C-!s!|F=3M z%N4pM#bgkk;OCnxb=}Nz-S-2em<o!(2()U!*tIic(`<no#l?|f&6p1Xx)w&$H<D(q zU%{$-lvk>#1W*B_AFQ~lON$K@tZD#8v<l#ZGd!ulyOGO<*XfOcpM3V;e+p+Nw?2Wi zanw3(RUY4{<KavEgndFfJjZnn0LJ>by(mKe=+k2qjs``x21ODgojDFSjy|NZ+4KmD 
z-Ixkk$yw!oPhX*1KHaB2_=HLI(9Rm!L;eBY+=W9?CTXFZ-zM%>d<3^38%ahq44wnx z8$?3PEL3%mKD=CSN*JR*wt`||4<sI|)BQH78LJS^z@)fUiS<VWe2Y3!(R7%TJhzvY zjL#tKR5xM2@I?JXt+tc>SrFeZsNQGg1Qd)O3<9OLeA6F)6(8>`9M&8s|DvI=l{%8b zErFN*7^e6PdF4EnS#!SY7&2zKZkPSe;c7}vC!+9fKQC#^c|1y!Zl*s(&K4|*38@Cf z*1NTWY(G4bpj5X;xo3o-1@-|J_Zuq)tW?9qKt{*EB?)<MmE*%a)QXPE3xIb@7eWC$ zAHgwStmni-Ys9iE<(DHeEe-E=UsEyg&W8Ny8u(f3BL>ZxPs2k0v9a&Oyng$x5OsX5 zJn(52E0ujG`A1!J-n=b~`9idN<-5x0(U&t;v|Yrr+b4~lQ6xYHIYK=?XWB~7m*Xs- z?1Z;{{tV@5!qqqm5C<k0>L=O>Wv|y6Hoir07Lp`#C;cjXbtuaSW>mD;cl%)f^d_7S zbywF0`sy0eF@<xQxOSatFUFn+k#@1*@@rjKCDn3J?$x(f!pFOn6T~3*F?b43bqJ=! z-DRG}M!_+9Eim(q=BisrPyg!Mw^yvn`jwCL^4;>(6T6StB<5@VqI?OdcVdqzyY1ef zZzDcauvwO6Fmoa0_qeaHlramjEB+w^Oyx=<Q3*_+qz%gzKmR>!oc2o8X2DIOK>j>? zGa!j$n$`Dia=33}85L_;A*Y;_!$H<4q3ztcfC1GQZZCvaVdg75?w*K4&$o#K9E_!F ztz^)x+>YLOUYGSn4o8`zb(K~>_(>Uspk@YyeGk_CwGrd#yyk6XLt@%Na|sU0tTjZk z(mM5M(+f%U`r(1Qn9)6NC6gnL_1Xmi8k9I33xg|%v)>u6D&pVwjrYTVTK*ZeXNcMZ zW`CRiVsZAE&Sl0iC~lTY%2y67)8c0Cu(7s6O+g;RT041NkHm*y??GKi{8XY1)3Z;n z3lAR%7-q-D9WT9%Iu4$m;VsiMzgd9}=JPjxd_J}Hn1^3^KBDKIr6bRt)_Tmu0SvkQ zUm@i4wf*|t=zl1M)vMBg%MmP_IGcuv{U#!bHGZusX?g~~4XYUBzfi+$UI?adL(d%L zQD5t9L3p@qdk1$)Wip6`3wVkyoFNIAnxhuTPmnWz0z3(h_m_W=3~b>llCjsdW_hJv zPUPc_lNruwro&9qU3pv$E7`60B%lCR-tP$@UnVgSYhn%sM?LYYS586eAB2PLCIYKs zx4dFwRX_8vd5oIgyB?L;-|Oscy6snn8&a+3OcmJXYGW*<Rp$f>9%F_Npst*~fu1d) z-xD6SdKJMQf?ak=4I*I6aLfZZ#!dLXmhzlG9IQdIU{k$sTl0HB;bfWE{J-Dp>J=q$ zAFMLD!gldj-|H1EEjk)KCTo|*SeiL#>r6LUgyX2^9N;OmU7dLdU0;LjC-DdF33|1o zM}~ong;1;T<hz20ZPyvAGGM1Dc@u#>wxm3MP}_7m9FhVYByf1)=%g2~JkY*kRg()G z-re@Y{kvs0_hc>DxRr$T^|s5j(rfG5Dn#6<$fQvhO3**L-b80)@Ivv{U~L%!FDeGF zg<|ETorz@YJ3u=32+r=s7nU+N%8IgXs6xoZb9TS6<Ldgi7!90ZIl8=wOLN3g2sN(S z;(UTi+iphV@g|@v=;5;S`s|(aiPFNb$1Sv<qclT~tKYpya%w<BUc8|5qC$DWQ@0NX z%4CPSbhZFY_LrOx@TME1XP2nXK&gt-d1wv3Snd5P_(O}Q+SfPOCW=JuA2WvmtsSJD zhV{q~m{41F62UcUiH$@wQ1-`%f?81nG5_RC&)F2Fx{@M^N_`wNfe816x6fk!PBXrp zXx2Ty$@n!^5;65#>+JvT@BfC#eN!$8qy1vx9a5<E*O%?8#wM6ZNjqNEFK{h8e%D*2 zL`psvG7oa4XiJ*J_Vfk}*VXg(mLNlHoX3unnI?S<^IT=Fhc&L6`kD7RNJCl9EDK`L z(%7tcc7HB@I-c^>OZ`dj?WbOLC90#ep2L0K`H?Khos5uud$qFedcjA7d+DO^QFC4g z8Z*YY;%MTK$+E2$X~feLZbFX{(m}<(x$pxmYO3ZSIhIModrU8TSj@|bskvx__E#Ne znB&Kq{H7hwOEI0?*A`Pc-BPix2Y-bQG6yL~xcDQFQX68Q@TWn`!<k|T$=p2~qwU@6 z@t&2Z+Uo>srXE{dP2xBJ&Zu_7*4Ih;!=UvOJZJLOEhWmEke`eWO`MP>+&(~CyDdR5 z{;kCJ_~B;v_Q}C)YllZ7PQ3wNcEW1UXU0^q>IlgL>`YO*a#b>>RVD$K)@a-;c4g3g z5&tNZb9F)4f3m_ib(a_-h`Sh!IZ4n=vKqJPu9|krIf4l$CvdgU(=_79zAq0{x4F=Y z50V|J76%7Wl^q6N5R+xZ`hzh!H}|WYGU%v8QSb5KQToINPXGrnlXP!ixpVah6t##< z8fn-u6|}&iUL6bfkB$tT4Ke+9gsVU{GbxV4+aZzp_`@~g$$;5rQyTBi=AQz)h#|OW z;gB4aT-Ra-H9G?sJ~WN2p6BV>mx@jivDc!<5QI7f%W2k6FdhiAUCYs%_Hq@CS@9i5 zj&Lwz8R@U|O1VsR5;--m;$U}8v#TY6+qE!CIqC+{7o}R?KoSP0av#2L43q~3oE(Pa z#_$BGeyefip%KBznI&Ok8mY;AWQ%yx?9t_Vb?0sgKI;P*4r6+KUQuHE{jWY!f)G6V zw6D(Jvp7xY9sZrV8o)`~3%!T5b?vHB#y7c*B<<5=5*#VA6}C)93Z_iSYQXhVcN4m2 zL!FXjy<ra8CAqp+JyOrusvDmL8pC)G3MkZT#W><T8L|oG{_J~5xfREm7=>CxU#pG0 zOxx$ddCv?@%)zHBGav$E0gU_`+P2i&{P<z@`SoNnE)+w;cLHWFhwc~9ji2!IyMPjT zk#&zqbvMY3M%A(}Dp44X;xC_^#%FAQ4O`W5Q<xry$6Vuy;#Ag|vMsgOOCCpz#rou| zXylSs=hdln-7f#`7y<g*xs(nGKFqP@lXnKE?+B!`(I%8^eB9lTemaE&{{6@q%Oipp z_B9K|z`W@+7i)Q=N+IP+Z<shjLxCq#&YkTFT_W!?pd?<cL4$jc!Z*)f3SkEJAdd6- zu<?8ov+AmdAo<*3f_Ca^JSeasF>~aYV?$IC<P8;hwFlJWZ8xWJ@er`8E;Oy8$&0jB z_@v-)OGS47;P2ho|6NipbbCh+=qu*3!CW;z7Ce;};TUNmGODuK&zeO9_xsi7U!yA` z-T>!q#g=>FoP_98{{lH?Fchd=Typ~%RmVJ^lL-h{J5>Zj>6%Aju>5%RjBKu2QhP|v z)wg1wxq>3lmM@NUP!vuj03)`pWdktV?7DcH1Sy<_bCY*3i)j)&7#Wu-(WP=j_l;0G zV~^Ve2*bv)n7yW)Ht<)8shNyH(kP+?yb;qx$YLw_>d*<ncR^>1cn7~JhItaPtRnYr zIvu9dU$F^M5o*w&EpZQAV<D7nh3#i$F&O*3m@)|R-?mCB<7^rD>CYfYYh`^_7!0qK 
z^cN;%2}FK_yAp~BU~HXJk4R?JNvSHCah4d%7TKhk6=DB6esx9Qq(0&mSKyHooP9|) z;fPu=66@680A0;-KfIlng;w4GIb`d^Gw0qk?koCW0oqM1+mlpB-BE<)1kD=gJwj*w z)G~vbkruiE((n|M#}C&q5W?F+oo?9py3tG*{V|r+^H5so*>Nf&g_Q;v#)bE3vlI-p z^d~v&IjcG`-)<BUCT$}YMiVk51v|MNVE!tiYWzumi+2|yj)!<jH+W8A{ckc4Eeqik ziSgPrtca6ehvk{-D}3ioqfF4_Br39|P?fxi5B7J<9ndv4;+JviADq71#n=LWfEgy= zq%BN7?csuo8%dY?AR?GytHvRAi4vPqby&0`$aV~mmLG#X(eMkxp=J9=KeWu0KT6Wb zH<!3<zAz%FKf5%gdI#{tKG3a%O1A?6+B;q=SukF>!gI(BX6VgF9bpxBMdIYH35D`3 z+wqb4swgy}<TEs)&7`B%J8-syl*<KJgj#sHb;3RU5^Y{8`{}FeU9;IxIleXuz`d7& znEQVWRyRtMaL%YTA=orxtbiXQU_TE0ZKM8eFT=m>c5$dvb0Ws=<y+D{hhCEZRSl&U zAr!-PmHYM)a2z3QMZs8GE}qlT;rC(n*K1KIrsb=?a0YnJ*F8ArIa>(H%yj^Ua5fNC z`D}-PJ>deop(CsqY2+d5-eI;}%FvYI)ZSA=^8He+tx;^eOy>w372Tq}{UpOW{aWJ* z$J#RE0>%ClT#V(t@pi3d@RO?|FD8q0VR^;}cV7l3#@AA=n}q4Sej|;nN4NO+5MiF* zb?-}mJ(DTN9TetoErT4@2Z}0dUqg?sNj^&Hx5@UdNOAys?F7$ipXoAL`Sn4)w0&7z z^eUcef-2Q!LA7l6yI7LYDeN@kL)sJm+&jS}w<EER!H_L3%Zy4V)Ir<>ZXHH<+?#|V zX<NH!uVnL7aJ?z?pfpn~XUN-Ox1ewEWrk(?Outg&DCjG}<g3G632|cXwI-t8f7Z>X zla1c{RK&3pQ6l&#rkiNyWuZ{KGH?8VmegAvs}TD|Z?|_)KN!C${E@xzw&xlWVV^FY z5uq26xz*ajbbgn$#={+gm5r%IUT!Kk-NTX~p8d6n|MtFiN8FLB4Q>;e|M{~0z2#e= zg}GXJ4HLu6@M$qP%FuBZSEhH@bp^?Kcy!VNraMjLaH>yAR%8MqSx)Ma0l%M-Ec{?` zz1r<oyde%7BzMqb3POL9xbe`Jtg6xhYmD(r^=3EJ$*B+BpV-Vwyk~^)QD2F2w*yZ? z5xc9*_$u(rk%NfqCmR~+3X*w`3I3j+=wr#&=l2FvKf4sB`o;<Pq<Ax+3qEyFWbBbf zjp~0ezO1@8AU(8&t57BdV=^i@FSnK#mn<Ra4c>z;wB9Ch)wrfZ&1O}uH=mmgy|$11 z?yC@!sI{8^^0}<tKr)+fkk-8))$pWXG_^nI`Ni*krG#X`S9qIj_a#=VlY8T!X0lU2 zx7%EZ__7ZE^|bln7h@C{vn~IoGH~l$`3wR%_XQvQza%~h7g42qNHh13en&-OT-=^k zlz(EAZTEI-pQapEdF8=m)L8C7ZAw_7svZkoD&vtw#K==kNS@EdRcC@if@tKmN75BG z|5X@zYAtWc6$7y0lV`g&)X9NlNgE`%chd9Yz%Y4+{Og?QkA{M3C@|pdC;x4{WS<u; z{7lHen3rd?_Hu2c!Ts@!P6Xqt2G<WWIHQQ>r*5aslA063aQ<kok7M6v7Dy5&9@~W+ zr*){VSCP=js1UiObyi(w<^5rL^h#l}B<=9>RXz{;mn-vQRTnOrBBi-8(FG|f3QaTa zc@cRX&)H(Uri@A}%At(Mhk>qs1O4)-4+p;!0rH{Q#^`Xym`;2^_{**iFF3-Di~<VB z8eQCNp@v>>ZpZytJ-Jo*U|!NzK`9>kw@V{;56NJ=ceQEH@Xq`EaAILR6rI8eXKM?F zdC9`ERk5X>$&<9G#<}b`nh+fq{gdX-i2aC9wTKgcpZ7O}R0qhXN7_re>D{U^ci4Iw ztLpT$P)hl2RT;29`IyhWwJ&r8!hr=|kSFWH2?owHTA>g>m`#+kyO4>tM%g6D-8JJ^ z6qGSFomhX4@>Rg@+jQ1zJ>6i7UdiRi4?gf_d(GaZ(`p*`z!E2qK1QB4NwR%4%jL9& zWAbc21!~9C6p9|9kOUSE6s1pK#8FGV>mnDC0gDx(s}trwC_cCfnt6c7;<t(3ITjb$ zV)-JB8f#t-BhYZcTzUPAao-d&N~^dM6Q<Vwi?vKI#o7J8t-9gcZ|u)EBn@>P#R5?> z^E>reEn#2`9c02-)i9E0C7h$=E~wfEIi;2(z7DR=fgNR7$ezXft|XkF80vz}jjsZc z<ZbUiF)Iae(UzGvNdYKNfN%4=5*BAkn*KFnU6IXWoGdcqSg%Czf-GYwNn9xsFsUSn zTa?V&uU=M7rCEVZ^iau9q!|Bc!SjJFD-8j{+Quu2QX<%fbEOl(k&GFKwzgqW^)a`L z$HZ>n)b6*5a^xJ2?UCj~rOxmqoNIDq??X;;kcp6KcltD5m9#XV!h8FiT8sx-rCs%< z6EQoK^M863{9*9g5l@8K_2Ce0k$<4X<Zmu)s6}WAoXQc>2?aIIUrG3O5PpsXBlqvZ zJBe@uZ+)&{QUZR2CsVCf2Xt|fBkuFv*7ZJDgD&hnJw@(o@H6&?9K*qky>ZEmT!bd* zb>0X1^05)HJzJ2Dzr;hz8xV@<=2@qF!`yo6Sg3w;81eeS3%49@QN5?2ldLf}&5N}f zrq9fHcI8xae1@M6&c(K^G^6i58(aAra2$u%m4`b<GM!VLCI#m7oj<_Zrdh}aBS<UQ ziPVlVBV4(=WzV^$J2O>pKG!MyVt~6L5ydZmZS56PRE0^-l}Oi0+!k<z5F1LMu!%#4 zfq(x&BcA4TLEK?_5@0MTs@zU@jlR=|KIScGceK((mFVHHtS-hL7@zoa3d1kINv;uF zIW~z$35C3Eo9E{|-Esfu<?GjZ924w^W_>|bh+;DR>^{}wb~Gs9D*~3N?|XK99XZ_! 
zvpT~jCCy|ls?wThWj5ToYL5llxU=p#`Pr?bWPMCQUp*5Gq@bx4v`)ziD=QUomWoP) z$+KPb>f^wtQIU_cnqWla9_>%%Xn8<$OzQDR?@&48q;-P+m&MPjQhhD_xP@5jZ;4E6 z<)cPi_B<8dp0v4r5W3yfRQtNaBLj)i7=@(Ir5o_iK4Eg5M8gja{E(Aw7+<WSNJ%|g za?vl^SB3PPS%r;lDDt!Z*a+K9D5kx@kw(toIgj;P1dKLkm~-`)jUHjkYYk%!VUbML zLqzC?hsSq@HJVz!xPkSKH`WUVj%^pn6<5uosp=6Ue<f_g{yp(kzWYHJ??yGk<Uk4j zx4E*5bvL%${A^eV<d4?KdDQy`H@usbki&Hz6<ea_hu%lhM1i%TGsAbaY^fD2K%BI* zdp5#P>OpZ#&nPTxa$WxFQ1}0Mv>rj)HL9(tt-!WAXS<2rat3(Xw0*g{ME!o3$ROyd z2i9)Z#o<4DvQh4(+tDeL&HQvpUZ(_s{5Ybi+aBI5<R!!0M{e<Rky0yYJm<l&h8{sm zDoOwQ%r(~akNYL%zM6#zSTLYgr384A5;B%BDh<3}JdiDxbYy2S*2Q^^5Y_2#-<5I= zUXhD4LH4srY}jShgMLz{%iL?n3uiPC-CB@$Z!mcR6MXJhh)ZJ743cwP%5m(o2T>?i z{os13(s&VW`4Kf!4}|<al<)8&!&|v6`F>q~Zcy+jO^@4Ldyhv8;mInfNUa4V{lySY zeL=n5!-w%S=jNE|kAGCI;p93{-r5CPzM0BT+-OQOZ*<G>ua@=|099|wjf`==<x>mn zli7m72iz%i6R_Z0V<>U}3ddaSP*$+jo2YhAVNywSc~)(t7YJy3W|D>2<<&yN)qqyT zzL$?@nrQS$-iG^R{LSW)^uvYV1C}UEANNR5$We!SVh?j-7t!s$RZV0h_$R~SMVcrX zZV<VCEKBg<%W8%T4xxym)H{RgF%+pZF=Tg2U^2eP>rZlroWg?*Wc&duKBvs!SDY>R z=o5Kfp@l2gl<|_F(&dyS(G)vA6ZisbVNg)}4z9qzshD5bx=Uo7Y<yY<B*qD6eXM9B z?8*-NZ8c&sW&fVulOr7?xI1Ym4E+{g`-=k3H%qGbt<LcJ5_N8G&P;f0(s~&jRB`IL zN`O>xR9bt9HcKY(r*R1H?p9XFDE3GXl1>)!!&OggxPn2cL2xt3oiU)8+1*2{-EP;D zaOx>Zogo64__1pA8Xai_+^#xHlz>kPS3;shP_?~2a*oDx<Ogut9G`6DAo5VCFu)lw z`!&o3oiqr||JwQ`g;8|sh%kFhaBz`7C^4`T-ASlf@URmYc#U+<nvbQ8sXCzkmtrH{ zmAHnkB%%0iiofn1M6rOwqOHuvC4<J+-*+|B!=5dNAN`tLqJ)q^syPH}AAgA$mx8J& zl^eKgnu(UM;z~8{`E5NhW)Rl0TNy2y-NT|n+@~htl^1garL$}B0m@qP^5h;2^Yx4a zPh*F_u7gQRM6&eBz3vG?1q!Rt+wt>X4rW)j?#<+hGZN16`Slfi5G^==hy<ZU7BqWZ z>0?MO_C+W8rvONxf<+@eg;1eyD7F{~g+kI&`AL+yWzE7|W8omWcQ~O~oqfi&_^~hI zQJBh_2_X3W7Kg+H0Rxiyv~QN1Stl_fQ9TCeG~$xn7(n-*%r38M;R|<g9by!1s7>Tj zr9iYhru)Y%4Qqe#Lawd=gCGLp52(8$8JX3K)%RlqZ1A1_yABU(UrMUf>pf3U<n<+R z`RF_;C5vsQ#~+r8;*xG-Mk53D5e_{4NrOBuUKR<ugw?-w%66<s%t2QMmmLBJuYJyb za4`vA$OV8hCRW0Ky%~fsW>Eeeg8V1S%?1u-i9o)v__3$yUd*r-sO+-X9Ex{z1^p4v zv^3@eoIf2#WU?_N(w|b)U@f3kkylUEScdA2t8~ESkY5IuNg7^h9H6L<>8kE3U$d{x z?!Lt7WmQ0ZIVKj0Q*2+TSUYCV<!jfm$Z*jYw->AjmWJct`R)Y11RmgK*2WMcKOg8V zpZOFWnWpV^m(@K+6xmuc<dS9B=JzWNhJ0BUo?{5>EQj-Nl=)had6!u*7mAy%T-NLz zC%UH4wpNF}L;^~^!?go;gq9bRAxz`apW`h-K1Em2e4#u%LgNYbvYb9Jl;man2l%1z zgs-)xL`>rdg8x-i9Ls)F=0Q+tr$;I;_gA3yOWzLcy)5@A$hbd{myDLx5uZePH=+3# z{~}<^{RBP)-;Qh!j`tl@w=EJ$9owNR;J9l)yI*^oUs%LK{Oi&qS{+itkGYesOXRBJ ztqAeQ6B$5NhlO5t8JnPv<E4YqTADriyb=nL^Tz+vQD@9Ds_{QtaK_Z76$<vk>JW_2 zx881|ceCG6qx&-j;ZHb!s@)oV9F4B|_&BAZaGa;Mj6*dc4rF6bFy8JBfoyV9TL?x+ z3RaM)-5GUO-nKJ}2Ty*IQ`5k8sUxZ9)m-yV{2!x|Bs4UU>ujLLo%JJacoFQ^<sa7x z$t&?<HRjFmCUYqI8L1U<Ya{+D`1S~v35jsilvvrBR;}VXc0PVKbYFAF<7UJb-G}0) zjH6IQw)}D!Ovb6Bsg^|95)SzcD7t3PWX4maj8>Q8t$n_Anud^xE-U}dN4ud)dcybj zmf3uCd8Zpq9x};DzHQyQR5YdqCk83^WNYCES9E9K_F6i6b}g507BZ3lL;k)<{qb6H z{t&FJv=eP9Q9vT{Lgh@S!c#=6dZD&6;hn&)Aon79ANF|qh)KdAZC{XWIo%T6%<;6Y zyZ&?mwL2nmYcy(-nFgD#(9j2}y&6Jvf;%1xCg%JM*~g>JN9weoDY=!}K3?QOnCksX zr+fw6`FSS|gJuYDB|=!C+>jmC3fS#cT@T#k4Pm4QWC=c{gOl2Wm^1{upoQuJy+$lu zc9u~KwB^GBd8J}`iz(+g3iBtL6rUMGwwjkzdc?cNJBZ)J4f5Dd+_iW+HLfO4CjCyd zmjVc$umoereDAFug_}`4fRw?l0bSFcYzR#zcGj2aML@k7gtmy{khe)61d@GLXUcO( znc_T;*<c)0m8HDa35trx&Rx&O3!CHE{G7xMT=5f&*ffF6#+o3(Epnx`r<$4yw36w_ zuzdeP^Q5i5(ohR#3wI=MnimV9|Lxc43MLkuhWO=o4+~1Ng5+z6?>ec_NE4#%On!;h z^%L=H^2UB7lgc5!@j6Y1T(K9Q6Q)}A)B@4lo#O`5CB=G^BKxok<ufQ5dKCqNCLOwC zo*TM?d9NFDtr{fhy0n>^eIt_uW7(?ZK=FU5Hx1~~Poi)Rr?O%~{wzL<?i`zT%0mj& zuGhmu&W5t_F&7f|s9!j;@No!I2bB*0ElqKkgVYS?066BXpg8@<#7y#qZdJ}^!#S4F zfk>Q?Yqf6YXt9H=b$MJ+-xtaokktLY8eG(?Cw@xULSdi?zMWNH_p`Q;(H<;B)_l>v zK01<ucqLt2KYOK;6kzO>K5!h`&FEz>XV%|{WUK%3m0#$aU<)x1XHI5<H1qJj94RcD zFTe3;L_#FjQ%^sTcQj-X<Ycz*KoCZUnr;hI{C`GOAtekR=bVM$O)(1h*VM)q`!!lc 
zBl)aj7DKr3a1V!N1oHFtzLf%xolFC8ILJXZns=}}IcWkrSTy@qXTcNQiNj=Y(79P% zTS%*p9qH|;D#$$;edzO7ik0X=hyXp&!@lsMw+*i#7%jc6jVo{g+gdXBPrYCf$DBpL z2#hAGW%n^5QDQ5NGg{hP5O3Z;)~IDQ{g7SXM9}p#Tu+0a$5y79YtQcUPn|8%-WOIX z3C^xm*X3D6MwQJkvO>+)?`j0}G5VD4hCqg}a-A*qc6B^hceS&d!CDA`b-l3}UtL37 z_=86;g?TlDBp1^r`Qt3eA6GKe?nfzoKAl(2b?A0ZG8;2-E*XeaFWIw9B?IqP7>c?T zJ6joVBX{$vI)o@-NmX6{a9T&s@-ealitjl{7Ivlk_%669vc{Z!`zQXd_Jt$7EO~^% z4;aW#IU=4nc=v?YxN#VDZ?8G?6cT9pF=E)*&ma3^_YYmTb^R@G(K|s~6WDx|BWx8; zdzYvd#?@UUl&<m%oFmi55VYIn0f@?iT?wA;4T){tI|RL$aeRsL2$R~7*5nlbXOO8f zu5n+}^`u)<=206@O!@g?v(Ra^7E6Pf;Bb9Zk_r<TB9O`EzYruE_$9{QJ0s*vrX`N5 z^D4aUBPLd2CBB6mb|R-(@XeJEBVZ(!u6Ay;h@VR%;XX5)MRTe~dT8lL)d|WGn-Rmz zK8KV4oTgq>E(W(CzQWgb@pqkA>-v&2MlV?U4<%*H`y3KVX@=K7rf(XkUF;AoXaK8< zf`Vd3GS}UXE^%Ff`O&&RpD6ef6I2KUuSr}2X*sl%izc9>V9ATjg)z-~=cl=V^n@$u z@!P0X3z>hJ4q|@8szUrBkK+k)CX&z4LF*=2cC$&DGoKZmc)8g(PtP7*Y0$h6^CQgL zWcN{l+|1ytXKn5>TJEO0>8g1A;9uMh49ENvwG|YoeqHY-9wWSLj29y==O|pkWoG5* zJ(7lzKrNq?ri<BQZ6FhzbX<@VKQ6YO0<H4aFx1b~v>w&i>vTFFY*s8^Q$<3_xla@G z$$z>@^AG6o!^&LAOUk*jxLK_MOg>@=9p9%E9)H1KODD}XAF|-Y43w^|dGaRz=wsRP zL&tGK?3|B1Dot-3(lh?NZa`&hh67#HYSV7e{}V~^r?NV?)wlzi@nZFq$rTF^X_j|V zjO6F(B%G@JKNk8EPH4_~uuF;KSi@(8yF&;2C(6#Fje|{R0+e?dn*Uk;^6V<DIhlI_ zX<TVro;|FAX`9r6c!=yyJT&}Ib{?F!r&|c^Z(sD|Tf~I=^Dm&zh2}vD@4#~G#FeYX zJhpJks==!O|Mc=o;F~g$Pgk%<#2o%aY&kl^qHTR~*DnUne}wnYK0>%kC$z18O2)PS zwIn`IJXFnqwyra;seU)mt~TJ_ddpU8&s57bm^>gt`TzCcv8m&5%$QDH^Yqj<hz+xa z!GP(@PsVIk-F{5c@GOA-0Ax0gQKP3FMW7OsG0J6>tbU!RFot}Xc$<Es!z(p>LbRJL zurp2dAs?vmNz?sg9zsQ<!Qv%2p9POHLmOs@be?n{Fo%I$7xi_?r>w(8Pho|@`*eO2 zv>Zx}5o@o-vc@5O%<#jPPAgcGCM1YrrEhV<m^&6eCcc=1sX=%{&cPF{_$0j)lCS%a zgZYHNzZg2vpL2<4L(Cr}Xg0c(wGR&Xs{zg%{_UAKaY0&LRqWi)XTDa9Qu!BBhNM;S zf@$H9VJqhh-T<xUIkm^PW=Kc=twQux&kz&e={z?q9*vv<1(NyLcI|Tp&F&}{a;AEZ z>?9&cy^u{k#8RwSqieWM8BSb=R*Zd%HxkhlgcR-pe!g17yaL5iMR!BO9U0E#P_u)r zD=$?Pu=vZ^w8`JKiE{Gb+O((RQ7tU7$DlaZXx#<I!uInf;y)0JDtQLP-`)xEO%x;W z%C!7MghMEC=#EC9p{X$n0#|&vJTQ|RK2;4}k=g#d=!NvOzx(Hskt-lPM?se4VJ1)1 z$Zw~zcmphh1Pa+f{n9p^lO;J%H7i0y)fA=}2^N{rSBm8i@h&xoMZ#<Y^U+;E?6Psf zDYSLB7F@)CRbJhsRwLXzu&v8G`xhs(t156F6xIt+ULXX(`3VtQDqh6j2KXV%M7Lx+ z(TUkUOTin#diG-n6tE!NsdB^BV%6~;H`)w9sflMWU>gB?I_lx6=F|!+(^~-^@(eby zd3BY3%qFvdBI*M-8gNH&|1@~(4cd@TJe`@FU{E6zJ>$eQJ`9s@H)atJEh!N$8PwKs zH86eysG4yOKv=~bC%VQeTt7yi2b+;MIGGy4N*^$5^S8xTC%zURNPAb`ajkEC6;tG1 zoJ|1?Dsps|8Aw+e3kEUzu*sYe<zs9xeaksGwV&<;`Ae=3-S#384r&yDt-y;Xa2Zr$ z+oxM9#cP>ldnq5BCV?A@O401Ij;*zNf%|rJ2pFD8=2>cAbI(3C+w@2VKm9RI^Zg99 z{mZ?ippA4yfU>}q0ZvOzg<6uGwANh~_`BqMt<0^dCUD`ck7_U{*+8v$VP3WydeT)P zihW~S2N^+pn~hqz^yjT!gWn6YeQ6(BA$kX(@&Bx)PKK@JQ}pB1JKZxX>S~T9dtA#5 zFwjbB0@Ri6X5@Kz!6Bi9qiUV4zxBtn@mC)*2e>}otK(FN7A|pf$X$)w_msO5ZyfRW z5kOVM@{NG<01KUXl$ZAWPQzt7@zJV8aSYcYge$&z#NhIzfUrZ8)+XapS{2K-qDJ6D z1(ZOBNePWC4}b@ocjv`k@sqH`ws*KekKk$rHb^<&X3^n~-kP`niW+IcY)4yO#jiZT z5Uu?kRw38w=>^1W!uFYGbT(G9m@}<SPm#t~U&&SThM|$V31gX()V}XeXK`r)`9R?6 z)dT34A$-&kEy~x0<lH&Oz1?tR!&K0d9`*C=ueOCUeR+*AWESQvkd9ldm5{lxsI{p$ z*3Q9=ZnEpoBbH~yVO}a|q6gZtC^}0h3~x$5Np)9p@(b+|3WJ$L9+9TOmBVy+hHrQ2 zwahI0nW;o|dBc{#*5H+>h?SO|fkwyJAbY$B?vFdd1yYHt%%x4r*M^Ofegxg_g^TX) zn4IN0vNNmRuNYPXL)se@aumz)C`H4#JO$%K0?Kune_%OjkRxA!fsRCSzP0-}=0k`K zMzR$nXSdeLC388vD~6L0-5+^J__5yYm{%rwM@IxT$*ms_o)9w85==1)G2Gd4`<j3D zd&v#cx!^L=l}mS>H?{6@_G3Qhv1q@}%s=)1c#6w9r6E{}_N4ZV!X$<g)Pa!;cAHVb zTCpmY)|24t(8|h(=TT70hsde>s3PcC$EDkSLkFd-{K0yRYq$ne)N++q5xMH8uT0AL zqj9O#W>X<1(|sGS&_EQ-ykzYvZ_!lBx69LDu~KYO3I=Z}Jlrqb_5ik(L7nPkgV>jg zLlr))Po*{b3gn(beNlPRR9p<?e?SKBrnJMd<)r>-z#xL8D6Cqq!CYh1_^3~nsnFFi zY07m>cAK9Q^UJ}(;cCr0bYvc2B!H0enI&6$zmL`G*M|qe*bs@}g}eOvjr@v=7P^HO zr`rD%KL6VxtoJGUPc2kk5CegAF+Q7iEj;p+ISdt0C6uDCMtL$RV@#v$_WY0!g-OPh 
zD7_qje4nIG=;$axcxsk!X?ONQBY%{W&OXRov;Wv`0-GE>*I?U3=>(A!v7+J0X_eIz zc;GWW_p`0TIjWyoxS_A)iL2lFE48+FTsHrmz)JQ6&C-x4$s77!oauOTaG!yST6M}9 zxy1)y+v{Vnf!^g(t^PUL_le>im)IrLI0YOVAxUZY+N6Ws90STmDZlcQ;PmZ^_?1dL zn1-{Q&!+astat!X#^Cxs3U&ViCUT&}W<~j4*p183)6*d9MSH)jaa#@l%$^tzhdA=$ zE3Fmm;iQ7=SjR_s9QK%kCDk_reunLZgm6tyi{)E3a}`DFXS(6x5$+YfW+%+VIr*FW z%0)2{$PUu+U7fh}D~MX;s+0Q6<OlF{0cXE&S`<zf^3I%$@27W{)|Rc50zhyZ0}2XY zINeKgRh6)Kj8n1g_vD1-hc1<5oR~TGRGo2BweHqt^x33KALOHS3;sd=sLnMG3Fl6? zz|8sQx@2?JCz!10sL|BQuCEU5$?2TvQ5qbsHt3{;tZsrD(c-B4S7s4^pqaNzN?q80 z`Vb{phuVEhfXk~L@CTKnYs$!dVa^P3>FE#lyn=do#=aSQxX7zmfI_IJfFeNjC;t~X z;{i8JJR8^fzQ=qoU32wA;>ja^P;kYZ%hB)}W`1P^iWUXwbyG#8-O&!9ejArhma^v+ z0@Ht8N{!kZNoMH*<r9~%e{fTztZ}cGa#8_K@bVf6svA!X9!POY|JALOpm4<}lz*!! z-JZoQ$yaP;C`CEe7+&WV(nY~8a7DuWI?#s`BgsS9e5bL@CdyjzAa%#GT~0Z}5T!p^ z>A2fbE49rqb$@j088ukSKS>jSY>tUN)K@}W!MXY98~a)vffL*dhv)^I%!Pm!QeIoT zzjtD5`5Nt6b<TYnO8l5dv)6pLeQfA!S(4&QE1P!v;eRKUr50>79GZxtbxU{1hWycN ztb_Sg@SzJ8{aM*XevW&>FjE+CA9zShTmVdS@@dRP<Ag_a6pN1gnxb?X{-eyLEw^_w z`8;ehZMVS5B5Dy|L+SP{vl+Z7hj+Ke$6TeWeu0uZiEZ3APAT$#xdO7H=8+1kU|3bb zVw9#`pMYzuw*2};=@+@<!V{w$9X2h&*3B|<;uk#*fq5G?6zAa8j_xzI_76V}(8c`T zE+mwFGJ~Fq6cVyY-~p6CtH$8E5UfIeUy_Zm_Nrv*Q4Q5%yuH9Z8cNZ*?1~g}EC%zx z<$u)TvV_kz{7N-YZBGw+t3~x4B4ad`mpu7hBQ(3!-bwtG4))PryqLEyOu6(xZE+ww zW;p&ciDhq85dY>;NFcVOjbx*@k^E%K7qAZV!%~)RvX4>YT(ESN{?bkWH!^;JJzzMH zVy4<|M_gJ>E!0&KrWFl^`VqX+boL4R-MVXa8N!wD(gTc<K^DL`?5fRsSk<8cjO3he zfoVlwOb!>F^R?jriuJjb7rEgx+vKMQEfX~1@-~~oHrt)kIp5W-pl_Gd6FF77ButLu z)F+W03dXF?un<x0;SyYX_9W)_UMBd~q_2K2#;zC_Ly$@WPwPl}%ARHSJ}K)QD;-Cw z1uDq00mPV2jsx%(NBl0o?eez;GGvSVuez9U!LDCOFG9$pAvWk$fXNoAAFrB*hzJ(X z&NDy*IIrT4TJ$9a`Q9t+7GGPmPTi(zvbWf1@3s#XRDqztie+AjyNl~cUC@S~=+6wv zBs-o-SnM=rO8;rDfPtsO_kj7U5>Y^)tP9!Cg%rF<Q&Ua;Fv(2a0w^g@H?qD`Hx++M zF9agVXI<|-V?83b^~k#c*kEeOwroFZOe$=*V}Yo-FARZ22q8qaAKFk-DLz_q)uexC zg*MF(gI0BAL<!$vNTh28<pv2k8q-rh;y6{eOc2|_8w6p~<;0%bCHUXVp$BB_HG_?i zP4!EmszQ$=-PxPozB4zZ9ttNcw!Q=H1CG<M^#`Sm*lz?r$tu83qahM_bQPppv$tjs z@^*gL^^1bAi5yyN`2!29cXcwc;T~q_Y*Bq>tgJfRp1MCWA88_YM^Z(JaR9=ha>E2} zB(fxX&8p!eXeD!}SLT@{=qmrAtfoIff=~lk!_wPRB+IhYYFPAKI$36FM|i;Rt%)5X zG{+rFNmm%j>FhQ|5k7cQ9=QTAy{%yUwE1Qc(M(qEH-Dww23y<blmsjDq_ICDMr;p5 zX;MSsiu4uoNq0<g0bl8&cUMf{F!&F3;f|Sbc-DKfB(+Kca>izC?hzvLM8sYIF9|#B z!WE;kgE8pCv{e3dQFV71&gk+85mr}`3UvYAVBhqNx$@hf0nxws+GbuqLHR^fODH77 z2$Ru4ZQ>Fe1Mre7(oW`=So}eSJsec&Z~vta>Y$zYf;$6nPelE1o$s<19?Ai=Ph{}l zUJxQaF_U)g9QinzPdSi^Y9yF&p|!K#J;)c!lt$W@0^gD-9L>+||D6MjU%>|d@t;qE z+)9ZQ!Vfa>^ZdIj2J~%qjUPOGzXdNCJf~R{Y_&UrN+^=p@snBj;q}d&EW+>30xrt* zMz4i|jDWG)PHQvY*~@HSzG%6twZy!JX-tEbDl5GWNGTv{=D_kQM78C1cn_~h89^!= zw2*|ui*~RLZFYeY{H%%$t<Ro6f#n`m3`$L2Ns@lX%)_&t!`dYeJ$hp7%N{JIQ{aeD z!QOfoas&(OFqT)GSOQXZRzH^&%nckq-jz!V^5Zl{+bnarBC*UaLqE?Sr69wVM(RN^ z0jb)%gIoE-$PCmkg+35dA*ZBCI*<L)H3gC6(#|ygBdAQw2Q8IUq^V8c7vfH1s}yEQ zBKh_xNreeUvzE9%?|JB;(f6QXH*Oi5#CTPO`BRZ_^*|xi=G#rwAtq9NMYEv_T>KgT zXw9cuQ~#xKa=jyER-Uq-22#Y`z~i}kAuHtSwwh{QnmnO#KzxxOb%&5|vYthMVcWwJ z)(L(p!2DN}ZS|d&7z9L`b6LW-l$!G>raCEyJ*YH=k1s)`Uq?8$5g_QJ0r}|-gR=R8 zGcv)^C~7+rv_)C?drP5IA6eX?2ufPif3<aSM9Y=R<<sKfO*z$<1V3gAGxf0A+J*IO z#DpEB7w9)@HN$J|FB9p+bTBee`cq)$ck+mwOsgA!JinS`4c{@Q`lqaX`zt49YkM~B zjW-L{cP#4qauH`8L0Gc3U_9N0T=Zv*HZC?@7+6~%wHunl;v|uV5K(9ABW8j>m`)(y zr`_`3m!i_Q208$i-B8`%k9Qv9I!IEkTRR$2TkR9wqj?;bk<A2cdKCfGB$MbNZsN-r z_p|M|^Ulgnvf`|*#mMG%*w=a=9xdoxc}^9&B3=z@_?_8CXq==V|GFPKDzN9qGD68< ztp4SmYMu|Bg~dvYH!utMtr;Lps-Xj@cDyUwog0^BUwV9iGSzg+y5!^>7I;VF!t;m} z1*d?Ny?KbgGgo#B(^<A*zo@!)#du<c8Ta13dP0h)$^4)7{;5U_7se>s%VTGn3@4(K z{Wajv+HJLi&0K>N`rf>U>9ptUCG{H_Z$m$HpiUOHXs{>>58u33Gahpa+5US0@+rFq zkm$?vACG3?nx#F*??~a9(E~XtDC`^xlF^2)=<=s}3=rg2Yh<1u5$*fFi2siE9{pTn 
zq3~6Zsz{Rfbt7-n%|bjc&jbWoPu~N>zh31f3|-aN(wq?|da6At3KI0mHA77HN)3~< zxBYuSTOq3l8=&FNx8IOKCTKgymiAcrUf4NFGZD#p>_1gVK?3`pBdG=b-ZEE%v{@Q# zhJO^5RQ)^i`=g(=eRBd$aK(Fwl4Le((-ru)lrr)%3E6`e?}hw{4YgRxooYc9obmbf zTB9+#IFZpBvELPHcO7`!I16cx@3As^ed9vnfM5sVYj42JDZS>^PsBR(-_oza52az5 zF*n>s;w)XpQ$I|<@p%3`VW9I2kfzBLv-+$I8w>A@V{fVu!7VVTzh-EjU{SsNj(xnZ z<1LA_4>tIt1@)HvDd77cfcl&@7mFKsSO1B_EN0g2T=Hq|;>5hF>Y|P0eAvCovot*j z%*7r5R?})erD7X{+e>gur|7Elvf{Q49(yz3^%WN2Etn9ZENTQnE2}_);ZdZN)y7mP z^&Xk?<O3{C$q_;j`r*()hL5$Oj-HMV)uI*6OD3QvXx>Ka8;N_^&=z9wpa)Zpi!60( z+--eb`EuyDlOUo-_u@|U@RzCpWE^ZvU(!R&MD^Bb%^CtrIqGejg!7j;0Ym#=M^X3P ziAcv{Nx5W~=VkJVGT`-Vd<yJs+}JdP^M5b?rW&nlu={+TV`SUSvWL(N5~i*!e5}Qq zE;uRH&YGh?B*x>A&7h&dN6+XRY1M2s!d?Rfw`$*S3N@Dp3edK#!~w8vt@S2QvW#mA zP~R=|>BUL%e1E!ZinJfh>K4aXV*`v?5{Y8ZH0h;B8#WJafM9*3!HzFE&x1d<&9n6$ zmkV<ClN57|0u+wZ3!G^zM(cKybW0Sv+e&r;w1X_<GmLNfeU8_p;p3ADvV>aR-Bcg{ ztv{|L^uc-8S4%|J%aQJIp`$B!+S9QQ8n>+q*4x45F%+?s`$c4CyQlKrJj_!QXncs_ zb_630!jqO|Z_DEO1he7{s<P0<-AAX)iw3zIr@XqGPmbx3%PS>SIPojC3Sl6HS#g1E z^;aU!%3k((qnZ2hOpz67L*Y-c(bpK81oM1x^IsLfNY*ehZ8@c@ZCsbsY|TRO59~#0 z+*Q9`MM#mad5F-S;F5cWVG62Fn4$QnT9RvKo=_Pg`qAiX0>uOsI_sME&DQnRVi*I@ z4Z6cd3UOA6h{s#553lfcG4Dz@)@}Jd)w><)S^h6U#=?IsY{JR}5y<S8U};DV#<a~+ zwj-_jFv=s*kM}yg3Fgd8gJvyj$f_O$USkW%4+WbJqZeq@9CFm*U0n5u-P7jgE}(FC zlNiN3=A+Ut^h!|ZkR}nZkH@zJy{KpyZY$=YMV_)0e03JIQfnXN=aywcm-$#G%k?jz zD@<coMB}FG(p}&AU3?;&SWQ-6OJlFv%s!${V1Es{3M!F#r_i_zbW|--at$1M?bPcS zU@2sK1YaE|Spms8wMp^#kN=HZsdz>vg`Wt&%Ys}+P&2<3eu)DOTo2j~1&Ro>uSNCC zQL54HKO}lrFG8dkZsWa(IW=|X6YK1RBZZyn!QXnGfE+l7g5;KW0sewP=%}$lB?i3N zFm#9>YP9bU3~Bbo<(6_VgyQ{M4b;E!gyX673-wdaQukFUI8ZoJa?Ek*uzqy3=pqNv z_cdv_Iq3AB3^E8*4a&T>)xVAs9DYDd?XLkb3eNQb?Xye4Sg2b&-~Qfj)<Q$0lK%T= z)jRr}cHf>vnLy110cwK4DzUQ+{I*_D2E6aFa#~{JFa#vUY0kM|vEWQQ%Izmtu(w*G zkD=$U9V~<O6UyyK9QHo<F5psT%Hg&Q>6uwdvRqqF5t01lkuPZ_V{x>`#mEtq)kN#g z@=`3bWb$Y7`J!)i-P8lO1$$4Aeng_tq{qNt(cDnYhw+GK;}<ozPYV5&9CoH>WII}! 
z17TIW^SCMcA&?p!LDi)@_b{^qzbpkiV-)j0NlZp9ZrHv2i{9GAnKa`zSq80{-p#eJ z8fWx=&QT+#eX<hD>Zi$HVrX3qCkP0-3B50XO+PC4OrfO7R(EbDBeC2hY@T}cED4c7 zC&p@4g)&JEH<)m+r2Z8{FdCyqFLW72^HvB@!PDGp-KdJa+qxC-bn?DFKdSc8V375^ zd-Y=-U%(K`S7knuSGqvo^7@by`>U#MjqPuOwuV$Krl@iQjyX^w|H&NFYG%6K&Cil4 z%1gWzxTHG`2K-Z-@2wM_Q{Lg$&|(@%k<`@{{I$BS@~(_QC)O`eUf!TB1>2{+PVY&G z#z%j|iuL3MMGOD@5y*J|OJbj1H!u2%@(=CU!OM-iI-e+Y-?E3@-_dRI>^%@}M))Z4 z_~cL)Hv_=p$AKaP8A*)20yok>`ZborcN@DfRLlQ?m7la@HrqB4Y1B}rxuI_VYT$4F z`*2RoUfgksPspZ@2`B{Yr4MpD6Rmid`)YBiMz}2w9Dz5t(z@h10?p{yf45ce=2gw8 zJ7#1C;5a*~Ya{L3CReMunlDG6GA{AzA+auIR4KA>2jqpRIrIwi&N$p)M^owrM>3jg z7HZ3Hbsr&9VbpjM?rwvLHGffUTFOxS%_oe-x8ea80fE-eyLKj`5N+hBXe1qoA?lIL zO<NPYvAA#mb&D~9Los}yRmE@HYVNK|FcX+aeD`#4;a_cl#bsyHyi70cbJ+W?Y+4^v zg{e#c!e%IJu~ku*&UEYjTmPVE+!Tw{Y_RD70e07V=Btp&7`kz6VLBLg1F_B(19^V; z_BBBa103ouIC2l(kurovp@yo$C(vfo?Ep#YE!l3H*CnHuI-CK@gr9p-ch`we>yH^G zywlZw#oe6ju3{gIp+HPxIZmM#7ZjI^+dgWcXm5>3&1cei*zri>(LLfT46*y*it<@O zg{E5TB%iVL)|4gp8N2s3QNq<%^My0p&INHJdosAD?GU7?9xJy^d=7hKFMMG9YKfvv zkxZ1l-F1kLuxTeCk$CC*2NCQf)%V{qe?c?m`3D3L#`fB8d&mcKU(Zf`(ACj*AK>#O za`k>>4>E#$59jXkG$-J-iZS)v7`3;Nj$+LK`=3S&&uF5H@z5QibpdyO@*RW07|o>2 zUZQQ-2Ja#(Iaw2r>{M!kG5*JYidn8<lsmyT%Hd0>COdVkmEUlrGI!nxnK8w7)J)4Z zc$2FY{#j*haLQz%M)BJ|?FV43ozhyBG?$b`OQqnFaqwzLWzp*c7VCx$i$b9Hbv}4& zqpMcsE<J?0qGE4@l_+O`5|rO#POV9<?3x?PCoP3iXB3!G93!k4>6)i;_4Mcfd|jd% zjyl@VRmJDBbRwr32E#1qYr3q&*QH!5^|3(t%hG(s1EIc##?EmDvkI8)%;@XW)`+gk zqX}6DL((9Jqb5{+9l@GP(u}4<EAb+jFRh?^&+OTsiwjv@^j<ZmbMu5hS860E+yPhB zq2_q+=)F$NT6<;?=3vceQavnLrpnlV4sqBCJmwC8c)nSu*t*0z6>L!PAJdgoB!6<L z0cwz9O`IlJ54NiG1r6YH%SfP%#Yf24HCbg>o`G6IeM5*Py%%wUG)thDx=ctB(l<7J zFdSjpWIEifvlg*cSF-*n`^vHGC^LD}6GpZ1D<jT1wYN*4dpIl4Z<t35Sb3rOn#v+K zOPo-`vPacw&*8Xi&~_bQ@VcS6rd5e`3X3sd6MvX8p_Zgk`XXyrbu-G9dpKhUrzV&y zMK(<QL<4N29YeGN35k$y<Vdz^{-t>yFc?1~V=tXUk0pEoD}!~!l8t#jd)(&Phw_zb zk1D_x(@l<!ay3G=BxvCn8`5Nlu`R$FHD2Q8z-@pWz#4`m68E6GmYgibvV-%!n$F*m z2I53cLjjNG3Rp5A^(@nTokkxVZMaF7BCqKcS*;!&_A$dUoQxFW;RetK!<CIy;}Yg4 z0-+n+tRuYc(5IzjsSl~80fCn%CH$*xMfpOPCgke9KeUEK$+3T1tFIKjF(kFJ@t)D! 
zMIz&%p3)sd=V)BcxZqh3yF9f_i0?PVL0i@vBDBEtf<=Q^7n#4EK&=TSzb`{chx!Pf zXe`m0B~g0t^50VBAtBGYTxy>=D);deAUw0@i72rZfTaecoPpLH9DY%iWupdX{K-{( zbSO^W<3cdTH?0p2llDE1`6mzk6s&Rq(CobK{A=S`z2#-Bhd<OkLgm^BI#u&;=?zuU z+udoPd)R@aTai@{kBl0ClL{r%2P!d5%5>a3onDG*z;5{!z8z_;Kuv~_KNrI$1W&73 z!lJ;}BqU2qCJgl~jqHizD^S%wR(-=9B5tgtt!19b%a*fcOa6ccXt8xGL2vzi9EsI5 zcXL{OBlcG%rxqyw(mhOl@OBn#h8nq?jYRL5phWlIs7HcM#>tO9X;lKmWK=tjv9xo$ z_eFsFrF$>urd9f4QUX|-c})~YOmEG2N%$Kc8PyP#6qo%cDDeJvL>`aIGYP3%;Os@b zig3r3vX}*_RU1g*@eYxaJvFN(kRL@)OP|@Mxy(fuswq)Yc{qF}C!76%EJ=}++L0{# z`Ca;4o1iBO0BzzI;bGnsSXJa#CJHcIkq^pnPgJm;2y)`r?VQh^mP&?I@`-!29S#r< zb<!+Cj|m5+ZabKdflQ6XdLs|-ToVN&`HCC4aYzCj-z>^X^RCZl*C|t3#ZWIJm)@Lr zO9x)b^jWrAI3f(hwDJA_wBSz}iVY<c{Ns8?AeRKX@pk}2A|@mcGo$=VFd(p}<Undn zQmnGJ&kIL!X<H@ZNtfTAh#XY=knD1egE;0X3sNMS)k_>pi~rc8Flg)r&Dq1dLli%5 zo60cKSZR$x^zF&9so7MUC|B6mCX-A~(?sbZ{jU6V9>S%W<JO#%%+$Ug=W`(LQ6V5J z9gkTf5&UJ}mTY->vu?Chkncz7jOh{OzfOrh;Hjqvwh}Hcx~$~4Xq`60ktv}youMk< zG|K(QAya9WNo12Lh!i}YIoIZ(xiv^-t4q6b(1ox~E8RT|^X1Y9LqWL5P*P()X#M+U z|Jv*F`W+4jlC@-wJrRDRkNOD+1RV;{Bd<QHBZ!~ru2;W<bh;oet{*t4&O7a;XV7Jm rS@FXPm=he1V#rta_sbW@ST3WXTJ*PNI%fZtNgV6!#6W~@SfYD@U)t5{ literal 0 HcmV?d00001 diff --git a/mindspore/lite/micro/test/CMakeLists.txt b/mindspore/lite/micro/test/CMakeLists.txt index 0862bd70fb..6d1e9c976c 100644 --- a/mindspore/lite/micro/test/CMakeLists.txt +++ b/mindspore/lite/micro/test/CMakeLists.txt @@ -1,14 +1,15 @@ +add_definitions(-DUSE_GLOG) string(REPLACE "/test" "" MICRO_DIR ${CMAKE_CURRENT_SOURCE_DIR}) string(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") string(REPLACE " -fvisibility=hidden " " -fvisibility=default " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") -set(LITE_DIR ${MICRO_DIR}/..) -set(NNACL_DIR ${LITE_DIR}/nnacl) -set(3RD_DIR ${TOP_DIR}/third_party) +set(3RD_DIR ${TOP_DIR}/third_party) +set(LITE_DIR ${TOP_DIR}/mindspore/lite) set(BUILD_LITE "on") -include(${MICRO_DIR}/../../../cmake/external_libs/gtest.cmake) + +include(${TOP_DIR}/cmake/external_libs/gtest.cmake) include(${MICRO_DIR}/cmake/file_list.cmake) -include(${MICRO_DIR}/cmake/wrapper.cmake) +include(${MICRO_DIR}/cmake/package_wrapper.cmake) include_directories(${TOP_DIR}) include_directories(${TOP_DIR}/mindspore/core/) @@ -19,4 +20,4 @@ include_directories(${3RD_DIR}) add_executable(micro_test code_gen_test.cc ${FILE_SET}) add_dependencies(micro_test fbs_src) add_dependencies(micro_test fbs_inner_src) -target_link_libraries(micro_test dl mindspore::gtest ${SECUREC_LIBRARY}) +target_link_libraries(micro_test dl mindspore::gtest ${SECUREC_LIBRARY} mindspore::glog) diff --git a/mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.h b/mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.h deleted file mode 100644 index 5852bcb0c4..0000000000 --- a/mindspore/lite/micro/wrapper/fp32/matmul_fp32_wrapper.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_MICRO_ADAPTER_FP32_MATMUL_FP32_WRAPPER_H_ -#define MINDSPORE_LITE_MICRO_ADAPTER_FP32_MATMUL_FP32_WRAPPER_H_ -#include <string.h> -#include "nnacl/fp32/matmul_fp32.h" -#ifdef __cplusplus -extern "C" { -#endif - -void InitMatrixA(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); - -void InitMatrixB(const float *src_ptr, float *dst_ptr, const MatMulParameter *params_, bool is_vector_a); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_MICRO_ADAPTER_FP32_MATMUL_FP32_WRAPPER_H_ diff --git a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c deleted file mode 100644 index a179c668d5..0000000000 --- a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.c +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "wrapper/int8/add_int8_wrapper.h" -#include "nnacl/errorcode.h" - -int AddBroadcastRun(void *cdata, int task_id) { - AddArgs *args = (AddArgs *)(cdata); - int stride = UP_DIV(args->out_size_, args->thread_count_); - int real_out_count = MSMIN(stride, args->out_size_ - stride * task_id); - if (real_out_count <= 0) { - return NNACL_OK; - } - int8_t *cur_in0 = NULL; - int8_t *cur_in1 = NULL; - int8_t *cur_out = NULL; - for (int i = 0; i < real_out_count; i++) { - if (args->arith_para_->in_elements_num0_ == args->arith_para_->out_elements_num_) { - cur_in0 = args->input0_data_ + task_id * stride * args->in_size_ + i * args->in_size_; - cur_in1 = args->input1_data_; - cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; - } else { - cur_in0 = args->input0_data_; - cur_in1 = args->input1_data_ + task_id * stride * args->in_size_ + i * args->in_size_; - cur_out = args->output_data_ + task_id * stride * args->in_size_ + i * args->in_size_; - } - AddInt8(cur_in0, cur_in1, cur_out, args->in_size_, &args->para_); - } - return NNACL_OK; -} - -int AddRun(void *cdata, int task_id) { - AddArgs *args = (AddArgs *)(cdata); - /* no need broadcast */ - int stride = UP_DIV(args->elements_num_, args->thread_count_); - int rest_count = args->elements_num_ - task_id * stride; - int real_count = MSMIN(stride, rest_count); - if (real_count <= 0) { - return NNACL_OK; - } - int8_t *cur_in0 = args->input0_data_ + stride * task_id; - int8_t *cur_in1 = args->input1_data_ + stride * task_id; - int8_t *cur_out = args->output_data_ + stride * task_id; - if (args->support_opt_add_) { - int8_t *ptr_in = args->arith_para_->in_elements_num0_ == 1 ? cur_in1 : cur_in0; - int8_t element_in = args->arith_para_->in_elements_num0_ == 1 ? args->input0_data_[0] : args->input1_data_[0]; - AddQuantQrgs *ptr_args = - args->arith_para_->in_elements_num0_ == 1 ? &args->para_.in1_args_ : &args->para_.in0_args_; - AddQuantQrgs *ele_args = - args->arith_para_->in_elements_num0_ == 1 ? 
&args->para_.in0_args_ : &args->para_.in1_args_; - AddOptInt8(ptr_in, element_in, cur_out, rest_count, &args->para_, ptr_args, ele_args); - } else { - AddInt8(cur_in0, cur_in1, cur_out, rest_count, &args->para_); - } - return NNACL_OK; -} diff --git a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h deleted file mode 100644 index c23e32e3ae..0000000000 --- a/mindspore/lite/micro/wrapper/int8/add_int8_wrapper.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ -#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ -#include <string.h> -#include "nnacl/int8/matmul_int8.h" -#include "src/runtime/thread_pool.h" -#include "nnacl/int8/add_int8.h" -#include "nnacl/arithmetic.h" - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct { - AddQuantParameter para_; - ArithmeticParameter *arith_para_; - int in_size_; - int out_size_; - int thread_count_; - int elements_num_; - bool support_opt_add_; - int8_t *input0_data_; - int8_t *input1_data_; - int8_t *output_data_; -} AddArgs; - -int AddBroadcastRun(void *cdata, int task_id); - -int AddRun(void *cdata, int task_id); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_MICRO_WRAPPER_INT8_ADD_INT8_WRAPPER_H_ diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c b/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c deleted file mode 100644 index 109532eb48..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.c +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "wrapper/int8/conv1x1_init_int8_wrapper.h" -#include <memory.h> -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/errorcode.h" - -int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel, - int32_t output_channel, int32_t input_zp, bool support_optimize, bool filter_peroc, - int8_t **packed_weight, int32_t **bias_data) { - if (packed_weight == NULL || bias_data == NULL) { - return NNACL_ERR; - } -#ifdef ENABLE_ARM32 - /* InitWeightBiasArm32 */ - /* weight */ - size_t size = UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C2NUM) * sizeof(int8_t); - int8_t *packed_weight_ = (int8_t *)(malloc(size)); - if (packed_weight_ == NULL) { - return NNACL_ERR; - } - memset(packed_weight_, 0, size); - RowMajor2Row2x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel); - /* bias */ - size = UP_ROUND(output_channel, C2NUM); - int32_t *bias_data_ = (int32_t *)malloc(size * sizeof(int32_t)); - if (bias_data_ == NULL) { - free(packed_weight_); - return NNACL_ERR; - } - memset(bias_data_, 0, size * sizeof(int32_t)); - if (src_bias != NULL) { - memcpy(bias_data_, src_bias, output_channel * sizeof(int32_t)); - } -#else - /* InitWeightBias */ - /* weight */ - size_t size = support_optimize ? UP_ROUND(input_channel, C4NUM) * UP_ROUND(output_channel, C16NUM) * sizeof(int8_t) - : UP_ROUND(input_channel, C16NUM) * UP_ROUND(output_channel, C4NUM) * sizeof(int8_t); - int8_t *packed_weight_ = (int8_t *)(malloc(size)); - if (packed_weight_ == NULL) { - return NNACL_ERR; - } - memset(packed_weight_, 0, size); - if (support_optimize) { - RowMajor2Row4x16MajorInt8(src_weight, packed_weight_, output_channel, input_channel); - } else { - RowMajor2Row16x4MajorInt8(src_weight, packed_weight_, output_channel, input_channel); - } - /* bias */ - size = support_optimize ? UP_ROUND(output_channel, C16NUM) : UP_ROUND(output_channel, C4NUM); - int32_t *bias_data_ = (int32_t *)malloc(size * sizeof(int32_t)); - if (bias_data_ == NULL) { - free(packed_weight_); - return NNACL_ERR; - } - memset(bias_data_, 0, size * sizeof(int32_t)); - if (src_bias != NULL) { - memcpy(bias_data_, src_bias, output_channel * sizeof(int32_t)); - } -#endif - /* InitBiasByzp */ - /* bias = bias - v2 x zp1 + zp1 x zp2 */ - for (int oc = 0; oc < output_channel; oc++) { - int32_t weight_sum_value = 0; - int32_t filter_zp = (filter_peroc) ? filter_zps[oc] : filter_zps[0]; - for (int ic = 0; ic < input_channel; ic++) { - weight_sum_value += src_weight[oc * input_channel + ic]; - } - bias_data_[oc] += filter_zp * input_zp * input_channel - weight_sum_value * input_zp; - } - - *packed_weight = packed_weight_; - *bias_data = bias_data_; - return NNACL_OK; -} diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h deleted file mode 100644 index 462574366d..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_init_int8_wrapper.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_MICRO_INT8_CONV1X1_INIT_INT8_H_ -#define MINDSPORE_LITE_MICRO_INT8_CONV1X1_INIT_INT8_H_ - -#include <stdint.h> -#include <stdbool.h> -#include "nnacl/conv_parameter.h" - -int Conv1x1Init(int8_t *src_weight, int32_t *src_bias, int32_t *filter_zps, int32_t input_channel, - int32_t output_channel, int32_t input_zp, bool support_optimize, bool filter_peroc, - int8_t **packed_weight, int32_t **bias_data); - -#endif // MINDSPORE_LITE_MICRO_INT8_CONV1X1_INIT_INT8_H_ diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c b/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c deleted file mode 100644 index ddaa1ecee8..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.c +++ /dev/null @@ -1,224 +0,0 @@ -/* - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "wrapper/int8/conv1x1_run_int8_wrapper.h" -#include "nnacl/base/conv1x1_base.h" -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/int8/pack_int8.h" -#include "nnacl/int8/conv1x1_int8.h" -#include "nnacl/errorcode.h" - -void Pre1x1Trans(Conv1x1Args *args, int8_t *src_input, int8_t *src_output) { - args->output_ptr_ = src_output; - if (args->pre_trans_input_) { - Conv1x1InputPack(src_input, args->input_ptr_, args->conv_param_, sizeof(int8_t)); - } else { - args->input_ptr_ = src_input; - } -} - -int OcOptPre(void *cdata, int task_id) { - Conv1x1Args *args = (Conv1x1Args *)(cdata); - int cur_stride = args->thread_stride_hw_ * C4NUM; - int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; - int cur_hw = MSMIN(cur_stride, res_stride); - if (cur_hw <= 0) { - return NNACL_OK; - } - int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_; - int8_t *hw_packed_in = args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_4_; - int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM; - - if (args->filter_peroc_) { - PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, 1); - } else { - PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, - args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_); - } - return NNACL_OK; -} - -int RunArm64OptOc(void *cdata, int task_id) { - Conv1x1Args *args = (Conv1x1Args *)(cdata); - int stride = args->thread_stride_oc_ * C16NUM; - int cur_stride = task_id * stride; - int res_stride = args->matmul_param_->col_ - cur_stride; - int cur_oc = MSMIN(stride, res_stride); - if (cur_oc <= 0) { - return NNACL_OK; - } - - bool filter_peroc = args->filter_peroc_; - int32_t *cur_left_shift = - filter_peroc ? 
args->left_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.left_shift_; - int32_t *cur_right_shift = - filter_peroc ? args->right_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.right_shift_; - int32_t *cur_multiplier = - filter_peroc ? args->multiplier_ + cur_stride : args->conv_param_->conv_quant_arg_.quant_multiplier_; - int32_t *cur_zp = filter_peroc ? args->filter_zp_ptr_ + cur_stride : args->filter_zp_ptr_; - - Conv1x1Int8Opt(args->packed_input_, args->packed_weight_ + cur_stride * args->matmul_param_->deep_4_, - args->output_ptr_ + cur_stride, args->input_sum_, args->bias_data_ + cur_stride, - args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_4_, cur_left_shift, cur_right_shift, - cur_multiplier, args->conv_param_, args->matmul_func_, cur_zp); - return NNACL_OK; -} - -int RunArmOc(void *cdata, int task_id) { - Conv1x1Args *args = (Conv1x1Args *)(cdata); -#ifdef ENABLE_ARM32 - int col_tile = C2NUM; -#else - int col_tile = C4NUM; -#endif - int stride = args->thread_stride_oc_ * col_tile; - int cur_stride = task_id * stride; - int res_stride = args->matmul_param_->col_ - cur_stride; - int cur_oc = MSMIN(stride, res_stride); - if (cur_oc <= 0) { - return NNACL_OK; - } - - bool filter_peroc = args->filter_peroc_; - int32_t *cur_left_shift = - filter_peroc ? args->left_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.left_shift_; - int32_t *cur_right_shift = - filter_peroc ? args->right_shift_ + cur_stride : args->conv_param_->conv_quant_arg_.right_shift_; - int32_t *cur_multiplier = - filter_peroc ? args->multiplier_ + cur_stride : args->conv_param_->conv_quant_arg_.quant_multiplier_; - int32_t *cur_zp = filter_peroc ? args->filter_zp_ptr_ + cur_stride : args->filter_zp_ptr_; - - Conv1x1Int8(args->packed_input_, args->packed_weight_ + cur_stride * args->matmul_param_->deep_16_, - args->output_ptr_ + cur_stride, args->input_sum_, args->bias_data_ + cur_stride, - args->matmul_param_->row_, cur_oc, args->matmul_param_->deep_16_, cur_left_shift, cur_right_shift, - cur_multiplier, args->conv_param_, cur_zp); - return NNACL_OK; -} - -int RunArm64OptHw(void *cdata, int task_id) { - Conv1x1Args *args = (Conv1x1Args *)(cdata); - int cur_stride = args->thread_stride_hw_ * C4NUM; - int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; - int cur_hw = MSMIN(cur_stride, res_stride); - if (cur_hw <= 0) { - return NNACL_OK; - } - int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_; - int8_t *hw_out = args->output_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->output_channel_; - int8_t *hw_packed_in = args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_4_; - int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM; - - if (args->filter_peroc_) { - PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, 1); - } else { - PackInput4x4AndInputSumPert(hw_in, hw_packed_in, hw_input_sum, args->matmul_param_->deep_, cur_hw, - args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_); - } - - Conv1x1Int8Opt(hw_packed_in, args->packed_weight_, hw_out, hw_input_sum, args->bias_data_, cur_hw, - args->matmul_param_->col_, args->matmul_param_->deep_4_, args->left_shift_, args->right_shift_, - args->multiplier_, args->conv_param_, args->matmul_func_, args->filter_zp_ptr_); - return NNACL_OK; -} - -int RunArmHw(void *cdata, int task_id) { - Conv1x1Args 
*args = (Conv1x1Args *)(cdata); - int cur_stride = args->thread_stride_hw_ * C4NUM; - int res_stride = args->matmul_param_->row_ - task_id * args->thread_stride_hw_ * C4NUM; - int cur_hw = MSMIN(cur_stride, res_stride); - if (cur_hw <= 0) { - return NNACL_OK; - } - - int8_t *hw_in = args->input_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->input_channel_; - int8_t *hw_out = args->output_ptr_ + task_id * args->thread_stride_hw_ * C4NUM * args->conv_param_->output_channel_; - int8_t *hw_packed_in = - args->packed_input_ + task_id * args->thread_stride_hw_ * C4NUM * args->matmul_param_->deep_16_; - int32_t *hw_input_sum = args->input_sum_ + task_id * args->thread_stride_hw_ * C4NUM; - - RowMajor2Row16x4MajorInt8(hw_in, hw_packed_in, cur_hw, args->matmul_param_->deep_); - - if (args->filter_peroc_) { - PackInputSum16x4PerLayer(hw_packed_in, hw_input_sum, 1, UP_ROUND(cur_hw, C4NUM), args->matmul_param_->deep_16_); - } else { - PackInputSum16x4PerLayer(hw_packed_in, hw_input_sum, args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_, - UP_ROUND(cur_hw, C4NUM), args->matmul_param_->deep_16_); - } - - Conv1x1Int8(hw_packed_in, args->packed_weight_, hw_out, hw_input_sum, args->bias_data_, cur_hw, - args->matmul_param_->col_, args->matmul_param_->deep_16_, args->left_shift_, args->right_shift_, - args->multiplier_, args->conv_param_, args->filter_zp_ptr_); - return NNACL_OK; -} - -void Conv1x1Run(int8_t *src_in, Conv1x1Args *args, struct ThreadPool *thread_pool, int thread_num, int8_t *src_out) { - int row_pack_count = C4NUM; - int col_pack_count; - -#ifdef ENABLE_ARM32 - col_pack_count = C2NUM; -#else - if (args->support_optimize_) { - col_pack_count = C16NUM; - } else { - col_pack_count = C4NUM; - } -#endif - int hw_thread_count = UP_DIV(args->matmul_param_->row_, row_pack_count); - int oc_thread_count = UP_DIV(args->matmul_param_->col_, col_pack_count); - size_t thread_count_hw = MSMIN(thread_num, hw_thread_count); - args->thread_stride_hw_ = UP_DIV(hw_thread_count, thread_count_hw); - size_t thread_count_oc = MSMIN(thread_num, oc_thread_count); - args->thread_stride_oc_ = UP_DIV(oc_thread_count, thread_count_oc); - bool parallel_by_oc = oc_thread_count > thread_num; - - for (int batch_index = 0; batch_index < args->conv_param_->input_batch_; batch_index++) { - Pre1x1Trans(args, - src_in + batch_index * args->conv_param_->input_h_ * args->conv_param_->input_w_ * - args->conv_param_->input_channel_, - src_out + batch_index * args->matmul_param_->row_ * args->matmul_param_->col_); - if (parallel_by_oc) { - /* input transpose and input sum */ - if (args->support_optimize_) { - ParallelLaunch(thread_pool, OcOptPre, args, thread_count_hw); - } else { - RowMajor2Row16x4MajorInt8(args->input_ptr_, args->packed_input_, args->matmul_param_->row_, - args->matmul_param_->deep_); - if (args->filter_peroc_) { - PackInputSum16x4PerLayer(args->packed_input_, args->input_sum_, 1, args->matmul_param_->row_4_, - args->matmul_param_->deep_16_); - } else { - PackInputSum16x4PerLayer(args->packed_input_, args->input_sum_, - args->conv_param_->conv_quant_arg_.filter_quant_args_[0].zp_, - args->matmul_param_->row_4_, args->matmul_param_->deep_16_); - } - } - /* matmul parallel by oc */ - if (args->support_optimize_) { - ParallelLaunch(thread_pool, RunArm64OptOc, args, thread_count_oc); - } else { - ParallelLaunch(thread_pool, RunArmOc, args, thread_count_oc); - } - } else { - /* matmul parallel by hw */ - if (args->support_optimize_) { - ParallelLaunch(thread_pool, RunArm64OptHw, args, 
thread_count_hw); - } else { - ParallelLaunch(thread_pool, RunArmHw, args, thread_count_hw); - } - } - } -} diff --git a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h deleted file mode 100644 index 10c2366009..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv1x1_run_int8_wrapper.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_MICRO_INT8_CONV1X1_RUN_H_ -#define MINDSPORE_LITE_MICRO_INT8_CONV1X1_RUN_H_ - -#include <stdint.h> -#include <stdbool.h> -#include "nnacl/conv_parameter.h" -#include "nnacl/matmul_parameter.h" -#include "src/runtime/thread_pool.h" - -typedef struct { - int32_t *input_sum_; /* per-oc */ - int32_t *filter_zp_ptr_; /* per-oc up round */ - int32_t *left_shift_; /* per-oc up round */ - int32_t *right_shift_; /* per-oc up round */ - int32_t *multiplier_; /* per-oc up round */ - int8_t *packed_weight_; - int32_t *bias_data_; - int8_t *packed_input_; - int8_t *input_ptr_; - int8_t *output_ptr_; - size_t thread_stride_hw_; - size_t thread_stride_oc_; - ConvParameter *conv_param_; - MatMulParameter *matmul_param_; - MATMUL_OPT_DP_FUNC matmul_func_; - bool pre_trans_input_; - bool support_optimize_; - bool filter_peroc_; -} Conv1x1Args; - -void Conv1x1Run(int8_t *src_in, Conv1x1Args *args, struct ThreadPool *thread_pool, int thread_num, int8_t *src_out); - -#endif // MINDSPORE_LITE_MICRO_INT8_CONV1X1_RUN_H_ diff --git a/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c b/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c deleted file mode 100644 index 71e8db0fee..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
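The deleted Conv1x1Run above picks one of two partitioning schemes per batch: parallel over output channels when there are more oc tiles than threads, otherwise parallel over spatial (hw) tiles, with each task deriving its slice from task_id, a per-task tile stride, and the tile width. A minimal standalone sketch of that UP_DIV/MSMIN tiling arithmetic (the macros mirror nnacl/op_base.h; the sizes are made up):

    #include <stdio.h>

    #define UP_DIV(x, y) (((x) + (y) - 1) / (y))
    #define MSMIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void) {
      int row = 35, tile = 4, thread_num = 4;                /* 35 rows, C4NUM-wide tiles */
      int tile_count = UP_DIV(row, tile);                    /* 9 tiles */
      int thread_count = MSMIN(thread_num, tile_count);
      int thread_stride = UP_DIV(tile_count, thread_count);  /* tiles per task */
      for (int task_id = 0; task_id < thread_count; ++task_id) {
        int start = task_id * thread_stride * tile;
        int cur = MSMIN(thread_stride * tile, row - start);  /* same guard as cur_hw/cur_oc */
        if (cur <= 0) continue;                              /* trailing tasks may be empty */
        printf("task %d handles rows [%d, %d)\n", task_id, start, start + cur);
      }
      return 0;
    }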
- */ - -#include "wrapper/int8/conv_init_int8_wrapper.h" -#include <memory.h> -#include "nnacl/op_base.h" -#include "nnacl/int8/matmul_int8.h" -#include "nnacl/errorcode.h" - -int ConvInit(int8_t *origin_weight, const int32_t *ori_bias, const int32_t *filter_quant_zps, int kernel_h, - int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc, - bool support_optimize, int8_t **packed_weight, int32_t **bias_data) { - int8_t *packed_weight_ = NULL; - int32_t *bias_data_ = NULL; - int kernel_plane = kernel_h * kernel_w; - int up_round_deep; - int up_round_oc; -#ifdef ENABLE_ARM32 - up_round_oc = UP_ROUND(output_channel, C2NUM); - up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM); -#else - if (support_optimize) { - up_round_oc = UP_ROUND(output_channel, C8NUM); - up_round_deep = UP_ROUND(kernel_plane * input_channel, C4NUM); - } else { - up_round_oc = UP_ROUND(output_channel, C4NUM); - up_round_deep = UP_ROUND(kernel_plane * input_channel, C16NUM); - } -#endif - int pack_weight_size = up_round_oc * up_round_deep; - size_t bias_size = up_round_oc * sizeof(int32_t); - - // init weight - packed_weight_ = (int8_t *)(malloc(pack_weight_size)); - if (packed_weight_ == NULL) { - return NNACL_ERR; - } - memset(packed_weight_, 0, pack_weight_size); -#ifdef ENABLE_ARM32 - RowMajor2Row2x16MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane); -#else - if (support_optimize) { - RowMajor2Row8x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane); - } else { - RowMajor2Row16x4MajorInt8(origin_weight, packed_weight_, output_channel, input_channel * kernel_plane); - } -#endif - - // init bias - bias_data_ = (int32_t *)(malloc(bias_size)); - if (bias_data_ == NULL) { - free(packed_weight_); - return NNACL_ERR; - } - memset(bias_data_, 0, bias_size); - if (ori_bias != NULL) { - memcpy(bias_data_, ori_bias, output_channel * sizeof(int32_t)); - } - - for (int oc = 0; oc < output_channel; oc++) { - int32_t filter_zp = filter_quant_zps[0]; - if (filter_peroc) { - filter_zp = filter_quant_zps[oc]; - } - int32_t weight_sum_value = up_round_deep * filter_zp; - for (int i = 0; i < kernel_plane * input_channel; i++) { - weight_sum_value += origin_weight[oc * kernel_plane * input_channel + i] - filter_zp; - } - bias_data_[oc] += filter_zp * input_zp * up_round_deep - weight_sum_value * input_zp; - } - - *packed_weight = packed_weight_; - *bias_data = bias_data_; - return NNACL_OK; -} diff --git a/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h deleted file mode 100644 index 0eff8d7a3f..0000000000 --- a/mindspore/lite/micro/wrapper/int8/conv_init_int8_wrapper.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
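The deleted ConvInit above folds the input-zero-point cross term into the bias: an int8 convolution accumulates sums of (x - zp_in) * (w - zp_f), and the x-independent part -zp_in * sum(w - zp_f) can be precomputed per output channel, which is exactly what the final loop adds to bias_data_. A self-contained check of the identity that loop relies on (all values are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      int32_t w[4] = {7, -3, 12, 0};  /* weights of one output channel */
      int32_t zp_f = 2, zp_in = -5;   /* filter / input zero points */
      int32_t D = 8;                  /* up-rounded depth, as in ConvInit */
      int32_t weight_sum = D * zp_f;  /* same seeding as weight_sum_value */
      int32_t centered = 0;
      for (int i = 0; i < 4; ++i) {
        weight_sum += w[i] - zp_f;
        centered += w[i] - zp_f;
      }
      /* what ConvInit adds to the bias ... */
      int32_t folded = zp_f * zp_in * D - weight_sum * zp_in;
      /* ... equals the zero-point correction -zp_in * sum(w - zp_f) */
      assert(folded == -zp_in * centered);
      return 0;
    }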
- */ -#ifndef MINDSPORE_LITE_MICRO_INT8_CONV_INIT_H_ -#define MINDSPORE_LITE_MICRO_INT8_CONV_INIT_H_ - -#include <stdint.h> -#include <stdbool.h> - -int ConvInit(int8_t *origin_weight, const int32_t *ori_bias, const int32_t *filter_quant_zps, int kernel_h, - int kernel_w, int input_channel, int output_channel, int32_t input_zp, bool filter_peroc, - bool support_optimize, int8_t **packed_weight, int32_t **bias_data); - -#endif // MINDSPORE_LITE_MICRO_INT8_CONV_INIT_H_ diff --git a/mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.h b/mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.h deleted file mode 100644 index 27a64007a6..0000000000 --- a/mindspore/lite/micro/wrapper/int8/matmul_int8_wrapper.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_MICRO_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ -#define MINDSPORE_LITE_MICRO_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ -#include <string.h> -#include "nnacl/int8/matmul_int8.h" -#ifdef __cplusplus -extern "C" { -#endif - -void InitInt8MatrixA(int8_t *src_ptr, int32_t *input_sums, int8_t *dst_ptr, int batch, int row, int deep, int input_zp, - const int *weight_zp, bool a_transpose); - -void InitInt8MatrixB(int8_t *src_ptr, int32_t *weight_bias_sums_batch_, int8_t *dst_ptr, int batch, int deep, int col, - int col_4, int deep_16, int input_zp, int *weight_zp, const int *bias_ptr, bool b_transpose); - -#ifdef __cplusplus -} -#endif - -#endif // MINDSPORE_LITE_MICRO_WRAPPER_INT8_MATMUL_INT8_WRAPPER_H_ diff --git a/mindspore/lite/nnacl/CMakeLists.txt b/mindspore/lite/nnacl/CMakeLists.txt index fcc6558ed7..3f7da15e73 100644 --- a/mindspore/lite/nnacl/CMakeLists.txt +++ b/mindspore/lite/nnacl/CMakeLists.txt @@ -21,6 +21,7 @@ file(GLOB KERNEL_SRC ${NNACL_DIR}/*.c ${NNACL_DIR}/fp32/*.c ${NNACL_DIR}/int8/*.c + ${NNACL_DIR}/infer/*.c ${NNACL_DIR}/base/*.c ) diff --git a/mindspore/lite/nnacl/arithmetic.h b/mindspore/lite/nnacl/arithmetic.h index d4f6a11c4c..198a2b6fce 100644 --- a/mindspore/lite/nnacl/arithmetic.h +++ b/mindspore/lite/nnacl/arithmetic.h @@ -40,6 +40,7 @@ typedef struct ArithmeticParameter { int multiples0_[10]; int multiples1_[10]; + int eltwise_mode_; // eltwise need } ArithmeticParameter; #endif // MINDSPORE_LITE_NNACL_ARTITHMETIC_H_ diff --git a/mindspore/lite/nnacl/base/tile_base.h b/mindspore/lite/nnacl/base/tile_base.h index 679ea3d486..7a499a3aa7 100644 --- a/mindspore/lite/nnacl/base/tile_base.h +++ b/mindspore/lite/nnacl/base/tile_base.h @@ -24,6 +24,8 @@ typedef struct TileParameter { OpParameter op_parameter_; int multiples_[5]; int dims_[5]; + size_t dims_size_; + size_t multiples_size_; // shape correlative int in_shape_[5]; diff --git a/mindspore/lite/nnacl/concat_parameter.h b/mindspore/lite/nnacl/concat_parameter.h index 8b22e93468..35386464e5 100644 --- a/mindspore/lite/nnacl/concat_parameter.h +++ b/mindspore/lite/nnacl/concat_parameter.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_NNACL_CONCAT_PARAMETER_H_ 
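Among the header changes above, TileParameter gains explicit dims_size_ and multiples_size_ fields so consumers of the fixed-size arrays know how many entries are valid; the new C shape-inference sources added to the nnacl glob have no std::vector to carry that length. A minimal usage sketch, assuming the struct as patched (the driver values are hypothetical):

    #include <stdio.h>
    #include "nnacl/base/tile_base.h"

    static void PrintMultiples(const TileParameter *p) {
      for (size_t i = 0; i < p->multiples_size_; ++i) {  /* no sentinel scanning */
        printf("multiples[%zu] = %d\n", i, p->multiples_[i]);
      }
    }

    int main(void) {
      TileParameter param = {0};
      param.multiples_[0] = 2;
      param.multiples_[1] = 3;
      param.multiples_size_ = 2;  /* only two of the five slots are meaningful */
      PrintMultiples(&param);
      return 0;
    }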
#include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" typedef struct ConcatParameter { OpParameter op_parameter_; diff --git a/mindspore/lite/nnacl/conv_parameter.h b/mindspore/lite/nnacl/conv_parameter.h index 174240b894..95934df573 100644 --- a/mindspore/lite/nnacl/conv_parameter.h +++ b/mindspore/lite/nnacl/conv_parameter.h @@ -21,7 +21,7 @@ #include <arm_neon.h> #endif #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" typedef struct ConvParameter { OpParameter op_parameter_; @@ -51,6 +51,9 @@ typedef struct ConvParameter { int output_unit_; PadMode pad_mode_; ActType act_type_; + int channel_multiplie_; + int output_padding_w; + int output_padding_h; } ConvParameter; typedef struct SlidingWindowParam { diff --git a/mindspore/lite/nnacl/crop_parameter.h b/mindspore/lite/nnacl/crop_parameter.h index 4ab5c58303..6730a0be6e 100644 --- a/mindspore/lite/nnacl/crop_parameter.h +++ b/mindspore/lite/nnacl/crop_parameter.h @@ -18,7 +18,7 @@ #define MINDSPORE_LITE_NNACL_CROP_PARAMETER_H_ #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" typedef struct CropParameter { OpParameter op_parameter_; diff --git a/mindspore/lite/nnacl/errorcode.h b/mindspore/lite/nnacl/errorcode.h index 50d7d76bce..18a50290cc 100644 --- a/mindspore/lite/nnacl/errorcode.h +++ b/mindspore/lite/nnacl/errorcode.h @@ -22,6 +22,8 @@ typedef enum ErrorCodeCommonEnum { NNACL_ERR = 1, NNACL_NULL_PTR, NNACL_PARAM_INVALID, + NNACL_INFER_INVALID, + NNACL_INPUT_TENSOR_ERROR, NNACL_COMMON_END = 9999 } ErrorCodeCommonEnum; diff --git a/mindspore/lite/nnacl/fp16/activation_fp16.h b/mindspore/lite/nnacl/fp16/activation_fp16.h index 3e8a2309f5..6fdb3220a1 100644 --- a/mindspore/lite/nnacl/fp16/activation_fp16.h +++ b/mindspore/lite/nnacl/fp16/activation_fp16.h @@ -21,7 +21,7 @@ #endif #include <math.h> #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/fixed_point.h" +#include "nnacl/int8/fixed_point.h" #ifdef __cplusplus extern "C" { diff --git a/mindspore/lite/nnacl/fp16/lstm_fp16.c b/mindspore/lite/nnacl/fp16/lstm_fp16.c index 8ffd6e174d..ac8071600e 100644 --- a/mindspore/lite/nnacl/fp16/lstm_fp16.c +++ b/mindspore/lite/nnacl/fp16/lstm_fp16.c @@ -93,39 +93,38 @@ int ElementOptMulAccFp16(const float16_t *input0, const float16_t input1, float1 void UpdataStateFp16(float16_t *cell_state, float16_t *forget_gate, const float16_t *input_gate, const float16_t *cell_gate, float16_t *state_buffer, int batch, int hidden_size, - float16_t smooth) { - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { // smooth * old_cell_state + float16_t zoneout) { + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { // zoneout * old_cell_state memcpy(state_buffer, cell_state, batch * hidden_size * sizeof(float16_t)); ArithmeticParameter parameter; parameter.in_elements_num0_ = batch * hidden_size; parameter.in_elements_num1_ = 1; - ElementOptMulFp16(state_buffer, &smooth, state_buffer, batch * hidden_size, &parameter); + ElementOptMulFp16(state_buffer, &zoneout, state_buffer, batch * hidden_size, &parameter); } ElementMulFp16(forget_gate, cell_state, cell_state, batch * hidden_size); ElementMulAccFp16(input_gate, cell_gate, cell_state, batch * hidden_size); - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { // (1 - smooth) * new_cell_state - ElementOptMulAccFp16(cell_state, 1 - smooth, state_buffer, batch * hidden_size); + if (!(zoneout >= 
-FLT_EPSILON && zoneout <= FLT_EPSILON)) { // (1 - zoneout) * new_cell_state + ElementOptMulAccFp16(cell_state, 1 - zoneout, state_buffer, batch * hidden_size); } } void UpdataOutputFp16(const float16_t *cell_state, float16_t *output_gate, float16_t *hidden_state, - float16_t *state_buffer_in, int batch, int hidden_size, float16_t smooth) { - float16_t *state_buffer = state_buffer_in + batch * hidden_size; - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { + float16_t *state_buffer, int batch, int hidden_size, float16_t zoneout) { + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { memcpy(state_buffer, hidden_state, batch * hidden_size * sizeof(float16_t)); ArithmeticParameter parameter; parameter.in_elements_num0_ = batch * hidden_size; parameter.in_elements_num1_ = 1; - ElementOptMulFp16(state_buffer, &smooth, state_buffer, batch * hidden_size, &parameter); + ElementOptMulFp16(state_buffer, &zoneout, state_buffer, batch * hidden_size, &parameter); } TanhFp16(cell_state, hidden_state, batch * hidden_size); ElementMulFp16(hidden_state, output_gate, hidden_state, batch * hidden_size); - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { - ElementOptMulAccFp16(hidden_state, 1 - smooth, state_buffer, batch * hidden_size); + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { + ElementOptMulAccFp16(hidden_state, 1 - zoneout, state_buffer, batch * hidden_size); } } @@ -151,7 +150,7 @@ void UpdateLstmGateFp16(float16_t *gate_buffer, const float16_t *input, const fl void LstmStepUnitFp16(float16_t *output, const float16_t *input, const float16_t *input_weight, const float16_t *state_weight, const float16_t *bias, float16_t *hidden_state, - float16_t *cell_state, float16_t *gate_buffer, float16_t *state_buffer, + float16_t *cell_state, float16_t *gate_buffer, float16_t *state_buffer[2], float16_t *matmul_buffer[2], const LstmParameter *lstm_param) { bool is_vec = lstm_param->batch_ == 1; // input * weight @@ -192,26 +191,28 @@ void LstmStepUnitFp16(float16_t *output, const float16_t *input, const float16_t // update cell_gate TanhFp16(cell_gate, cell_gate, lstm_param->batch_ * lstm_param->hidden_size_); // update cell state - UpdataStateFp16(cell_state, forget_gate, input_gate, cell_gate, state_buffer, lstm_param->batch_, - lstm_param->hidden_size_, lstm_param->smooth_); + UpdataStateFp16(cell_state, forget_gate, input_gate, cell_gate, state_buffer[0], lstm_param->batch_, + lstm_param->hidden_size_, lstm_param->zoneout_cell_); // update output_gate SigmoidFp16(output_gate, output_gate, lstm_param->batch_ * lstm_param->hidden_size_); // update output - UpdataOutputFp16(cell_state, output_gate, hidden_state, state_buffer, lstm_param->batch_, lstm_param->hidden_size_, - lstm_param->smooth_); + UpdataOutputFp16(cell_state, output_gate, hidden_state, state_buffer[1], lstm_param->batch_, lstm_param->hidden_size_, + lstm_param->zoneout_hidden_); memcpy(output, hidden_state, lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float16_t)); - if (!(lstm_param->smooth_ >= -FLT_EPSILON && lstm_param->smooth_ <= FLT_EPSILON)) { - memcpy(cell_state, state_buffer, lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float16_t)); - memcpy(hidden_state, state_buffer + lstm_param->batch_ * lstm_param->hidden_size_, - lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float16_t)); + if (!(lstm_param->zoneout_cell_ >= -FLT_EPSILON && lstm_param->zoneout_cell_ <= FLT_EPSILON)) { + memcpy(cell_state, state_buffer[0], lstm_param->batch_ * lstm_param->hidden_size_ * 
sizeof(float16_t)); + } + + if (!(lstm_param->zoneout_hidden_ >= -FLT_EPSILON && lstm_param->zoneout_hidden_ <= FLT_EPSILON)) { + memcpy(hidden_state, state_buffer[1], lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float16_t)); } } void LstmFp16(float16_t *output, const float16_t *input, const float16_t *weight_i, const float16_t *weight_h, const float16_t *bias, float16_t *hidden_state, float16_t *cell_state, float16_t *gate_buffer, - float16_t *state_buffer, float16_t *matmul_buffer[2], const LstmParameter *lstm_param) { + float16_t *state_buffer[2], float16_t *matmul_buffer[2], const LstmParameter *lstm_param) { // forward for (int t = 0; t < lstm_param->seq_len_; t++) { const float16_t *input_ptr = input + t * lstm_param->input_step_; diff --git a/mindspore/lite/nnacl/fp16/lstm_fp16.h b/mindspore/lite/nnacl/fp16/lstm_fp16.h index b5bf7ad2a9..e3aae4326b 100644 --- a/mindspore/lite/nnacl/fp16/lstm_fp16.h +++ b/mindspore/lite/nnacl/fp16/lstm_fp16.h @@ -37,7 +37,7 @@ int ElementOptMulAccFp16(const float16_t *input0, const float16_t input1, float1 void LstmFp16(float16_t *output, const float16_t *input, const float16_t *weight_i, const float16_t *weight_h, const float16_t *bias, float16_t *hidden_state, float16_t *cell_state, float16_t *gate_buffer, - float16_t *state_buffer, float16_t *matmul_buffer[2], const LstmParameter *lstm_param); + float16_t *state_buffer[2], float16_t *matmul_buffer[2], const LstmParameter *lstm_param); #ifdef __cplusplus } #endif diff --git a/mindspore/lite/nnacl/fp16_grad/activation_grad.c b/mindspore/lite/nnacl/fp16_grad/activation_grad.c index a9406e0262..da48f5253b 100644 --- a/mindspore/lite/nnacl/fp16_grad/activation_grad.c +++ b/mindspore/lite/nnacl/fp16_grad/activation_grad.c @@ -53,20 +53,3 @@ int Fp16SigmoidGrad(const float16_t *src0, const float16_t *src1, size_t length, } return NNACL_OK; } - -int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst) { - int i = 0; -#ifdef ENABLE_NEON - float16x8_t log_10 = vdupq_n_f16(log(10)); - for (; i < length - 4; i += 4) { - float16x8_t src0_4 = vld1q_f16(src0 + i); - float16x8_t src1_4 = vld1q_f16(src1 + i); - float16x8_t dst_4 = vmulq_f16(src0_4, vrecpeq_f16(vmulq_f16(src1_4, log_10))); - vst1q_f16(dst + i, dst_4); - } -#endif - for (; i < length; i++) { - dst[i] = src0[i] * 1.0f / (src1[i] * log(10)); - } - return NNACL_OK; -} diff --git a/mindspore/lite/nnacl/fp16_grad/activation_grad.h b/mindspore/lite/nnacl/fp16_grad/activation_grad.h index 985708bb15..79c53584b7 100644 --- a/mindspore/lite/nnacl/fp16_grad/activation_grad.h +++ b/mindspore/lite/nnacl/fp16_grad/activation_grad.h @@ -21,7 +21,7 @@ #endif #include <math.h> #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/fixed_point.h" +#include "nnacl/int8/fixed_point.h" typedef struct ActivationGradParameterFp16 { OpParameter op_parameter; @@ -34,7 +34,6 @@ extern "C" { int Fp16ReluGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); int Fp16SigmoidGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); -int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c b/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c new file mode 100644 index 0000000000..7cd7b4d7e5 --- /dev/null +++ b/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.c @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed 
under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+#include "nnacl/op_base.h"
+#include "nnacl/fp16_grad/arithmetic_self_grad.h"
+#include "nnacl/errorcode.h"
+
+int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst) {
+  int i = 0;
+#ifdef ENABLE_NEON
+  float16x8_t log_10 = vdupq_n_f16(log(10));
+  for (; i <= (int)length - 8; i += 8) {  // 8 fp16 lanes per step; also safe when length < 8
+    float16x8_t src0_8 = vld1q_f16(src0 + i);
+    float16x8_t src1_8 = vld1q_f16(src1 + i);
+    float16x8_t dst_8 = vmulq_f16(src0_8, vrecpeq_f16(vmulq_f16(src1_8, log_10)));
+    vst1q_f16(dst + i, dst_8);
+  }
+#endif
+  for (; i < length; i++) {
+    dst[i] = src0[i] * 1.0f / (src1[i] * log(10));
+  }
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h b/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h
new file mode 100644
index 0000000000..3d894581ac
--- /dev/null
+++ b/mindspore/lite/nnacl/fp16_grad/arithmetic_self_grad.h
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ +#define MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ + +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include <math.h> +#include "nnacl/op_base.h" + +typedef struct ArithmeticSelfGradParameterFp16 { + OpParameter op_parameter; + int type_; +} ArithmeticSelfGradParameterFp16; +#ifdef __cplusplus +extern "C" { +#endif + +int Fp16LogGrad(const float16_t *src0, const float16_t *src1, size_t length, float16_t *dst); + +#ifdef __cplusplus +} +#endif + +#endif // MINDSPORE_LITE_NNACL_FP16_GRAD_ARITHMETHIC_SELF_GRAD_H_ diff --git a/mindspore/lite/nnacl/fp32/activation_fp32.h b/mindspore/lite/nnacl/fp32/activation_fp32.h index afae986998..c682ec2b3b 100644 --- a/mindspore/lite/nnacl/fp32/activation_fp32.h +++ b/mindspore/lite/nnacl/fp32/activation_fp32.h @@ -18,7 +18,7 @@ #include <math.h> #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/fixed_point.h" +#include "nnacl/int8/fixed_point.h" typedef struct ActivationParameter { OpParameter op_parameter_; diff --git a/mindspore/lite/nnacl/fp32/layer_norm_fp32.c b/mindspore/lite/nnacl/fp32/layer_norm_fp32.c index d99f303f37..c05fda2ea1 100644 --- a/mindspore/lite/nnacl/fp32/layer_norm_fp32.c +++ b/mindspore/lite/nnacl/fp32/layer_norm_fp32.c @@ -87,12 +87,10 @@ int LayerNorm(const float *src_data, const float *gamma_data, const float *beta_ LayerNormGammaAndBeta(dst_param, src_param, gamma_data, beta_data, param->params_inner_size_, mean, deno); } } else { - int x = i / param->norm_outer_size_; - const float *src_param = src_norm + x * param->params_inner_size_; - float *dst_param = dst_norm + x * param->params_inner_size_; - const float *gamma = gamma_data + x * param->params_inner_size_; - const float *beta = beta_data + x * param->params_inner_size_; - LayerNormGammaAndBeta(dst_param, src_param, gamma, beta, param->norm_inner_size_, mean, deno); + int x = i / param->params_outer_size_; + const float *gamma = gamma_data + x * param->norm_inner_size_; + const float *beta = beta_data + x * param->norm_inner_size_; + LayerNormGammaAndBeta(dst_norm, src_norm, gamma, beta, param->norm_inner_size_, mean, deno); } } return NNACL_OK; diff --git a/mindspore/lite/nnacl/fp32/lstm_fp32.c b/mindspore/lite/nnacl/fp32/lstm_fp32.c index 276375c863..35627feb91 100644 --- a/mindspore/lite/nnacl/fp32/lstm_fp32.c +++ b/mindspore/lite/nnacl/fp32/lstm_fp32.c @@ -117,39 +117,38 @@ int ElementOptMulAcc(const float *input0, const float input1, float *output, con } void UpdataState(float *cell_state, const float *forget_gate, const float *input_gate, const float *cell_gate, - float *state_buffer, int batch, int hidden_size, const float smooth) { - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { // smooth * old_cell_state + float *state_buffer, int batch, int hidden_size, const float zoneout) { + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { // zoneout * old_cell_state memcpy(state_buffer, cell_state, batch * hidden_size * sizeof(float)); ArithmeticParameter parameter; parameter.in_elements_num0_ = batch * hidden_size; parameter.in_elements_num1_ = 1; - ElementOptMul(state_buffer, &smooth, state_buffer, batch * hidden_size, &parameter); + ElementOptMul(state_buffer, &zoneout, state_buffer, batch * hidden_size, &parameter); } ElementMul(forget_gate, cell_state, cell_state, batch * hidden_size); ElementMulAcc(input_gate, cell_gate, cell_state, batch * hidden_size); - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { // (1 - smooth) * new_cell_state 
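The smooth-to-zoneout renames in the LSTM hunks here are more than cosmetic: the buffers implement zoneout regularization, which carries part of the previous state into the new one, and the split into state_buffer[2] lets the cell and hidden states use independent rates (zoneout_cell_, zoneout_hidden_). In scalar form, both UpdataState variants perform the blend below (a reading aid, not patch code; the real kernels vectorize it with ElementOptMul/ElementOptMulAcc over batch * hidden_size elements):

    #include <stdio.h>

    /* new_state = zoneout * old_state + (1 - zoneout) * candidate */
    static float ZoneoutBlend(float old_state, float candidate, float zoneout) {
      return zoneout * old_state + (1.0f - zoneout) * candidate;
    }

    int main(void) {
      /* zoneout == 0 keeps the candidate (plain LSTM); zoneout == 1 freezes the state */
      printf("%f\n", ZoneoutBlend(0.5f, 2.0f, 0.0f));  /* 2.000000 */
      printf("%f\n", ZoneoutBlend(0.5f, 2.0f, 0.25f)); /* 1.625000 */
      return 0;
    }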
- ElementOptMulAcc(cell_state, 1 - smooth, state_buffer, batch * hidden_size); + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { // (1 - zoneout) * new_cell_state + ElementOptMulAcc(cell_state, 1 - zoneout, state_buffer, batch * hidden_size); } } -void UpdataOutput(const float *cell_state, const float *output_gate, float *hidden_state, float *state_buffer_in, - int batch, int hidden_size, const float smooth) { - float *state_buffer = state_buffer_in + batch * hidden_size; - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { +void UpdataOutput(const float *cell_state, const float *output_gate, float *hidden_state, float *state_buffer, + int batch, int hidden_size, const float zoneout) { + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { memcpy(state_buffer, hidden_state, batch * hidden_size * sizeof(float)); ArithmeticParameter parameter; parameter.in_elements_num0_ = batch * hidden_size; parameter.in_elements_num1_ = 1; - ElementOptMul(state_buffer, &smooth, state_buffer, batch * hidden_size, &parameter); + ElementOptMul(state_buffer, &zoneout, state_buffer, batch * hidden_size, &parameter); } Tanh(cell_state, batch * hidden_size, hidden_state); ElementMul(hidden_state, output_gate, hidden_state, batch * hidden_size); - if (!(smooth >= -FLT_EPSILON && smooth <= FLT_EPSILON)) { - ElementOptMulAcc(hidden_state, 1 - smooth, state_buffer, batch * hidden_size); + if (!(zoneout >= -FLT_EPSILON && zoneout <= FLT_EPSILON)) { + ElementOptMulAcc(hidden_state, 1 - zoneout, state_buffer, batch * hidden_size); } } @@ -164,7 +163,7 @@ void UpdateLstmGate(float *gate_buffer, const float *input, const float *weight, } void LstmStepUnit(float *output, const float *input, const float *input_weight, const float *state_weight, - const float *bias, float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer, + const float *bias, float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer[2], float *matmul_buffer[2], const LstmParameter *lstm_param) { bool is_vec = lstm_param->batch_ == 1; // input * weight @@ -205,25 +204,27 @@ void LstmStepUnit(float *output, const float *input, const float *input_weight, // update cell_gate Tanh(cell_gate, lstm_param->batch_ * lstm_param->hidden_size_, cell_gate); // update cell state - UpdataState(cell_state, forget_gate, input_gate, cell_gate, state_buffer, lstm_param->batch_, - lstm_param->hidden_size_, lstm_param->smooth_); + UpdataState(cell_state, forget_gate, input_gate, cell_gate, state_buffer[0], lstm_param->batch_, + lstm_param->hidden_size_, lstm_param->zoneout_cell_); // update output_gate Sigmoid(output_gate, lstm_param->batch_ * lstm_param->hidden_size_, output_gate); // update output - UpdataOutput(cell_state, output_gate, hidden_state, state_buffer, lstm_param->batch_, lstm_param->hidden_size_, - lstm_param->smooth_); + UpdataOutput(cell_state, output_gate, hidden_state, state_buffer[1], lstm_param->batch_, lstm_param->hidden_size_, + lstm_param->zoneout_hidden_); memcpy(output, hidden_state, lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float)); - if (!(lstm_param->smooth_ >= -FLT_EPSILON && lstm_param->smooth_ <= FLT_EPSILON)) { - memcpy(cell_state, state_buffer, lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float)); - memcpy(hidden_state, state_buffer + lstm_param->batch_ * lstm_param->hidden_size_, - lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float)); + if (!(lstm_param->zoneout_cell_ >= -FLT_EPSILON && lstm_param->zoneout_cell_ <= FLT_EPSILON)) { + 
memcpy(cell_state, state_buffer[0], lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float)); + } + + if (!(lstm_param->zoneout_hidden_ >= -FLT_EPSILON && lstm_param->zoneout_hidden_ <= FLT_EPSILON)) { + memcpy(hidden_state, state_buffer[1], lstm_param->batch_ * lstm_param->hidden_size_ * sizeof(float)); } } void Lstm(float *output, const float *input, const float *weight_i, const float *weight_h, const float *bias, - float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer, float *matmul_buffer[2], + float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer[2], float *matmul_buffer[2], const LstmParameter *lstm_param) { // forward for (int t = 0; t < lstm_param->seq_len_; t++) { diff --git a/mindspore/lite/nnacl/fp32/lstm_fp32.h b/mindspore/lite/nnacl/fp32/lstm_fp32.h index 2bc060dd8c..ef5ff9f954 100644 --- a/mindspore/lite/nnacl/fp32/lstm_fp32.h +++ b/mindspore/lite/nnacl/fp32/lstm_fp32.h @@ -32,7 +32,7 @@ void ElementMulAcc(const float *input0, const float *input1, float *output, int int ElementOptMulAcc(const float *input0, const float input1, float *output, const int element_size); void Lstm(float *output, const float *input, const float *weight_i, const float *weight_h, const float *bias, - float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer, float *matmul_buffer[2], + float *hidden_state, float *cell_state, float *gate_buffer, float *state_buffer[2], float *matmul_buffer[2], const LstmParameter *lstm_param); #ifdef __cplusplus } diff --git a/mindspore/lite/nnacl/fp32/pooling_fp32.h b/mindspore/lite/nnacl/fp32/pooling_fp32.h index 96c712bd77..71033e5072 100644 --- a/mindspore/lite/nnacl/fp32/pooling_fp32.h +++ b/mindspore/lite/nnacl/fp32/pooling_fp32.h @@ -22,7 +22,7 @@ #endif #include "nnacl/op_base.h" #include "nnacl/pooling_parameter.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" #ifdef __cplusplus extern "C" { diff --git a/mindspore/lite/nnacl/fp32/splice_fp32.c b/mindspore/lite/nnacl/fp32/splice_fp32.c new file mode 100644 index 0000000000..0682925169 --- /dev/null +++ b/mindspore/lite/nnacl/fp32/splice_fp32.c @@ -0,0 +1,30 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "nnacl/fp32/splice_fp32.h"
+void SpliceFp32(const float *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter,
+                float *dst_data, int dst_row, int dst_col) {
+  for (int r = 0; r < dst_row; ++r) {
+    for (int off = 0; off < splice_parameter->context_dim_; ++off) {
+      int r_off = r + splice_parameter->context_[off];
+      r_off = MSMAX(r_off, 0);
+      r_off = MSMIN(r_off, src_row - 1);
+      const float *tmp_src_data = src_data + r_off * src_col;  // element offset; float pointer arithmetic already scales by sizeof(float)
+      float *tmp_dst_data = dst_data + r * dst_col;
+      memcpy(tmp_dst_data + off * src_col, tmp_src_data, src_col * sizeof(float));
+    }
+  }
+}
diff --git a/mindspore/lite/nnacl/fp32/splice_fp32.h b/mindspore/lite/nnacl/fp32/splice_fp32.h
new file mode 100644
index 0000000000..42db661670
--- /dev/null
+++ b/mindspore/lite/nnacl/fp32/splice_fp32.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_
+#define MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_
+#include <string.h>
+#include "nnacl/splice_parameter.h"
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void SpliceFp32(const float *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter,
+                float *dst_data, int dst_row, int dst_col);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_FP32_SPLICE_FP32_H_
diff --git a/mindspore/lite/nnacl/fp32_grad/batch_norm.h b/mindspore/lite/nnacl/fp32_grad/batch_norm.h
index ca64c6a5e6..9931819696 100644
--- a/mindspore/lite/nnacl/fp32_grad/batch_norm.h
+++ b/mindspore/lite/nnacl/fp32_grad/batch_norm.h
@@ -22,7 +22,6 @@
 typedef struct BNGradParameter {
   OpParameter op_parameter_;
   float epsilon_;
-  float momentum_;
 } BNGradParameter;
 
 #ifdef __cplusplus
diff --git a/mindspore/lite/nnacl/fp32_grad/softmax_grad.h b/mindspore/lite/nnacl/fp32_grad/softmax_grad.h
index 06cd9cc733..85f4717b64 100644
--- a/mindspore/lite/nnacl/fp32_grad/softmax_grad.h
+++ b/mindspore/lite/nnacl/fp32_grad/softmax_grad.h
@@ -35,7 +35,7 @@ typedef struct SoftmaxCrossEntropyParameter {
   // other parameter
   int32_t batch_size_;
   unsigned int number_of_classes_;
-  int is_grad;
+  bool is_grad;
 } SoftmaxCrossEntropyParameter;
 
 void SoftmaxGrad(const float *input_ptr, const float *yt_ptr, float *output_ptr, float *sum_data, float *sum_mul,
diff --git a/mindspore/lite/nnacl/gather_parameter.h b/mindspore/lite/nnacl/gather_parameter.h
index d300970417..0a2d907b6d 100644
--- a/mindspore/lite/nnacl/gather_parameter.h
+++ b/mindspore/lite/nnacl/gather_parameter.h
@@ -23,7 +23,7 @@ typedef struct GatherParameter {
   // Primitive parameter
   OpParameter op_parameter_;
   int axis_;
-  int batchDims_;
+  int quant_type_;
 } GatherParameter;
 
 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_NNACL_GATHER_PARAMETER_H_
diff --git a/mindspore/lite/nnacl/infer/adam_infer.c b/mindspore/lite/nnacl/infer/adam_infer.c
new file mode 100644
index 0000000000..b8543ef7f0
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/adam_infer.c
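The hunk below adds shape inference for Adam, which only checks the input contract and emits a dummy scalar output. For orientation, these are the ten positional inputs it validates; the names are descriptive assumptions based on the standard Adam update, not identifiers from this patch:

    typedef enum AdamInputIndex {
      kAdamVar = 0,         /* element count must match m, v and grad */
      kAdamM = 1,
      kAdamV = 2,
      kAdamBeta1Power = 3,  /* indices 3..8 must be scalars (element count 1) */
      kAdamBeta2Power = 4,
      kAdamLearningRate = 5,
      kAdamBeta1 = 6,
      kAdamBeta2 = 7,
      kAdamEpsilon = 8,
      kAdamGradient = 9     /* element count must match var */
    } AdamInputIndex;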
@@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "nnacl/infer/adam_infer.h" + +int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (10 != inputs_size) { + return NNACL_ERR; + } + + if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[2]) || + GetElementNum(inputs[0]) != GetElementNum(inputs[9]) || GetElementNum(inputs[3]) != 1 || + GetElementNum(inputs[4]) != 1 || GetElementNum(inputs[5]) != 1 || GetElementNum(inputs[6]) != 1 || + GetElementNum(inputs[7]) != 1 || GetElementNum(inputs[8]) != 1) { + return NNACL_ERR; + } + if (outputs_size != 0) { + TensorC *out = outputs[0]; + SetDataTypeFormat(out, inputs[0]); + out->shape_size_ = 1; + out->shape_[0] = 1; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/adam_infer.h b/mindspore/lite/nnacl/infer/adam_infer.h new file mode 100644 index 0000000000..f4ec666813 --- /dev/null +++ b/mindspore/lite/nnacl/infer/adam_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ADAM_INFER_H +#define MINDSPORE_LITE_NNACL_ADAM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AdamInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ADAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/add_sub_grad_infer.c b/mindspore/lite/nnacl/infer/add_sub_grad_infer.c new file mode 100644 index 0000000000..69d5580778 --- /dev/null +++ b/mindspore/lite/nnacl/infer/add_sub_grad_infer.c @@ -0,0 +1,63 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/add_sub_grad_infer.h" +#include "nnacl/infer/arithmetic_grad_infer.h" + +int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 2); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *dy = inputs[0]; + const TensorC *x1 = inputs[1]; + const TensorC *x2 = inputs[2]; + TensorC *dx1 = outputs[0]; + TensorC *dx2 = outputs[1]; + + ArithmeticGradParameter *param = (ArithmeticGradParameter *)parameter; + + int in_shape0[MAX_SHAPE_SIZE]; + size_t in_shape0_size = 0; + ShapeSet(in_shape0, &in_shape0_size, x1->shape_, x1->shape_size_); + int in_shape1[MAX_SHAPE_SIZE]; + size_t in_shape1_size = 0; + ShapeSet(in_shape1, &in_shape1_size, x2->shape_, x2->shape_size_); + int outShape[MAX_SHAPE_SIZE]; + size_t outShape_size = 0; + ShapeSet(outShape, &outShape_size, dy->shape_, dy->shape_size_); + + param->ndim_ = outShape_size; + param->x1_shape_size_ = param->ndim_; + param->x2_shape_size_ = param->ndim_; + param->dy_shape_size_ = param->ndim_; + int fill_dim_num0 = outShape_size - in_shape0_size; + int fill_dim_num1 = outShape_size - in_shape1_size; + int j0 = 0; + int j1 = 0; + for (unsigned int i = 0; i < outShape_size; i++) { + param->x1_shape_[i] = (i < fill_dim_num0) ? 1 : in_shape0[j0++]; + param->x2_shape_[i] = (i < fill_dim_num1) ? 1 : in_shape1[j1++]; + param->dy_shape_[i] = outShape[i]; + } + + SetShapeTensor(dx1, x1); + SetShapeTensor(dx2, x2); + dx1->data_type_ = dy->data_type_; + dx2->data_type_ = dy->data_type_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/add_sub_grad_infer.h b/mindspore/lite/nnacl/infer/add_sub_grad_infer.h new file mode 100644 index 0000000000..4d3b959b42 --- /dev/null +++ b/mindspore/lite/nnacl/infer/add_sub_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
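AddSubGradInferShape above pads each operand's shape with leading 1s up to dy's rank before recording it in the parameter, so the backward pass can reason about broadcasting over a common rank. The padding loop in isolation, with made-up shapes:

    #include <stdio.h>

    int main(void) {
      int in_shape[2] = {3, 4};        /* operand of rank 2 */
      int out_rank = 4;                /* dy has rank 4 */
      int padded[4];
      int fill = out_rank - 2, j = 0;  /* fill_dim_num in the patch */
      for (int i = 0; i < out_rank; i++) {
        padded[i] = (i < fill) ? 1 : in_shape[j++];
        printf("%d ", padded[i]);      /* prints: 1 1 3 4 */
      }
      printf("\n");
      return 0;
    }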
+ */
+#ifndef MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H
+#define MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int AddSubGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                         OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_ADD_SUB_GRAD_INFER_H
diff --git a/mindspore/lite/nnacl/infer/addn_infer.c b/mindspore/lite/nnacl/infer/addn_infer.c
new file mode 100644
index 0000000000..e571d06e9c
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/addn_infer.c
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/addn_infer.h"
+
+int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret == NNACL_NULL_PTR) {
+    return NNACL_NULL_PTR;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  if (inputs_size < 2) {
+    return NNACL_ERR;
+  }
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  size_t max_dims = input->shape_size_;
+  size_t max_dims_idx = 0;
+
+  // determine max_dims
+  for (size_t i = 1; i < inputs_size; ++i) {
+    if (inputs[i]->shape_size_ > max_dims) {
+      max_dims = inputs[i]->shape_size_;
+      max_dims_idx = i;
+    }
+  }
+  ShapeSet(output->shape_, &output->shape_size_, inputs[max_dims_idx]->shape_, inputs[max_dims_idx]->shape_size_);
+
+  // make sure all elements have the same size or 1 (broadcasting) in all dimensions
+  for (size_t i = 1; i < inputs_size; ++i) {
+    if ((inputs[i]->shape_size_ != max_dims) && (GetElementNum(inputs[i]) != GetElementNum(inputs[max_dims_idx]))) {
+      return NNACL_ERR;
+    }
+    if (inputs[i]->data_type_ != inputs[0]->data_type_) {
+      return NNACL_ERR;
+    }
+  }
+
+  for (size_t d = 0; d < output->shape_size_; ++d) {
+    size_t max_dim = 0;
+    for (size_t i = 0; i < inputs_size; ++i) {
+      size_t shift = max_dims - inputs[i]->shape_size_;
+      size_t dim = (d < shift) ? 1 : (size_t)inputs[i]->shape_[d - shift];  // right-aligned broadcast
+      if (dim > max_dim) {
+        max_dim = dim;
+      }
+    }
+    output->shape_[d] = max_dim;  // set the biggest dimension in the output tensor
+  }
+
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/addn_infer.h b/mindspore/lite/nnacl/infer/addn_infer.h
new file mode 100644
index 0000000000..76f34944e8
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/addn_infer.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
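The final loop of AddnInferShape above aligns input shapes from the right: dimension d of the output corresponds to dimension d - shift of an input whose rank is shorter by shift, and missing leading dimensions count as 1. A scalar sketch of that alignment with hypothetical shapes:

    #include <stdio.h>

    int main(void) {
      int a[1] = {3};        /* rank-1 input */
      int b[3] = {2, 1, 3};  /* rank-3 input, so max_dims = 3 */
      int out[3];
      int max_dims = 3, a_rank = 1;
      for (int d = 0; d < max_dims; ++d) {
        int shift = max_dims - a_rank;
        int dim_a = (d < shift) ? 1 : a[d - shift];  /* pad with leading 1s */
        out[d] = dim_a > b[d] ? dim_a : b[d];
        printf("out[%d] = %d\n", d, out[d]);         /* 2, 1, 3 */
      }
      return 0;
    }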
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ADDN_INFER_H +#define MINDSPORE_LITE_NNACL_ADDN_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AddnInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ADDN_INFER_H diff --git a/mindspore/lite/nnacl/infer/apply_momentum_infer.c b/mindspore/lite/nnacl/infer/apply_momentum_infer.c new file mode 100644 index 0000000000..f12207c59d --- /dev/null +++ b/mindspore/lite/nnacl/infer/apply_momentum_infer.c @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/apply_momentum_infer.h" + +int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (inputs_size != 5) { + return NNACL_INPUT_TENSOR_ERROR; + } + + if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[3]) || + GetElementNum(inputs[2]) != 1 || GetElementNum(inputs[4]) != 1) { + return NNACL_INPUT_TENSOR_ERROR; + } + if (outputs_size != 0) { + TensorC *out = outputs[0]; + if (out == NULL) { + return NNACL_NULL_PTR; + } + out->data_type_ = inputs[0]->data_type_; + out->format_ = inputs[0]->format_; + out->shape_size_ = 1; + out->shape_[0] = 1; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/apply_momentum_infer.h b/mindspore/lite/nnacl/infer/apply_momentum_infer.h new file mode 100644 index 0000000000..a377b3a5e0 --- /dev/null +++ b/mindspore/lite/nnacl/infer/apply_momentum_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H +#define MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ApplyMomentumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_APPLY_MOMENTUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/argmin_max_infer.c b/mindspore/lite/nnacl/infer/argmin_max_infer.c new file mode 100644 index 0000000000..362023f73c --- /dev/null +++ b/mindspore/lite/nnacl/infer/argmin_max_infer.c @@ -0,0 +1,70 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/argmin_max_infer.h" + +int ArgMinMaxInferShape(const TensorC *const *inputs, const size_t inputs_size, TensorC **outputs, + const size_t outputs_size, OpParameter *parameter) { + int check_ret_1 = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + int check_ret_2 = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 2); + if (check_ret_1 != NNACL_OK && check_ret_2 != NNACL_OK) { + return NNACL_ERR; + } + ArgMinMaxParameter *param = (ArgMinMaxParameter *)parameter; + const TensorC *input = inputs[0]; + TensorC *output_1 = NULL; + TensorC *output_2 = NULL; + if (outputs_size == 2) { + output_1 = outputs[0]; + output_2 = outputs[1]; + } else if (param->out_value_) { + output_2 = outputs[0]; + } else { + output_1 = outputs[0]; + } + + if (output_1 != NULL) { + output_1->format_ = input->format_; + output_1->data_type_ = kNumberTypeInt32; + } + if (output_2 != NULL) { + SetDataTypeFormat(output_2, input); + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapeSet(output_shape, &output_shape_size, input->shape_, input->shape_size_); + size_t input_shape_size = input->shape_size_; + int axis = param->axis_ < 0 ? param->axis_ + (int)input_shape_size : param->axis_; + if (axis >= input_shape_size || axis < 0) { + return NNACL_PARAM_INVALID; + } + if (param->topk_ == 1 && !param->keep_dims_) { + ShapeErase(output_shape, &output_shape_size, axis); + } else { + output_shape[axis] = param->topk_; + } + + if (output_1 != NULL) { + SetShapeArray(output_1, output_shape, output_shape_size); + } + if (output_2 != NULL) { + SetShapeArray(output_2, output_shape, output_shape_size); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/argmin_max_infer.h b/mindspore/lite/nnacl/infer/argmin_max_infer.h new file mode 100644 index 0000000000..42726b3c57 --- /dev/null +++ b/mindspore/lite/nnacl/infer/argmin_max_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ARGMAX_INFER_H +#define MINDSPORE_LITE_NNACL_ARGMAX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/arg_min_max_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ArgMinMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ARGMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c new file mode 100644 index 0000000000..9d3c812285 --- /dev/null +++ b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.c @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/arithmetic_compare_infer.h" + +int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int res = ArithmeticInferShape(inputs, inputs_size, outputs, outputs_size, parameter); + TensorC *output = outputs[0]; + if (output == NULL) { + return NNACL_NULL_PTR; + } + output->data_type_ = kNumberTypeBool; + return res; +} diff --git a/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h new file mode 100644 index 0000000000..2934cdce95 --- /dev/null +++ b/mindspore/lite/nnacl/infer/arithmetic_compare_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
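ArgMinMaxInferShape above applies a simple shape rule: the reduced axis disappears when topk == 1 and keep_dims is false, and otherwise its extent becomes topk. A standalone sketch of the rule with a hypothetical shape:

    #include <stdio.h>

    int main(void) {
      int shape[3] = {4, 5, 6};
      int shape_size = 3, axis = 1, topk = 1, keep_dims = 0;
      if (topk == 1 && !keep_dims) {
        for (int i = axis; i + 1 < shape_size; ++i) {  /* erase the axis, like ShapeErase */
          shape[i] = shape[i + 1];
        }
        shape_size--;
      } else {
        shape[axis] = topk;
      }
      for (int i = 0; i < shape_size; ++i) printf("%d ", shape[i]);  /* prints: 4 6 */
      printf("\n");
      return 0;
    }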
+ */
+#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H
+#define MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H
+
+#include "nnacl/infer/arithmetic_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ArithmeticCompareInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                size_t outputs_size, OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_ARITHMETIC_COMPARE_INFER_H
diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c
new file mode 100644
index 0000000000..591732a7c6
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.c
@@ -0,0 +1,97 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/arithmetic_grad_infer.h"
+
+/*
+ * The arithmetic grad ops are AddGrad, SubGrad, MulGrad, DivGrad, MaximumGrad and MinimumGrad
+ * (as currently listed in arithmetic_fp32.h). MaximumGrad and MinimumGrad run through
+ * MaximumGradInferShape, AddGrad and SubGrad run through AddSubGradInferShape, and the
+ * remaining ops run through this function.
+ */
int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                             OpParameter *parameter) {
+  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 2);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *dy = inputs[0];
+  const TensorC *x1 = inputs[1];
+  const TensorC *x2 = inputs[2];
+  TensorC *dx1 = outputs[0];
+  TensorC *dx2 = outputs[1];
+
+  ArithmeticGradParameter *param = (ArithmeticGradParameter *)parameter;
+
+  int in_shape0[MAX_SHAPE_SIZE];
+  size_t in_shape0_size = 0;
+  ShapeSet(in_shape0, &in_shape0_size, x1->shape_, x1->shape_size_);
+  int in_shape1[MAX_SHAPE_SIZE];
+  size_t in_shape1_size = 0;
+  ShapeSet(in_shape1, &in_shape1_size, x2->shape_, x2->shape_size_);
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  ShapeSet(out_shape, &out_shape_size, dy->shape_, dy->shape_size_);
+
+  if (GetElementNum(dx1) < GetElementNum(dx2)) {
+    param->ndim_ = in_shape1_size;
+    param->x1_shape_size_ = param->ndim_;
+    param->x2_shape_size_ = param->ndim_;
+    param->dy_shape_size_ = param->ndim_;
+    int fill_dim_num = in_shape1_size - in_shape0_size;  // This will not work for batch!
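+    // Illustration of the fill loop below: with x1 shape {3} and x2 shape {2, 3},
+    // fill_dim_num is 1, so the smaller shape is left-padded with 1s to {1, 3}
+    // before the aligned shapes are copied into the parameter.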
+ int j = 0; + for (unsigned int i = 0; i < in_shape1_size; i++) { + if (i < fill_dim_num) { + param->x2_shape_[i] = 1; + } else { + param->x2_shape_[i] = in_shape0[j++]; + } + param->x1_shape_[i] = in_shape1[i]; + param->dy_shape_[i] = out_shape[i]; + } + } else if (GetElementNum(dx2) < GetElementNum(dx1)) { + param->ndim_ = in_shape0_size; + param->x1_shape_size_ = param->ndim_; + param->x2_shape_size_ = param->ndim_; + param->dy_shape_size_ = param->ndim_; + param->broadcasting_ = true; + int j = 0; + int fill_dim_num = in_shape0_size - in_shape1_size; + for (unsigned int i = 0; i < in_shape0_size; i++) { + if (i < fill_dim_num) { + param->x2_shape_[i] = 1; + } else { + param->x2_shape_[i] = in_shape1[j++]; + } + param->x1_shape_[i] = in_shape0[i]; + param->dy_shape_[i] = out_shape[i]; + } + } else { + param->broadcasting_ = false; + for (unsigned int i = 0; i < in_shape0_size; i++) { + param->x2_shape_[i] = in_shape1[i]; + param->x1_shape_[i] = in_shape0[i]; + param->dy_shape_[i] = out_shape[i]; + } + } + + SetShapeTensor(dx1, x1); + SetShapeTensor(dx2, x2); + dx1->data_type_ = dy->data_type_; + dx2->data_type_ = dy->data_type_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h new file mode 100644 index 0000000000..04323116ae --- /dev/null +++ b/mindspore/lite/nnacl/infer/arithmetic_grad_infer.h @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_ARITHMETIC_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct ArithmeticGradParameter { + OpParameter op_parameter_; + int type_; + bool broadcasting_; // default false + int ndim_; + // std::vector<int> dy_shape_; + int dy_shape_[MAX_SHAPE_SIZE]; + size_t dy_shape_size_; + int x1_shape_[MAX_SHAPE_SIZE]; + size_t x1_shape_size_; + int x2_shape_[MAX_SHAPE_SIZE]; + size_t x2_shape_size_; +} ArithmeticGradParameter; + +int ArithmeticGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ARITHMETIC_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/arithmetic_infer.c b/mindspore/lite/nnacl/infer/arithmetic_infer.c new file mode 100644 index 0000000000..466813b8a1 --- /dev/null +++ b/mindspore/lite/nnacl/infer/arithmetic_infer.c @@ -0,0 +1,117 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/arithmetic_infer.h" + +int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + ArithmeticParameter *param = (ArithmeticParameter *)parameter; + param->broadcasting_ = false; + + const TensorC *input0 = inputs[0]; + const TensorC *input1 = inputs[1]; + TensorC *output = outputs[0]; + + const int *input_shape0 = input0->shape_; + size_t input_shape0_size = input0->shape_size_; + const int *input_shape1 = input1->shape_; + size_t input_shape1_size = input1->shape_size_; + output->format_ = input0->format_; + output->data_type_ = input0->data_type_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (input_shape0_size > 10 || input_shape1_size > 10) { + return NNACL_ERR; + } + int in_shape0_[10]; + int in_shape1_[10]; + int out_shape_[10]; + + int ndim_ = input_shape0_size; + if (input_shape0_size < input_shape1_size) { + ndim_ = input_shape1_size; + int fill_dim_num = input_shape1_size - input_shape0_size; + int j = 0; + for (size_t i = 0; i < input_shape1_size; i++) { + if (i < fill_dim_num) { + in_shape0_[i] = 1; + } else { + in_shape0_[i] = input_shape0[j++]; + } + in_shape1_[i] = input_shape1[i]; + } + // format = input0->format(); + } else if (input_shape0_size > input_shape1_size) { + ndim_ = input_shape0_size; + int fill_dim_num = input_shape0_size - input_shape1_size; + int j = 0; + for (size_t i = 0; i < input_shape0_size; i++) { + if (i < fill_dim_num) { + in_shape1_[i] = 1; + } else { + in_shape1_[i] = input_shape1[j++]; + } + in_shape0_[i] = input_shape0[i]; + } + } else { + for (size_t i = 0; i < input_shape0_size; i++) { + in_shape1_[i] = input_shape1[i]; + in_shape0_[i] = input_shape0[i]; + } + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (int i = 0; i < ndim_; i++) { + if (in_shape0_[i] != in_shape1_[i]) { + if (in_shape0_[i] == 1) { + out_shape_[i] = in_shape1_[i]; + } else if (in_shape1_[i] == 1) { + out_shape_[i] = in_shape0_[i]; + } else { + return NNACL_ERR; + } + param->broadcasting_ = true; + } else { + out_shape_[i] = in_shape0_[i]; + } + output_shape[output_shape_size] = out_shape_[i]; + output_shape_size++; + } + + SetShapeArray(output, output_shape, output_shape_size); + + param->ndim_ = ndim_; + memcpy(param->in_shape0_, in_shape0_, ndim_ * sizeof(int)); + memcpy(param->in_shape1_, in_shape1_, ndim_ * sizeof(int)); + memcpy(param->out_shape_, out_shape_, ndim_ * sizeof(int)); + + param->in_elements_num0_ = 1; + param->in_elements_num1_ = 1; + param->out_elements_num_ = 1; + for (int i = 0; i < ndim_; i++) { + param->in_elements_num0_ *= param->in_shape0_[i]; + param->in_elements_num1_ *= param->in_shape1_[i]; + param->out_elements_num_ *= param->out_shape_[i]; + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/arithmetic_infer.h b/mindspore/lite/nnacl/infer/arithmetic_infer.h new file mode 100644 index 
0000000000..c7ee565643
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/arithmetic_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H
+#define MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/arithmetic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ArithmeticInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                         OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_ARITHMETIC_INFER_H
diff --git a/mindspore/lite/nnacl/infer/assert_op_infer.c b/mindspore/lite/nnacl/infer/assert_op_infer.c
new file mode 100644
index 0000000000..5fabc13637
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/assert_op_infer.c
@@ -0,0 +1,22 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/assert_op_infer.h"
+
+int AssertOpInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                       OpParameter *parameter) {
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/assert_op_infer.h b/mindspore/lite/nnacl/infer/assert_op_infer.h
new file mode 100644
index 0000000000..4e03466f11
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/assert_op_infer.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H +#define MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AssertOpInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ASSERT_OP_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_add_infer.c b/mindspore/lite/nnacl/infer/assign_add_infer.c new file mode 100644 index 0000000000..807c9fd7b6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/assign_add_infer.c @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/assign_add_infer.h" + +int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *x = inputs[0]; + const TensorC *y = inputs[1]; + TensorC *out = outputs[0]; + if (x->data_type_ != y->data_type_) { + return NNACL_ERR; + } + SetDataTypeFormat(out, x); + SetShapeTensor(out, x); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/assign_add_infer.h b/mindspore/lite/nnacl/infer/assign_add_infer.h new file mode 100644 index 0000000000..0290e88b57 --- /dev/null +++ b/mindspore/lite/nnacl/infer/assign_add_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H +#define MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int AssignAddInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ASSIGN_ADD_INFER_H diff --git a/mindspore/lite/nnacl/infer/assign_infer.c b/mindspore/lite/nnacl/infer/assign_infer.c new file mode 100644 index 0000000000..fcdf7a0ef5 --- /dev/null +++ b/mindspore/lite/nnacl/infer/assign_infer.c @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/assign_infer.h"
+
+int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter) {
+  int check_ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 2);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+
+  if (GetElementNum(inputs[0]) != GetElementNum(inputs[1])) {
+    return NNACL_ERR;
+  }
+
+  if (outputs_size != 0) {
+    TensorC *out = outputs[0];
+    SetDataTypeFormat(out, inputs[0]);
+    out->shape_size_ = 1;
+    out->shape_[0] = 1;
+  }
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/assign_infer.h b/mindspore/lite/nnacl/infer/assign_infer.h
new file mode 100644
index 0000000000..fe276b79e3
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/assign_infer.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_ASSIGN_INFER_H
+#define MINDSPORE_LITE_NNACL_ASSIGN_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int AssignInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_ASSIGN_INFER_H
diff --git a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c
new file mode 100644
index 0000000000..6f566f89f0
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.c
@@ -0,0 +1,71 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/audio_spectrogram_infer.h"
+
+int Log2Ceil(uint32_t length) {
+  if (length == 0) {
+    return -1;
+  }
+  const uint32_t original = length;
+  int floor = 0;
+  for (int i = 4; i >= 0; --i) {
+    const int shift = (1 << i);
+    uint32_t tmp = length >> shift;
+    if (tmp != 0) {
+      length = tmp;
+      floor += shift;
+    }
+  }
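+  // Worked example: for length = 400, floor ends up as 8; 400 is not a power of
+  // two, so the result is 9 and GetFftLength(400) yields 512.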
+  // The power-of-two test has to use the saved input value: after the loop,
+  // length has been shifted all the way down to 1 and would pass for any input.
+  return original == (original & ~(original - 1)) ? floor : floor + 1;
+}
+
+uint32_t GetFftLength(uint32_t length) {
+  int shift = Log2Ceil(length);
+  return 1 << shift;
+}
+
+int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                               size_t outputs_size, OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  if (input->shape_size_ != 2) {
+    return NNACL_ERR;
+  }
+  AudioSpectrogramParameter *param = (AudioSpectrogramParameter *)parameter;
+  if (param->window_size_ < 2) {
+    return NNACL_ERR;
+  }
+  if (param->stride_ < 1) {
+    return NNACL_ERR;
+  }
+  int output_shape[3];
+  output_shape[0] = input->shape_[1];
+  int sample_sub_window = input->shape_[0] - param->window_size_;
+  output_shape[1] = sample_sub_window < 0 ? 0 : 1 + sample_sub_window / param->stride_;
+  // compute fft length
+  int fft_length = GetFftLength(param->window_size_);
+  output_shape[2] = fft_length / 2 + 1;
+  SetShapeArray(output, output_shape, 3);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h
new file mode 100644
index 0000000000..030883c8b6
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/audio_spectrogram_infer.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H
+#define MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct AudioSpectrogramParameter {
+  OpParameter op_parameter_;
+  int window_size_;
+  int stride_;
+} AudioSpectrogramParameter;
+
+int AudioSpectrogramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                               size_t outputs_size, OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_AUDIO_SPECTROGRAM_INFER_H
diff --git a/mindspore/lite/nnacl/infer/batch_to_space_infer.c b/mindspore/lite/nnacl/infer/batch_to_space_infer.c
new file mode 100644
index 0000000000..b7910a7257
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/batch_to_space_infer.c
@@ -0,0 +1,137 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/batch_to_space_infer.h"
+
+int SetOutputShapeFromParam(const TensorC *const *inputs, TensorC **outputs, OpParameter *parameter) {
+  int input_shape[MAX_SHAPE_SIZE];
+  size_t input_shape_size = 0;
+  ShapeSet(input_shape, &input_shape_size, inputs[0]->shape_, inputs[0]->shape_size_);
+
+  if (input_shape_size != 4) {
+    return NNACL_PARAM_INVALID;
+  }
+
+  BatchToSpaceParameter *param = (BatchToSpaceParameter *)parameter;
+  int32_t *block_shape = param->block_shape_;
+  int32_t *crops = param->crops_;
+  int mul_block_shape = 1;
+
+  for (size_t i = 0; i < 2; ++i) {
+    if (block_shape[i] <= 0) {
+      return NNACL_PARAM_INVALID;
+    }
+    if (input_shape[kNHWC_N] % block_shape[i]) {
+      return NNACL_ERR;
+    }
+    mul_block_shape *= block_shape[i];
+  }
+
+  if (input_shape[kNHWC_N] < mul_block_shape) {
+    return NNACL_PARAM_INVALID;
+  }
+  for (size_t i = 0; i < 4; ++i) {
+    if (crops[i] < 0) {
+      return NNACL_PARAM_INVALID;
+    }
+  }
+  int32_t output_shape[MAX_SHAPE_SIZE];
+  size_t output_shape_size = input_shape_size;
+  output_shape[kNHWC_N] = input_shape[kNHWC_N] / mul_block_shape;
+  output_shape[kNHWC_H] = input_shape[kNHWC_H] * block_shape[0] - crops[0] - crops[1];
+  output_shape[kNHWC_W] = input_shape[kNHWC_W] * block_shape[1] - crops[2] - crops[3];
+  output_shape[kNHWC_C] = input_shape[kNHWC_C];
+  SetShapeArray(outputs[0], output_shape, output_shape_size);
+  return NNACL_OK;
+}
+
+int SetOutputShapeFromInput(const TensorC *const *inputs, TensorC **outputs) {
+  int input_shape[MAX_SHAPE_SIZE];
+  size_t input_shape_size = 0;
+  ShapeSet(input_shape, &input_shape_size, inputs[0]->shape_, inputs[0]->shape_size_);
+  if (input_shape_size != 4) {
+    return NNACL_PARAM_INVALID;
+  }
+  int *block_shape = (int *)(inputs[1]->data_);
+  int *crops = (int *)(inputs[2]->data_);
+  if (GetElementNum(inputs[1]) != 2) {
+    return NNACL_PARAM_INVALID;
+  }
+  if (GetElementNum(inputs[2]) != 4) {
+    return NNACL_PARAM_INVALID;
+  }
+  int mul_block_shape_ = 1;
+
+  for (size_t i = 0; i < 2; ++i) {
+    if (block_shape[i] <= 0) {
+      return NNACL_PARAM_INVALID;
+    }
+    if (input_shape[kNHWC_N] % block_shape[i]) {
+      return NNACL_ERR;
+    }
+    mul_block_shape_ *= block_shape[i];
+  }
+
+  if (input_shape[kNHWC_N] < mul_block_shape_) {
+    return NNACL_PARAM_INVALID;
+  }
+  for (size_t i = 0; i < 4; ++i) {
+    if (crops[i] < 0) {
+      return NNACL_PARAM_INVALID;
+    }
+  }
+  int32_t output_shape[MAX_SHAPE_SIZE];
+  size_t output_shape_size = input_shape_size;
+  output_shape[kNHWC_N] = input_shape[kNHWC_N] / mul_block_shape_;
+  output_shape[kNHWC_H] = input_shape[kNHWC_H] * block_shape[0] - crops[0] - crops[1];
+  output_shape[kNHWC_W] = input_shape[kNHWC_W] * block_shape[1] - crops[2] - crops[3];
+  output_shape[kNHWC_C] = input_shape[kNHWC_C];
+  SetShapeArray(outputs[0], output_shape, output_shape_size);
+  return NNACL_OK;
+}
+
+int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                           OpParameter *parameter) {
+  if (outputs_size != 1 || (inputs_size != 1 && inputs_size != 3)) {
+    return NNACL_PARAM_INVALID;
+  }
+
+  const TensorC *input = inputs[0];
+  if (input->format_ != Format_NHWC) {
+    return NNACL_ERR;
+  }
+  SetDataTypeFormat(outputs[0], input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  if (inputs_size == 1) {
+    int ret = SetOutputShapeFromParam(inputs, outputs, parameter);
+    return ret;
+  }
+  if (inputs_size == 3) {
+    if (inputs[0]->data_ == NULL) {
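+      // the input tensor has no data yet at this point, so shape inference is
+      // deferred until the data becomes available at run time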
+ return NNACL_INFER_INVALID; + } + if (inputs[1]->data_ == NULL || inputs[2]->data_ == NULL) { + return NNACL_ERR; + } + int ret = SetOutputShapeFromInput(inputs, outputs); + return ret; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/batch_to_space_infer.h b/mindspore/lite/nnacl/infer/batch_to_space_infer.h new file mode 100644 index 0000000000..261a1f76bf --- /dev/null +++ b/mindspore/lite/nnacl/infer/batch_to_space_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H +#define MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/batch_to_space.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BatchToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_BATCH_TO_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/bias_grad_infer.c b/mindspore/lite/nnacl/infer/bias_grad_infer.c new file mode 100644 index 0000000000..7cf1d678e7 --- /dev/null +++ b/mindspore/lite/nnacl/infer/bias_grad_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/bias_grad_infer.h" + +int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *in0 = inputs[0]; + TensorC *out = outputs[0]; + + int inshape[MAX_SHAPE_SIZE]; + size_t inshape_size = 0; + ShapeSet(inshape, &inshape_size, in0->shape_, in0->shape_size_); + int ndim = inshape_size; + for (int i = 0; i < ndim - 1; i++) { + inshape[i] = 1; + } + SetDataTypeFormat(out, in0); + SetShapeArray(out, inshape, inshape_size); + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/bias_grad_infer.h b/mindspore/lite/nnacl/infer/bias_grad_infer.h new file mode 100644 index 0000000000..2b40694d09 --- /dev/null +++ b/mindspore/lite/nnacl/infer/bias_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BiasGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_BIAS_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c new file mode 100644 index 0000000000..55a6342138 --- /dev/null +++ b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.c @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/binary_cross_entropy_infer.h" + +int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + const TensorC *x = inputs[0]; + TensorC *out = outputs[0]; + SetDataTypeFormat(out, x); + BinaryCrossEntropyParameter *param = (BinaryCrossEntropyParameter *)parameter; + int reduction = param->reduction; + if (reduction == 1 || reduction == 2) { + out->shape_size_ = 1; + out->shape_[0] = 1; + } else { + SetShapeTensor(out, x); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h new file mode 100644 index 0000000000..6727303255 --- /dev/null +++ b/mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H +#define MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32_grad/binary_cross_entropy.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BinaryCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_BINARY_CROSS_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/bn_grad_infer.c b/mindspore/lite/nnacl/infer/bn_grad_infer.c new file mode 100644 index 0000000000..9d4a921ae1 --- /dev/null +++ b/mindspore/lite/nnacl/infer/bn_grad_infer.c @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/bn_grad_infer.h" + +int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 6, 3); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *in = inputs[1]; + const TensorC *scale = inputs[2]; + if (in->shape_size_ != 4) { + return NNACL_INPUT_TENSOR_ERROR; + } + + SetShapeTensor(outputs[0], in); + SetDataTypeFormat(outputs[0], in); + SetShapeTensor(outputs[1], scale); + SetDataTypeFormat(outputs[1], scale); + SetShapeTensor(outputs[2], scale); + SetDataTypeFormat(outputs[2], scale); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/bn_grad_infer.h b/mindspore/lite/nnacl/infer/bn_grad_infer.h new file mode 100644 index 0000000000..a28f5b2f55 --- /dev/null +++ b/mindspore/lite/nnacl/infer/bn_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int BnGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_BN_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/broadcast_to_infer.c b/mindspore/lite/nnacl/infer/broadcast_to_infer.c new file mode 100644 index 0000000000..0580e3b301 --- /dev/null +++ b/mindspore/lite/nnacl/infer/broadcast_to_infer.c @@ -0,0 +1,68 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "nnacl/infer/broadcast_to_infer.h"
+
+int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                          OpParameter *parameter) {
+  if (inputs_size != 1 && inputs_size != 2) {
+    return NNACL_ERR;
+  }
+  if (outputs_size != 1) {
+    return NNACL_ERR;
+  }
+
+  const TensorC *input = inputs[0];
+  SetDataTypeFormat(outputs[0], input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  BroadcastToParameter *param = (BroadcastToParameter *)parameter;
+  int32_t dst_shape[MAX_SHAPE_SIZE];
+  size_t dst_shape_size = param->shape_size_;
+  for (size_t i = 0; i < dst_shape_size; i++) {
+    dst_shape[i] = param->shape_[i];
+  }
+  for (size_t i = 0; i < dst_shape_size; ++i) {
+    if (dst_shape[i] == -1) {
+      dst_shape[i] = inputs[0]->shape_[i];
+    }
+  }
+  const int *input_shape = input->shape_;
+  size_t input_shape_size = input->shape_size_;
+  int shape[MAX_SHAPE_SIZE];
+  size_t shape_size = dst_shape_size;
+  int input_shape_index = input_shape_size - 1;
+  if (input_shape_size > dst_shape_size) {
+    return NNACL_ERR;
+  }
+
+  for (int i = dst_shape_size - 1; i >= 0; --i) {
+    if (dst_shape[i] < 0) {
+      return NNACL_ERR;
+    }
+    if (input_shape_index >= 0) {
+      int dim = input_shape[input_shape_index];
+      if (dim != dst_shape[i] && dim != 1) {
+        return NNACL_ERR;
+      }
+    }
+    shape[i] = dst_shape[i];
+    --input_shape_index;
+  }
+  SetShapeArray(outputs[0], shape, shape_size);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/broadcast_to_infer.h b/mindspore/lite/nnacl/infer/broadcast_to_infer.h
new file mode 100644
index 0000000000..a7b8630a7a
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/broadcast_to_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H
+#define MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/fp32/broadcast_to_fp32.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int BroadcastToInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                          OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_BROADCAST_TO_INFER_H
diff --git a/mindspore/lite/nnacl/infer/cast_infer.c b/mindspore/lite/nnacl/infer/cast_infer.c
new file mode 100644
index 0000000000..8b84d95b1b
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/cast_infer.c
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/cast_infer.h"
+
+int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter) {
+  int check_ret = CheckAugmentNullOutputSize(inputs, inputs_size, outputs, outputs_size, parameter, 1);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  output->format_ = input->format_;
+  const TensorC *dst_type = inputs[1];
+  output->data_type_ = *((int *)dst_type->data_);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  if (input->data_type_ != kNumberTypeBool && input->data_type_ != kNumberTypeUInt8 &&
+      input->data_type_ != kNumberTypeInt8 && input->data_type_ != kNumberTypeInt32 &&
+      input->data_type_ != kNumberTypeFloat32 && input->data_type_ != kNumberTypeFloat16) {
+    return NNACL_INPUT_TENSOR_ERROR;
+  }
+
+  SetShapeTensor(output, input);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/cast_infer.h b/mindspore/lite/nnacl/infer/cast_infer.h
new file mode 100644
index 0000000000..6c669c7ca0
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/cast_infer.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_CAST_INFER_H
+#define MINDSPORE_LITE_NNACL_CAST_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int CastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_CAST_INFER_H
diff --git a/mindspore/lite/nnacl/infer/common_infer.c b/mindspore/lite/nnacl/infer/common_infer.c
new file mode 100644
index 0000000000..8ec609136e
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/common_infer.c
@@ -0,0 +1,455 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
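+// Illustrative usage of the C infer API (the identifiers below are placeholders,
+// not part of the runtime):
+//   TensorC in, out;                   // `in` has data_type_/format_/shape_ set
+//   OpParameter param = {0};
+//   param.infer_flag_ = true;
+//   const TensorC *inputs[] = {&in};
+//   TensorC *outputs[] = {&out};
+//   int ret = CommonInferShape(inputs, 1, outputs, 1, &param);
+//   // on NNACL_OK, `out` carries `in`'s data type, format and shape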
+#include "nnacl/infer/common_infer.h"
+#include <stdlib.h>
+#include <string.h>
+
+int FreeTensorListData(TensorListC *tensor_list) {
+  // detach each tensor in tensors_; the tensors themselves are released by their owner
+  if (tensor_list->element_num_ == 0) {
+    return NNACL_OK;
+  }
+  for (size_t i = 0; i < tensor_list->element_num_; ++i) {
+    tensor_list->tensors_[i] = NULL;
+  }
+  return NNACL_OK;
+}
+
+int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape) {
+  // Creates a new tensors_ array and sets the shape (param 2: tensor_shape) and
+  // data type (tensors_data_type_ = param 1: dtype) of each tensor in tensors_.
+  // Afterwards the caller still has to call MallocData to allocate the data
+  // buffer of each tensor in tensors_.
+
+  if (tensor_list->element_num_ == 0) {
+    return NNACL_OK;
+  }
+  if (((size_t)(tensor_list->element_num_)) != tensor_shape->size_) {
+    return NNACL_ERR;
+  }
+  tensor_list->tensors_data_type_ = dtype;
+  tensor_list->tensors_ = (TensorC **)malloc(tensor_list->element_num_ * sizeof(TensorC *));  // freed in infer_manager
+  if (tensor_list->tensors_ == NULL) {
+    return NNACL_NULL_PTR;
+  }
+  memset(tensor_list->tensors_, 0, tensor_list->element_num_ * sizeof(TensorC *));
+  for (size_t i = 0; i < tensor_list->element_num_; ++i) {
+    TensorC *tensor_ptr = (TensorC *)malloc(sizeof(TensorC));
+    if (tensor_ptr == NULL) {
+      return NNACL_ERR;
+    }
+    memset(tensor_ptr, 0, sizeof(TensorC));
+    tensor_ptr->format_ = Format_NHWC;
+    tensor_ptr->data_type_ = dtype;
+    ShapeSet(tensor_ptr->shape_, &(tensor_ptr->shape_size_), tensor_shape->shape_[i], tensor_shape->shape_size_[i]);
+    tensor_list->tensors_[i] = tensor_ptr;
+  }
+  return NNACL_OK;
+}
+
+int TensorListMergeShape(int *element_shape, size_t *element_shape_size, const int *tmp, size_t tmp_size) {
+  if (*element_shape_size >= 255 || element_shape[0] == -1) {
+    ShapeSet(element_shape, element_shape_size, tmp, tmp_size);
+    return NNACL_OK;
+  }
+  if (*element_shape_size != tmp_size) {
+    return NNACL_ERR;
+  }
+  for (size_t j = 0; j < tmp_size; ++j) {
+    if (element_shape[j] >= 0 && tmp[j] >= 0 && element_shape[j] != tmp[j]) {
+      return NNACL_ERR;
+    }
+    element_shape[j] = element_shape[j] >= 0 ?
element_shape[j] : tmp[j]; + } + return NNACL_OK; +} + +bool TensorListIsFullyDefined(int *shape, size_t shape_size) { + for (size_t i = 0; i < shape_size; ++i) { + if (shape[i] < 0) { + return false; + } + } + return true; +} + +int CheckAugmentNull(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + for (size_t i = 0; i < inputs_size; i++) { + if (inputs[i] == NULL) { + return NNACL_NULL_PTR; + } + } + for (size_t i = 0; i < outputs_size; i++) { + if (outputs[i] == NULL) { + return NNACL_NULL_PTR; + } + } + if (parameter == NULL) { + return NNACL_NULL_PTR; + } + return NNACL_OK; +} + +int CheckAugmentNullSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t inputs_size_obj, size_t outputs_size_obj) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret == NNACL_NULL_PTR) { + return NNACL_NULL_PTR; + } + if (inputs_size != inputs_size_obj || outputs_size != outputs_size_obj) { + return NNACL_INPUT_TENSOR_ERROR; + } + return NNACL_OK; +} + +int CheckAugmentNullSizeInputTwo(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter, size_t inputs_size_obj_0, + size_t inputs_size_obj_1, size_t outputs_size_obj) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret == NNACL_NULL_PTR) { + return NNACL_NULL_PTR; + } + if ((inputs_size != inputs_size_obj_0 && inputs_size != inputs_size_obj_1) || outputs_size != outputs_size_obj) { + return NNACL_INPUT_TENSOR_ERROR; + } + return NNACL_OK; +} + +int CheckAugmentNullInputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t inputs_size_obj) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret == NNACL_NULL_PTR) { + return NNACL_NULL_PTR; + } + if (inputs_size != inputs_size_obj) { + return NNACL_INPUT_TENSOR_ERROR; + } + return NNACL_OK; +} + +int CheckAugmentNullOutputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter, size_t outputs_size_obj) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret == NNACL_NULL_PTR) { + return NNACL_NULL_PTR; + } + if (outputs_size != outputs_size_obj) { + return NNACL_INPUT_TENSOR_ERROR; + } + return NNACL_OK; +} + +int SetShapeTensor(TensorC *dst, const TensorC *src) { + for (size_t i = 0; i < src->shape_size_; i++) { + dst->shape_[i] = src->shape_[i]; + } + dst->shape_size_ = src->shape_size_; + return NNACL_OK; +} + +int SetShapeArray(TensorC *dst, int *src, size_t src_size) { + for (size_t i = 0; i < src_size; i++) { + dst->shape_[i] = src[i]; + } + dst->shape_size_ = src_size; + return NNACL_OK; +} + +void SetDataTypeFormat(TensorC *dst, const TensorC *src) { + dst->format_ = src->format_; + dst->data_type_ = src->data_type_; +} + +int GetBatch(const TensorC *tensor) { + if (tensor->shape_size_ != 4 && tensor->shape_size_ != 2) { + return -1; + } + switch (tensor->format_) { + case Format_NHWC: + case Format_NHWC4: + case Format_NCHW: + case Format_NC4HW4: + case Format_KCHW: + case Format_KHWC: + case Format_NC: + case Format_NC4: + return tensor->shape_[0]; + case Format_HWCK: + case Format_CHWK: + return tensor->shape_[3]; + case Format_HWKC: + return 
tensor->shape_[2];
+    case Format_CKHW:
+      return tensor->shape_[1];
+    default:
+      return -1;
+  }
+}
+int GetHeight(const TensorC *tensor) {
+  if (tensor->shape_size_ != 4 && tensor->shape_size_ != 2) {
+    return -1;
+  }
+  switch (tensor->format_) {
+    case Format_NCHW:
+    case Format_KCHW:
+    case Format_CKHW:
+      return tensor->shape_[2];
+    case Format_NHWC:
+    case Format_NHWC4:
+    case Format_NC4HW4:
+    case Format_KHWC:
+    case Format_CHWK:
+      return tensor->shape_[1];
+    case Format_HWCK:
+    case Format_HWKC:
+    case Format_HW:
+    case Format_HW4:
+      return tensor->shape_[0];
+    default:
+      return -1;
+  }
+}
+int GetWidth(const TensorC *tensor) {
+  if (tensor->shape_size_ != 4 && tensor->shape_size_ != 2) {
+    return -1;
+  }
+  switch (tensor->format_) {
+    case Format_NCHW:
+    case Format_KCHW:
+    case Format_CKHW:
+      return tensor->shape_[3];
+    case Format_KHWC:
+    case Format_NHWC:
+    case Format_NHWC4:
+    case Format_NC4HW4:
+    case Format_CHWK:
+      return tensor->shape_[2];
+    case Format_HWCK:
+    case Format_HWKC:
+    case Format_HW:
+    case Format_HW4:
+      return tensor->shape_[1];
+    default:
+      return -1;
+  }
+}
+int GetChannel(const TensorC *tensor) {
+  if (tensor->shape_size_ != 4 && tensor->shape_size_ != 2) {
+    return -1;
+  }
+  switch (tensor->format_) {
+    case Format_NCHW:
+    case Format_KCHW:
+    case Format_NC:
+    case Format_NC4:
+      return tensor->shape_[1];
+    case Format_HWCK:
+      return tensor->shape_[2];
+    case Format_HWKC:
+    case Format_NHWC:
+    case Format_NHWC4:
+    case Format_NC4HW4:
+    case Format_KHWC:
+      return tensor->shape_[3];
+    case Format_CKHW:
+    case Format_CHWK:
+      return tensor->shape_[0];
+    default:
+      return -1;
+  }
+}
+
+int GetElementNum(const TensorC *tensor) {
+  if (tensor->shape_size_ == 0) {
+    return 1;  // scalar mode
+  }
+  int res = 1;
+  for (size_t i = 0; i < tensor->shape_size_; i++) {
+    res = res * tensor->shape_[i];
+  }
+  return res;
+}
+int GetDimensionSize(const TensorC *tensor, const size_t index) {
+  int dim_size = -1;
+  if (index < tensor->shape_size_) {
+    dim_size = tensor->shape_[index];
+  }
+  return dim_size;
+}
+
+int ShapeSet(int *dst_shape, size_t *dst_shape_size, const int *src_shape, size_t src_shape_size) {
+  for (size_t i = 0; i < src_shape_size; i++) {
+    dst_shape[i] = src_shape[i];
+  }
+  *dst_shape_size = src_shape_size;
+  return NNACL_OK;
+}
+
+int ShapePush(int *shape, size_t *shape_size, int value) {
+  shape[*shape_size] = value;
+  *shape_size = *shape_size + 1;
+  return NNACL_OK;
+}
+
+int ShapeInsert(int *shape, size_t *shape_size, int index, int value) {
+  if (index < 0 || (size_t)index > *shape_size) {
+    return NNACL_ERR;
+  }
+  for (int i = *shape_size; i > index; i--) {
+    shape[i] = shape[i - 1];
+  }
+  shape[index] = value;
+  *shape_size = *shape_size + 1;
+  return NNACL_OK;
+}
+
+int ShapeErase(int *shape, size_t *shape_size, int index) {
+  if (index < 0 || (size_t)index >= *shape_size) {
+    return NNACL_ERR;
+  }
+
+  for (size_t i = index; i < *shape_size - 1; i++) {
+    shape[i] = shape[i + 1];
+  }
+  *shape_size = *shape_size - 1;
+  return NNACL_OK;
+}
+
+bool ShapeEqual(const int *shape0, size_t shape0_size, const int *shape1, size_t shape1_size) {
+  if (shape0_size != shape1_size) {
+    return false;
+  }
+  for (size_t i = 0; i < shape0_size; i++) {
+    if (shape0[i] != shape1[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+void iswap(int *a, int *b) {
+  int tmp = *a;
+  *a = *b;
+  *b = tmp;
+}
+
+int imin(int a, int b) { return a > b ? b : a; }
+
+int imax(int a, int b) { return a < b ? b : a; }
+
+// ops whose output is identical to their input refer to this function, e.g.
+// 1.
zeros_like +int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (parameter == NULL || inputs[0] == NULL || outputs[0] == NULL) { + return NNACL_NULL_PTR; + } + SetDataTypeFormat(outputs[0], inputs[0]); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(outputs[0], inputs[0]); + return NNACL_OK; +} + +int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + output->data_type_ = kNumberTypeFloat32; + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int input_shape[MAX_SHAPE_SIZE]; + size_t input_shape_size = 0; + ShapeSet(input_shape, &input_shape_size, input->shape_, input->shape_size_); + input_shape_size--; + SetShapeArray(output, input_shape, input_shape_size); + return NNACL_OK; +} + +int VectorCInit(VectorC *vc, size_t per_malloc_size) { + vc->data_ = (int *)malloc(per_malloc_size * sizeof(int)); + if (vc->data_ == NULL) { + return NNACL_ERR; + } + vc->size_ = 0; + vc->max_size_ = per_malloc_size; + vc->per_malloc_size_ = per_malloc_size; + return NNACL_OK; +} + +void VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size) { + if (src_shape_size == 0) { + vc->size_ = 0; + } else { + free(vc->data_); + vc->max_size_ = (src_shape_size / vc->per_malloc_size_ + 1) * vc->per_malloc_size_; + vc->data_ = (int *)malloc(sizeof(int) * vc->max_size_); + for (size_t i = 0; i < src_shape_size; i++) { + vc->data_[i] = src_shape[i]; + } + vc->size_ = src_shape_size; + } +} + +void VectorCPush(VectorC *vc, int value) { + if (vc->size_ + 1 > vc->max_size_) { + int *tmp = (int *)malloc(vc->per_malloc_size_ * sizeof(int) + vc->max_size_ * sizeof(int)); + memcpy(tmp, vc->data_, vc->size_ * sizeof(int)); + free(vc->data_); + vc->data_ = tmp; + vc->max_size_ = vc->max_size_ + vc->per_malloc_size_; + } + vc->data_[vc->size_] = value; + vc->size_++; +} + +void VectorCInsert(VectorC *vc, int index, int value) { + if (vc->size_ + 1 > vc->max_size_) { + int *tmp = (int *)malloc(vc->per_malloc_size_ * sizeof(int) + vc->max_size_ * sizeof(int)); + memcpy(tmp, vc->data_, vc->size_ * sizeof(int)); + free(vc->data_); + vc->data_ = tmp; + vc->max_size_ = vc->max_size_ + vc->per_malloc_size_; + } + memmove(vc->data_ + index + 1, vc->data_ + index, (vc->size_ - index) * sizeof(int)); + vc->data_[index] = value; + vc->size_++; +} + +void VectorCErase(VectorC *vc, int index) { + memmove(vc->data_ + index, vc->data_ + index + 1, (vc->size_ - index - 1) * sizeof(int)); + vc->size_--; +} + +bool VectorCEqual(VectorC *vc1, VectorC *vc2) { + if (vc1->size_ != vc2->size_) { + return false; + } + for (size_t i = 0; i < vc1->size_; i++) { + if (vc1->data_[i] != vc2->data_[i]) { + return false; + } + } + return true; +} + +void VectorCFree(VectorC *vc) { + free(vc->data_); + vc->data_ = NULL; +} diff --git a/mindspore/lite/nnacl/infer/common_infer.h b/mindspore/lite/nnacl/infer/common_infer.h new file mode 100644 index 0000000000..792dd98470 --- /dev/null +++ b/mindspore/lite/nnacl/infer/common_infer.h @@ -0,0 +1,209 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_NNACL_COMMON_H_ +#define MINDSPORE_LITE_NNACL_COMMON_H_ + +#include <stddef.h> +#include "nnacl/errorcode.h" +#include "nnacl/op_base.h" +#include "nnacl/tensor_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define kNCHW_N 0 +#define kNCHW_C 1 +#define kNCHW_H 2 +#define kNCHW_W 3 + +typedef enum FormatC { + Format_NCHW = 0, + Format_NHWC = 1, + Format_NHWC4 = 2, + Format_HWKC = 3, + Format_HWCK = 4, + Format_KCHW = 5, + Format_CKHW = 6, + Format_KHWC = 7, + Format_CHWK = 8, + Format_HW = 9, + Format_HW4 = 10, + Format_NC = 11, + Format_NC4 = 12, + Format_NC4HW4 = 100, + Format_NUM_OF_FORMAT = 101, + Format_MIN = Format_NCHW, + Format_MAX = Format_NUM_OF_FORMAT +} FormatC; + +typedef enum TypeIdC { + kTypeUnknown = 0, + kMetaTypeBegin = kTypeUnknown, + kMetaTypeType, // Type + kMetaTypeAnything, + kMetaTypeObject, + kMetaTypeTypeType, // TypeType + kMetaTypeProblem, + kMetaTypeExternal, + kMetaTypeNone, + kMetaTypeNull, + kMetaTypeEllipsis, + kMetaTypeEnd, + // + // Object types + // + kObjectTypeBegin = kMetaTypeEnd, + kObjectTypeNumber, + kObjectTypeString, + kObjectTypeList, + kObjectTypeTuple, + kObjectTypeSlice, + kObjectTypeKeyword, + kObjectTypeTensorType, + kObjectTypeRowTensorType, + kObjectTypeSparseTensorType, + kObjectTypeUndeterminedType, + kObjectTypeClass, + kObjectTypeDictionary, + kObjectTypeFunction, + kObjectTypeJTagged, + kObjectTypeSymbolicKeyType, + kObjectTypeEnvType, + kObjectTypeRefKey, + kObjectTypeRef, + kObjectTypeEnd, + // + // Number Types + // + kNumberTypeBegin = kObjectTypeEnd, + kNumberTypeBool, + kNumberTypeInt, + kNumberTypeInt8, + kNumberTypeInt16, + kNumberTypeInt32, + kNumberTypeInt64, + kNumberTypeUInt, + kNumberTypeUInt8, + kNumberTypeUInt16, + kNumberTypeUInt32, + kNumberTypeUInt64, + kNumberTypeFloat, + kNumberTypeFloat16, + kNumberTypeFloat32, + kNumberTypeFloat64, + kNumberTypeComplex64, + kNumberTypeEnd +} TypeIdC; + +enum NNACLLshProjectionType { + LshProjectionType_UNKNOWN = 0, + LshProjectionType_SPARSE = 1, + LshProjectionType_DENSE = 2, + LshProjectionType_MIN = LshProjectionType_UNKNOWN, + LshProjectionType_MAX = LshProjectionType_DENSE +}; + +enum NNACLQuantType { + QuantType_QUANT_NONE = 0, + QuantType_AwareTraining = 1, + QuantType_WeightQuant = 2, + QuantType_PostTraining = 3, + QuantType_MIN = QuantType_QUANT_NONE, + QuantType_MAX = QuantType_PostTraining +}; + +typedef struct vvector { + int **shape_; // value of shapes + int *shape_size_; // size of shape + size_t size_; // number of shapes +} vvector; + +typedef struct TensorListC { + int data_type_; + int format_; + + TensorC **tensors_; + size_t element_num_; + int tensors_data_type_; // element_data_type_, keep same as c++ + int element_shape_[MAX_SHAPE_SIZE]; + size_t element_shape_size_; + int max_elements_num_; +} TensorListC; + +typedef struct VectorC { + int *data_; + size_t size_; + size_t max_size_; + size_t per_malloc_size_; +} VectorC; + +int MallocTensorListData(TensorListC *tensor_list, TypeIdC dtype, vvector *tensor_shape); +int TensorListMergeShape(int *element_shape, size_t *element_shape_size, const int *tmp, 
                         size_t tmp_size);
+bool TensorListIsFullyDefined(int *shape, size_t shape_size);
+
+int GetBatch(const TensorC *tensor);
+int GetHeight(const TensorC *tensor);
+int GetWidth(const TensorC *tensor);
+int GetChannel(const TensorC *tensor);
+int GetElementNum(const TensorC *tensor);
+int GetDimensionSize(const TensorC *tensor, const size_t index);
+
+int CheckAugmentNull(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter);
+int CheckAugmentNullSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                         OpParameter *parameter, size_t inputs_size_obj, size_t outputs_size_obj);
+int CheckAugmentNullSizeInputTwo(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                 size_t outputs_size, OpParameter *parameter, size_t inputs_size_obj_0,
+                                 size_t inputs_size_obj_1, size_t outputs_size_obj);
+int CheckAugmentNullInputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                              OpParameter *parameter, size_t inputs_size_obj);
+int CheckAugmentNullOutputSize(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                               OpParameter *parameter, size_t outputs_size_obj);
+void SetDataTypeFormat(TensorC *dst, const TensorC *src);
+
+int SetShapeTensor(TensorC *dst, const TensorC *src);
+int SetShapeArray(TensorC *dst, int *src, size_t src_size);
+int ShapeSet(int *dst_shape, size_t *dst_shape_size, const int *src_shape, size_t src_shape_size);
+int ShapePush(int *shape, size_t *shape_size, int value);
+int ShapeInsert(int *shape, size_t *shape_size, int index, int value);
+int ShapeErase(int *shape, size_t *shape_size, int index);
+bool ShapeEqual(const int *shape0, size_t shape0_size, const int *shape1, size_t shape1_size);
+
+void iswap(int *a, int *b);
+
+int imin(int a, int b);
+int imax(int a, int b);
+
+int CommonInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter);
+int FftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                  OpParameter *parameter);
+
+int VectorCInit(VectorC *vc, size_t per_malloc_size);
+void VectorCSet(VectorC *vc, const int *src_shape, size_t src_shape_size);
+void VectorCPush(VectorC *vc, int value);
+void VectorCInsert(VectorC *vc, int index, int value);
+void VectorCErase(VectorC *vc, int index);
+bool VectorCEqual(VectorC *vc1, VectorC *vc2);
+void VectorCFree(VectorC *vc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // MINDSPORE_LITE_NNACL_COMMON_H_
diff --git a/mindspore/lite/nnacl/infer/concat_infer.c b/mindspore/lite/nnacl/infer/concat_infer.c
new file mode 100644
index 0000000000..330801c16b
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/concat_infer.c
@@ -0,0 +1,73 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
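
The Shape* helpers declared above are the building blocks the infer functions below use on fixed-capacity shape buffers. A minimal sketch with hypothetical values (annotation only, not patch content):

static void ShapeHelpersDemo(void) {
  int shape[MAX_SHAPE_SIZE];
  size_t shape_size = 0;
  const int src[4] = {1, 224, 224, 3};
  ShapeSet(shape, &shape_size, src, 4);   // {1, 224, 224, 3}
  ShapeErase(shape, &shape_size, 0);      // {224, 224, 3}
  ShapeInsert(shape, &shape_size, 0, 2);  // {2, 224, 224, 3}
  ShapePush(shape, &shape_size, 1);       // {2, 224, 224, 3, 1}
}
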
+ */ + +#include "nnacl/infer/concat_infer.h" + +int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullOutputSize(inputs, inputs_size, outputs, outputs_size, parameter, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input0 = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input0); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + const int *input0_shape = inputs[0]->shape_; + size_t input0_shape_size = inputs[0]->shape_size_; + + ConcatParameter *param = (ConcatParameter *)parameter; + int axis = param->axis_ < 0 ? param->axis_ + input0_shape_size : param->axis_; + if (axis < 0 || axis >= input0_shape_size) { + return NNACL_ERR; + } + int input0_shape_without_axis[MAX_SHAPE_SIZE]; + size_t input0_shape_without_axis_size = 0; + ShapeSet(input0_shape_without_axis, &input0_shape_without_axis_size, input0_shape, input0_shape_size); + ShapeErase(input0_shape_without_axis, &input0_shape_without_axis_size, axis); + int output_axis_dim = input0_shape[axis]; + for (size_t i = 1; i < inputs_size; ++i) { + int shape_tmp[MAX_SHAPE_SIZE]; + size_t shape_tmp_size = 0; + ShapeSet(shape_tmp, &shape_tmp_size, inputs[i]->shape_, inputs[i]->shape_size_); + if (shape_tmp_size != input0_shape_size) { + return NNACL_PARAM_INVALID; + } + if ((inputs[i]->data_type_ != output->data_type_) && + !((inputs[i]->data_type_ == kNumberTypeFloat16 && output->data_type_ == kNumberTypeFloat32) || + (inputs[i]->data_type_ == kNumberTypeFloat32 && output->data_type_ == kNumberTypeFloat16))) { + return NNACL_PARAM_INVALID; + } + int axis_tmp = shape_tmp[axis]; + ShapeErase(shape_tmp, &shape_tmp_size, axis); + if (!ShapeEqual(input0_shape_without_axis, input0_shape_without_axis_size, shape_tmp, shape_tmp_size)) { + return NNACL_ERR; + } + output_axis_dim += axis_tmp; + } + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = input0_shape_size; + for (size_t i = 0; i < input0_shape_size; i++) { + output_shape[i] = input0_shape[i]; + } + output_shape[axis] = output_axis_dim; + SetShapeArray(outputs[0], output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/concat_infer.h b/mindspore/lite/nnacl/infer/concat_infer.h new file mode 100644 index 0000000000..08f3b8ff78 --- /dev/null +++ b/mindspore/lite/nnacl/infer/concat_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
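
A worked example for ConcatInferShape above, with hypothetical shapes (annotation, not patch content):

/*
 * inputs {2, 4, 5} and {2, 3, 5}, param->axis_ = -2 on rank 3 -> axis = 1.
 * Non-axis dims must match ({2, 5} == {2, 5}); the axis dims are summed:
 * output_axis_dim = 4 + 3 = 7, so the output shape is {2, 7, 5}.
 */
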
+ */ +#ifndef MINDSPORE_LITE_NNACL_CONCAT_INFER_H +#define MINDSPORE_LITE_NNACL_CONCAT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/concat_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ConcatInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CONCAT_INFER_H diff --git a/mindspore/lite/nnacl/infer/constant_of_shape_infer.c b/mindspore/lite/nnacl/infer/constant_of_shape_infer.c new file mode 100644 index 0000000000..49bd00995e --- /dev/null +++ b/mindspore/lite/nnacl/infer/constant_of_shape_infer.c @@ -0,0 +1,64 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/constant_of_shape_infer.h" + +int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *in_tensor = inputs[0]; + TensorC *out_tensor = outputs[0]; + ConstantOfShapeParameter *param = (ConstantOfShapeParameter *)parameter; + out_tensor->data_type_ = (TypeIdC)(param->data_type_); + out_tensor->format_ = in_tensor->format_; + if (!parameter->infer_flag_ || in_tensor->data_ == NULL) { + return NNACL_INFER_INVALID; + } + int size = GetElementNum(in_tensor); + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = size; + switch (in_tensor->data_type_) { + case kNumberTypeInt32: { + int32_t *in_data = (int32_t *)(in_tensor->data_); + for (int i = 0; i < size; ++i) { + out_shape[i] = in_data[i]; + if (out_shape[i] <= 0) { + return NNACL_ERR; + } + } + break; + } + case kNumberTypeInt64: { + int64_t *in_data = (int64_t *)(in_tensor->data_); + for (int i = 0; i < size; ++i) { + out_shape[i] = in_data[i]; + if (out_shape[i] <= 0) { + return NNACL_ERR; + } + } + break; + } + default: + return NNACL_INFER_INVALID; + } + + SetShapeArray(out_tensor, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/constant_of_shape_infer.h b/mindspore/lite/nnacl/infer/constant_of_shape_infer.h new file mode 100644 index 0000000000..2c51287201 --- /dev/null +++ b/mindspore/lite/nnacl/infer/constant_of_shape_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
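
A worked example for ConstantOfShapeInferShape above, with hypothetical data (annotation, not patch content):

/*
 * A 1-D int32 input holding {2, 3, 4} has GetElementNum() == 3, so the
 * output becomes a rank-3 tensor of shape {2, 3, 4} whose data type comes
 * from param->data_type_. Any entry <= 0 is rejected with NNACL_ERR, and
 * missing input data defers inference with NNACL_INFER_INVALID.
 */
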
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H
+#define MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/constant_of_shape.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int ConstantOfShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                              OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_CONSTANT_OF_SHAPE_INFER_H
diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c
new file mode 100644
index 0000000000..6f2bfeeb8e
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.c
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/conv2d_grad_filter_infer.h"
+
+int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                               OpParameter *parameter) {
+  // The filter shape is read from the third input, so three inputs are required.
+  if (inputs_size < 3 || outputs_size != 1) {
+    return NNACL_ERR;
+  }
+  SetDataTypeFormat(outputs[0], inputs[0]);
+
+  if (inputs[2]->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  size_t filter_shape_size_ = 4;
+  int filter_shape_[MAX_SHAPE_SIZE];
+  const int nchw2nhwc[4] = {0, 2, 3, 1};  // permute (out_c, in_c, k_h, k_w) -> (out_c, k_h, k_w, in_c)
+  for (size_t i = 0; i < filter_shape_size_; i++) {
+    filter_shape_[i] = *((int *)(inputs[2]->data_) + nchw2nhwc[i]);
+  }
+
+  SetShapeArray(outputs[0], filter_shape_, filter_shape_size_);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h
new file mode 100644
index 0000000000..2fa82c41de
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
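
How the nchw2nhwc table in Conv2dGradFilterInferShape above plays out, with a hypothetical filter-shape tensor (annotation, not patch content):

/*
 * The third input delivers the filter dims in (out_c, in_c, k_h, k_w)
 * order. With data {64, 32, 3, 3}:
 *   filter_shape_[i] = data[nchw2nhwc[i]]
 *   -> {data[0], data[2], data[3], data[1]} = {64, 3, 3, 32}
 */
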
+#ifndef MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H
+#define MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/conv_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int Conv2dGradFilterInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                               OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_CONV2D_GRAD_FILTER_INFER_H
diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c
new file mode 100644
index 0000000000..5aa6f929a4
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.c
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/conv2d_grad_input_infer.h"
+
+int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                              OpParameter *parameter) {
+  // The input shape is read from the third input, so three inputs are required.
+  if (inputs_size < 3 || outputs_size != 1) {
+    return NNACL_ERR;
+  }
+  const TensorC *in0 = inputs[0];
+  TensorC *out = outputs[0];
+
+  if (in0 == NULL || out == NULL) {
+    return NNACL_NULL_PTR;
+  }
+  SetDataTypeFormat(out, in0);
+
+  if (inputs[2]->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  size_t shape_size_ = in0->shape_size_;
+  int shape_[MAX_SHAPE_SIZE];
+  const int nchw2nhwc[4] = {0, 2, 3, 1};
+  for (size_t i = 0; i < shape_size_ && i < 4; i++) {  // nchw2nhwc has 4 entries; conv inputs are 4-D
+    shape_[i] = *((int *)(inputs[2]->data_) + nchw2nhwc[i]);
+  }
+  SetShapeArray(out, shape_, shape_size_);
+
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h
new file mode 100644
index 0000000000..4ea80be53f
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H +#define MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Conv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/conv2d_infer.c b/mindspore/lite/nnacl/infer/conv2d_infer.c new file mode 100644 index 0000000000..5a64063a3c --- /dev/null +++ b/mindspore/lite/nnacl/infer/conv2d_infer.c @@ -0,0 +1,99 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "nnacl/infer/conv2d_infer.h" + +void ConvInferShape(int input_h, int input_w, int *output_h, int *output_w, ConvParameter *param) { + int kernel_w = param->kernel_w_; + int kernel_h = param->kernel_h_; + int stride_w = param->stride_w_; + int stride_h = param->stride_h_; + int dilate_w = param->dilation_w_; + int dilate_h = param->dilation_h_; + + if (param->pad_mode_ == Pad_same) { // maybe error + *output_w = ceil((float)(input_w) / (float)(stride_w)); + *output_h = ceil((float)(input_h) / (float)(stride_h)); + int pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h); + int pad_w_all = ((*output_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - input_w); + if (pad_h_all < 0) { + param->pad_u_ = param->pad_d_ = 0; + } else { + param->pad_u_ = pad_h_all / 2; + param->pad_d_ = pad_h_all - param->pad_u_; + } + if (pad_w_all < 0) { + param->pad_l_ = param->pad_r_ = 0; + } else { + param->pad_l_ = pad_w_all / 2; + param->pad_r_ = pad_w_all - param->pad_l_; + } + } else { + *output_w = ceil(((float)(input_w) + param->pad_l_ + param->pad_r_ - ((float)(kernel_w)-1) * (float)(dilate_w)) / + (float)(stride_w)); + *output_h = ceil(((float)(input_h) + param->pad_u_ + param->pad_d_ - ((float)(kernel_h)-1) * (float)(dilate_h)) / + (float)(stride_h)); + } +} + +int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 2, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input_tensor = inputs[0]; + const TensorC *weight_tensor = inputs[1]; + TensorC *out_tensor = outputs[0]; + + out_tensor->format_ = input_tensor->format_; + out_tensor->data_type_ = input_tensor->data_type_; + ConvParameter *param = (ConvParameter *)parameter; + if (param->group_ == 0) { + param->group_ = weight_tensor->shape_[0]; + } + param->output_channel_ = weight_tensor->shape_[0]; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + const int *in_shape = input_tensor->shape_; + if (input_tensor->shape_size_ == 0) { + return NNACL_INFER_INVALID; + } + int input_h 
= in_shape[1]; + int input_w = in_shape[2]; + int output_w = 0, output_h = 0; + + ConvInferShape(input_h, input_w, &output_h, &output_w, param); + + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + ShapeSet(out_shape, &out_shape_size, input_tensor->shape_, input_tensor->shape_size_); + out_shape[1] = output_h >= 0 ? output_h : 1; + out_shape[2] = output_w >= 0 ? output_w : 1; + out_shape[3] = GetBatch(weight_tensor); + SetShapeArray(out_tensor, out_shape, out_shape_size); + + param->input_batch_ = in_shape[0]; + param->input_h_ = in_shape[1]; + param->input_w_ = in_shape[2]; + param->input_channel_ = in_shape[3]; + param->output_batch_ = out_shape[0]; + param->output_h_ = out_shape[1]; + param->output_w_ = out_shape[2]; + param->output_channel_ = out_shape[3]; + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/conv2d_infer.h b/mindspore/lite/nnacl/infer/conv2d_infer.h new file mode 100644 index 0000000000..ee0d291b6a --- /dev/null +++ b/mindspore/lite/nnacl/infer/conv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CONV2D_INFER_H +#define MINDSPORE_LITE_NNACL_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int Conv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_and_resize_infer.c b/mindspore/lite/nnacl/infer/crop_and_resize_infer.c new file mode 100644 index 0000000000..fdbdbd9e53 --- /dev/null +++ b/mindspore/lite/nnacl/infer/crop_and_resize_infer.c @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
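
Worked numbers for ConvInferShape above, using hypothetical values (annotation, not patch content):

/*
 * 224x224 input, 3x3 kernel, stride 2, dilation 1.
 * Pad_same:  output = ceil(224 / 2) = 112
 *            pad_h_all = (112 - 1) * 2 + (3 - 1) * 1 + 1 - 224 = 1
 *            -> pad_u_ = 0, pad_d_ = 1 (the odd pixel goes to the end)
 * Explicit pads of 1 on each side instead:
 *            output = ceil((224 + 1 + 1 - (3 - 1) * 1) / 2) = 112
 */
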
+ */ + +#include "nnacl/infer/crop_and_resize_infer.h" + +int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 4); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + if (input->shape_size_ != 0 && input->shape_size_ != 4) { + return NNACL_ERR; + } + + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + if (inputs[1]->data_ != NULL) { + const TensorC *boxes_tensor = inputs[1]; + ShapePush(output_shape, &output_shape_size, boxes_tensor->shape_[0]); + } else { + ShapePush(output_shape, &output_shape_size, GetBatch(input)); + } + + const TensorC *shape_tensor = inputs[3]; + int32_t *data = (int32_t *)(shape_tensor->data_); + if (data == NULL) { + return NNACL_INFER_INVALID; + } + ShapePush(output_shape, &output_shape_size, data[0]); + ShapePush(output_shape, &output_shape_size, data[1]); + ShapePush(output_shape, &output_shape_size, GetChannel(input)); + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/crop_and_resize_infer.h b/mindspore/lite/nnacl/infer/crop_and_resize_infer.h new file mode 100644 index 0000000000..0d0858839a --- /dev/null +++ b/mindspore/lite/nnacl/infer/crop_and_resize_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H +#define MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CropAndResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CROP_AND_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/crop_infer.c b/mindspore/lite/nnacl/infer/crop_infer.c new file mode 100644 index 0000000000..f815d30773 --- /dev/null +++ b/mindspore/lite/nnacl/infer/crop_infer.c @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
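
A shape sketch for CropAndResizeInferShape above, with hypothetical values (annotation, not patch content):

/*
 * boxes {16, 4} with data present, crop-size tensor data {24, 24}, and an
 * NHWC input {1, 300, 300, 3} give an output of {16, 24, 24, 3}, i.e.
 * (num_boxes, crop_h, crop_w, channels). Without box data, dim 0 falls
 * back to the input batch.
 */
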
+ */ + +#include "nnacl/infer/crop_infer.h" + +int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + SetDataTypeFormat(outputs[0], inputs[0]); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(outputs[0], inputs[1]); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/crop_infer.h b/mindspore/lite/nnacl/infer/crop_infer.h new file mode 100644 index 0000000000..dd6de645f3 --- /dev/null +++ b/mindspore/lite/nnacl/infer/crop_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CROP_INFER_H +#define MINDSPORE_LITE_NNACL_CROP_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/crop_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CropInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CROP_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_extract_features_infer.c b/mindspore/lite/nnacl/infer/custom_extract_features_infer.c new file mode 100644 index 0000000000..4fa4a09b0d --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_extract_features_infer.c @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/custom_extract_features_infer.h" + +int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + TensorC *output0 = outputs[0]; + TensorC *output1 = outputs[1]; + + output0->data_type_ = kNumberTypeInt32; + output0->format_ = input->format_; + output1->data_type_ = kNumberTypeFloat32; + output1->format_ = input->format_; + + if (input->data_ == NULL) { + return NNACL_INFER_INVALID; + } + int string_num = *((const int32_t *)(input->data_)); + + int res = (string_num == 0 ? 
1 : string_num); + output0->shape_size_ = 1; + output0->shape_[0] = res; + output1->shape_size_ = 1; + output1->shape_[0] = res; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/custom_extract_features_infer.h b/mindspore/lite/nnacl/infer/custom_extract_features_infer.h new file mode 100644 index 0000000000..af518e60ce --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_extract_features_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H +#define MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CustomExtractFeaturesInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CUSTOM_EXTRACT_FEATURES_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_normalize_infer.c b/mindspore/lite/nnacl/infer/custom_normalize_infer.c new file mode 100644 index 0000000000..5ed8b9f323 --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_normalize_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/custom_normalize_infer.h" + +int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + + if (input->data_ == NULL) { + return NNACL_INFER_INVALID; + } + int string_num = *((const int32_t *)(input->data_)); // also look custom_extract_features + + output->shape_size_ = 1; + output->shape_[0] = (string_num == 0 ? 
1 : string_num); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/custom_normalize_infer.h b/mindspore/lite/nnacl/infer/custom_normalize_infer.h new file mode 100644 index 0000000000..6fe40cfc51 --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_normalize_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H +#define MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int CustomNormalizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CUSTOM_NORMALIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/custom_predict_infer.c b/mindspore/lite/nnacl/infer/custom_predict_infer.c new file mode 100644 index 0000000000..bd119033cd --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_predict_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
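
Both Custom* infer functions above rely on the same string-tensor convention. A sketch of that convention (the helper name is ours, not in the patch; the buffer layout beyond the leading count is an assumption outside this diff):

static int PackedStringCount(const TensorC *input) {
  // The first int32 of a string tensor's buffer holds the number of packed
  // strings; the infer functions above clamp an empty pack to one element.
  if (input->data_ == NULL) {
    return NNACL_INFER_INVALID;
  }
  int string_num = *((const int32_t *)(input->data_));
  return string_num == 0 ? 1 : string_num;
}
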
+ */ + +#include "nnacl/infer/custom_predict_infer.h" + +int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output0 = outputs[0]; + TensorC *output1 = outputs[1]; + + CustomPredictParameter *param = (CustomPredictParameter *)parameter; + output0->shape_size_ = 1; + output0->shape_[0] = param->output_num; + output0->data_type_ = kNumberTypeInt32; + output0->format_ = input->format_; + output1->shape_size_ = 1; + output1->shape_[0] = param->output_num; + output1->data_type_ = kNumberTypeFloat32; + output1->format_ = input->format_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/custom_predict_infer.h b/mindspore/lite/nnacl/infer/custom_predict_infer.h new file mode 100644 index 0000000000..4df7628e5e --- /dev/null +++ b/mindspore/lite/nnacl/infer/custom_predict_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H +#define MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct CustomPredictParameter { + OpParameter op_parameter_; + int output_num; +} CustomPredictParameter; + +int CustomPredictInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_CUSTOM_PREDICT_INFER_H diff --git a/mindspore/lite/nnacl/infer/deconv2d_infer.c b/mindspore/lite/nnacl/infer/deconv2d_infer.c new file mode 100644 index 0000000000..2f37906834 --- /dev/null +++ b/mindspore/lite/nnacl/infer/deconv2d_infer.c @@ -0,0 +1,97 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/deconv2d_infer.h" + +int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + const TensorC *weight = inputs[1]; + TensorC *output = outputs[0]; + output->format_ = input->format_; + output->data_type_ = input->data_type_; + + ConvParameter *param = (ConvParameter *)parameter; + if (param->group_ == 0) { + param->group_ = weight->shape_[0]; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int32_t input_h = GetHeight(input); + int32_t input_w = GetWidth(input); + + int32_t output_n = GetBatch(input); + int32_t output_h = 0; + int32_t output_w = 0; + int32_t output_c = GetChannel(weight); + if (param->group_ == GetChannel(input) && param->group_ == GetBatch(weight) && 1 == GetChannel(weight)) { + output_c = GetBatch(weight); /* depthwise */ + } + + int kernel_w = param->kernel_w_; + int kernel_h = param->kernel_h_; + int stride_w = param->stride_w_; + int stride_h = param->stride_h_; + int dilate_w = param->dilation_w_; + int dilate_h = param->dilation_h_; + int pad_mode = param->pad_mode_; + if (pad_mode == Pad_pad) { + output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - param->pad_u_ - param->pad_d_; + output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - param->pad_l_ - param->pad_r_; + } else if (pad_mode == Pad_same) { + output_h = input_h * stride_h; + output_w = input_w * stride_w; + } else if (pad_mode == Pad_valid) { + output_h = (input_h - 1) * stride_h + kernel_h; + output_w = (input_w - 1) * stride_w + kernel_w; + } else { + return NNACL_ERR; + } + + output_h += param->output_padding_h; + output_w += param->output_padding_w; + + output->shape_size_ = 4; + output->shape_[0] = output_n; + output->shape_[1] = output_h; + output->shape_[2] = output_w; + output->shape_[3] = output_c; + + if (pad_mode == Pad_same) { + param->pad_u_ = ((input_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - output_h) / 2; + param->pad_l_ = ((input_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - output_w) / 2; + } else if (pad_mode == Pad_valid) { + param->pad_u_ = 0; + param->pad_l_ = 0; + } + + const int *in_shape = input->shape_; + param->input_batch_ = in_shape[0]; + param->input_h_ = in_shape[1]; + param->input_w_ = in_shape[2]; + param->input_channel_ = in_shape[3]; + param->output_batch_ = output_n; + param->output_h_ = output_h; + param->output_w_ = output_w; + param->output_channel_ = output_c; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/deconv2d_infer.h b/mindspore/lite/nnacl/infer/deconv2d_infer.h new file mode 100644 index 0000000000..0563a9c6e9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/deconv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
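
Worked numbers for Deconv2dInferShape above, using hypothetical values (annotation, not patch content):

/*
 * 56x56 input, 3x3 kernel, stride 2, dilation 1.
 * Pad_valid: (56 - 1) * 2 + 3 = 113
 * Pad_same:   56 * 2          = 112
 * Pad_pad with pad_u_ = pad_d_ = 1:
 *            (56 - 1) * 2 + ((3 - 1) * 1 + 1) - 2 = 111
 * output_padding_h / output_padding_w are then added on top.
 */
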
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_DECONV2D_INFER_H
+#define MINDSPORE_LITE_NNACL_DECONV2D_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/conv_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int Deconv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                       OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_DECONV2D_INFER_H
diff --git a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c
new file mode 100644
index 0000000000..a62c311a20
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.c
@@ -0,0 +1,57 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/dedepthwise_conv2d_infer.h"
+
+int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                size_t outputs_size, OpParameter *parameter) {
+  int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 2, 3, 1);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  int input_h = input->shape_[1];
+  int input_w = input->shape_[2];
+  int input_channel = input->shape_[3];
+  int output_w = 0, output_h = 0;
+
+  ConvParameter *param = (ConvParameter *)parameter;
+  output_h = param->stride_h_ * (input_h - 1) + param->kernel_h_ - param->pad_u_ - param->pad_d_;
+  output_w = param->stride_w_ * (input_w - 1) + param->kernel_w_ - param->pad_l_ - param->pad_r_;
+  if ((output_h + param->pad_u_ + param->pad_d_ - param->kernel_h_) % param->stride_h_ != 0) {
+    output_h += (output_h + param->pad_u_ + param->pad_d_ - param->kernel_h_) % param->stride_h_;
+  }
+  if ((output_w + param->pad_l_ + param->pad_r_ - param->kernel_w_) % param->stride_w_ != 0) {
+    output_w += (output_w + param->pad_l_ + param->pad_r_ - param->kernel_w_) % param->stride_w_;
+  }
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  ShapeSet(out_shape, &out_shape_size, input->shape_, input->shape_size_);
+  out_shape[1] = output_h;
+  out_shape[2] = output_w;
+  if (param->channel_multiplie_ != 1) {
+    return NNACL_ERR;
+  }
+  out_shape[3] = input_channel;  // channel multiplier is 1, so out_channel == in_channel
+
+  SetShapeArray(output, out_shape, out_shape_size);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h
new file mode 100644
index 0000000000..59f295e141
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you
may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H +#define MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DeDepthwiseConv2DInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DEDEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/depth_to_space_infer.c b/mindspore/lite/nnacl/infer/depth_to_space_infer.c new file mode 100644 index 0000000000..81fdb94006 --- /dev/null +++ b/mindspore/lite/nnacl/infer/depth_to_space_infer.c @@ -0,0 +1,54 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/depth_to_space_infer.h" + +int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + if (input->format_ != Format_NHWC) { + return NNACL_ERR; + } + SetDataTypeFormat(outputs[0], input); + DepthToSpaceParameter *param = (DepthToSpaceParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int input_shape[MAX_SHAPE_SIZE]; + size_t input_shape_size = 0; + ShapeSet(input_shape, &input_shape_size, input->shape_, input->shape_size_); + if (input_shape_size != 4) { + return NNACL_PARAM_INVALID; + } + + int32_t block_size = param->block_size_; + if (input_shape[kNHWC_C] % (block_size * block_size) != 0 || input_shape[kNHWC_C] == 0) { + return NNACL_PARAM_INVALID; + } + int32_t output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = input_shape_size; + output_shape[kNHWC_N] = input_shape[kNHWC_N]; + output_shape[kNHWC_H] = input_shape[kNHWC_H] * block_size; + output_shape[kNHWC_W] = input_shape[kNHWC_W] * block_size; + output_shape[kNHWC_C] = input_shape[kNHWC_C] / (block_size * block_size); + SetShapeArray(outputs[0], output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/depth_to_space_infer.h b/mindspore/lite/nnacl/infer/depth_to_space_infer.h new file mode 100644 index 0000000000..be114f56e8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/depth_to_space_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H +#define MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/depth_to_space_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DepthToSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DEPTHTOSPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c new file mode 100644 index 0000000000..9a60c6006a --- /dev/null +++ b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.c @@ -0,0 +1,71 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
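
An example for DepthToSpaceInferShape above, with hypothetical values (annotation, not patch content):

/*
 * block_size_ = 2 and an NHWC input {1, 4, 6, 16}: the channel count must
 * divide evenly by block_size_^2 (16 / 4 = 4), giving an output of
 * {1, 4 * 2, 6 * 2, 16 / 4} = {1, 8, 12, 4}.
 */
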
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/depthwise_conv2d_infer.h" + +int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 2, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + ConvParameter *param = (ConvParameter *)parameter; + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int input_h = input->shape_[1]; + int input_w = input->shape_[2]; + int input_channel = input->shape_[3]; + int output_w = 0, output_h = 0; + param->input_channel_ = input_channel; + + if (param->pad_mode_ == Pad_same) { + output_h = ceil((float)(input_h) / (float)(param->stride_h_)); + output_w = ceil((float)(input_w) / (float)(param->stride_w_)); + int pad_h_all = ((output_h - 1) * param->stride_h_ + (param->kernel_h_ - 1) * param->dilation_h_ + 1 - input_h); + int pad_w_all = ((output_w - 1) * param->stride_w_ + (param->kernel_w_ - 1) * param->dilation_w_ + 1 - input_w); + if (pad_h_all > 0) { + param->pad_u_ = pad_h_all / 2; + param->pad_d_ = pad_h_all - param->pad_u_; + } + if (pad_w_all > 0) { + param->pad_l_ = pad_w_all / 2; + param->pad_r_ = pad_w_all - param->pad_l_; + } + } else { + output_h = ceil(((float)(input_h) + param->pad_u_ + param->pad_d_ - + ((float)(param->kernel_h_) - 1) * (float)(param->dilation_h_)) / + (float)(param->stride_h_)); + output_w = ceil(((float)(input_w) + param->pad_l_ + param->pad_r_ - + ((float)(param->kernel_w_) - 1) * (float)(param->dilation_w_)) / + (float)(param->stride_w_)); + } + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + ShapeSet(out_shape, &out_shape_size, input->shape_, input->shape_size_); + out_shape[1] = output_h; + out_shape[2] = output_w; + if (param->channel_multiplie_ != 1) { + return NNACL_ERR; + } + out_shape[3] = input_channel; // in_channel * out_channel + SetShapeArray(output, out_shape, out_shape_size); + return 0; +} diff --git a/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h new file mode 100644 index 0000000000..799279a1c7 --- /dev/null +++ b/mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
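
The SAME-pad split used by DepthwiseConv2dInferShape above, worked through with hypothetical values (annotation, not patch content):

/*
 * The extra pixel of an odd pad total goes to the bottom/right. With a
 * 7x7 input, stride 2, 3x3 kernel: output = ceil(7 / 2) = 4,
 * pad_h_all = (4 - 1) * 2 + (3 - 1) * 1 + 1 - 7 = 2 -> pad_u_ = 1, pad_d_ = 1;
 * were pad_h_all = 3, the split would be pad_u_ = 1, pad_d_ = 2.
 */
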
+ */ +#ifndef MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H +#define MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DepthwiseConv2dInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DEPTHWISE_CONV2D_INFER_H diff --git a/mindspore/lite/nnacl/infer/detection_post_process_infer.c b/mindspore/lite/nnacl/infer/detection_post_process_infer.c new file mode 100644 index 0000000000..4a5a883507 --- /dev/null +++ b/mindspore/lite/nnacl/infer/detection_post_process_infer.c @@ -0,0 +1,76 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/detection_post_process_infer.h" + +int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 4); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *boxes = inputs[0]; + const TensorC *scores = inputs[1]; + const TensorC *anchors = inputs[2]; + + DetectionPostProcessParameter *param = (DetectionPostProcessParameter *)parameter; + if (scores->shape_[2] < param->num_classes_) { + return NNACL_ERR; + } + if (scores->shape_[2] - param->num_classes_ > 1) { + return NNACL_ERR; + } + if (boxes->shape_[1] != scores->shape_[1]) { + return NNACL_ERR; + } + if (boxes->shape_[1] != anchors->shape_[0]) { + return NNACL_ERR; + } + + TensorC *detected_boxes = outputs[0]; + TensorC *detected_classes = outputs[1]; + TensorC *detected_scores = outputs[2]; + TensorC *num_det = outputs[3]; + + detected_boxes->format_ = boxes->format_; + detected_boxes->data_type_ = kNumberTypeFloat32; + detected_classes->format_ = boxes->format_; + detected_classes->data_type_ = kNumberTypeFloat32; + detected_scores->format_ = boxes->format_; + detected_scores->data_type_ = kNumberTypeFloat32; + num_det->format_ = boxes->format_; + num_det->data_type_ = kNumberTypeFloat32; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + const int max_detections = param->max_detections_; + const int max_classes_per_detection = param->max_classes_per_detection_; + const int num_detected_boxes = (int)(max_detections * max_classes_per_detection); + detected_boxes->shape_size_ = 3; + detected_boxes->shape_[0] = 1; + detected_boxes->shape_[1] = num_detected_boxes; + detected_boxes->shape_[2] = 4; + detected_classes->shape_size_ = 2; + detected_classes->shape_[0] = 1; + detected_classes->shape_[1] = num_detected_boxes; + detected_scores->shape_size_ = 2; + detected_scores->shape_[0] = 1; + detected_scores->shape_[1] = num_detected_boxes; + num_det->shape_size_ = 1; + num_det->shape_[0] = 1; + + return NNACL_OK; +} diff --git 
a/mindspore/lite/nnacl/infer/detection_post_process_infer.h b/mindspore/lite/nnacl/infer/detection_post_process_infer.h new file mode 100644 index 0000000000..f5ac10500f --- /dev/null +++ b/mindspore/lite/nnacl/infer/detection_post_process_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H +#define MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/detection_post_process_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DetectionPostProcessInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DETECTION_POST_PROCESS_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_grad_infer.c b/mindspore/lite/nnacl/infer/dropout_grad_infer.c new file mode 100644 index 0000000000..b759ae93ed --- /dev/null +++ b/mindspore/lite/nnacl/infer/dropout_grad_infer.c @@ -0,0 +1,33 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/dropout_grad_infer.h" + +int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 2); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(output, input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/dropout_grad_infer.h b/mindspore/lite/nnacl/infer/dropout_grad_infer.h new file mode 100644 index 0000000000..b88bfe11da --- /dev/null +++ b/mindspore/lite/nnacl/infer/dropout_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
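For the record, the four outputs of DetectionPostProcessInferShape above are shaped from max_detections_ and max_classes_per_detection_ alone; a standalone sketch with made-up parameter values:

#include <stdio.h>

int main(void) {
  const int max_detections = 10;            /* made-up */
  const int max_classes_per_detection = 1;  /* made-up */
  const int n = max_detections * max_classes_per_detection;
  printf("detected_boxes:   [1, %d, 4]\n", n);
  printf("detected_classes: [1, %d]\n", n);
  printf("detected_scores:  [1, %d]\n", n);
  printf("num_det:          [1]\n");
  return 0;
}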
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DropoutGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DROPOUT_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/dropout_infer.c b/mindspore/lite/nnacl/infer/dropout_infer.c new file mode 100644 index 0000000000..c5ca932d9c --- /dev/null +++ b/mindspore/lite/nnacl/infer/dropout_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/dropout_infer.h" + +int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + TensorC *output0 = outputs[0]; + SetDataTypeFormat(output0, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(output0, input); + if (outputs_size > 1) { + TensorC *output1 = outputs[1]; + SetDataTypeFormat(output1, input); + SetShapeTensor(output1, input); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/dropout_infer.h b/mindspore/lite/nnacl/infer/dropout_infer.h new file mode 100644 index 0000000000..9e13f939c4 --- /dev/null +++ b/mindspore/lite/nnacl/infer/dropout_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_DROPOUT_INFER_H +#define MINDSPORE_LITE_NNACL_DROPOUT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int DropoutInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_DROPOUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/embedding_lookup_infer.c b/mindspore/lite/nnacl/infer/embedding_lookup_infer.c new file mode 100644 index 0000000000..4d58ebbcbb --- /dev/null +++ b/mindspore/lite/nnacl/infer/embedding_lookup_infer.c @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/embedding_lookup_infer.h" + +int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (inputs_size < 2 || outputs_size != 1) { + return NNACL_INPUT_TENSOR_ERROR; + } + const TensorC *params_ = inputs[0]; + const TensorC *ids = inputs[inputs_size - 1]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, params_); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int embedding_shape[MAX_SHAPE_SIZE]; + size_t embedding_shape_size = 0; + ShapeSet(embedding_shape, &embedding_shape_size, params_->shape_, params_->shape_size_); + ShapeErase(embedding_shape, &embedding_shape_size, 0); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapeSet(output_shape, &output_shape_size, ids->shape_, ids->shape_size_); + for (size_t i = 0; i < embedding_shape_size; ++i) { + ShapePush(output_shape, &output_shape_size, embedding_shape[i]); + } + for (size_t i = 1; i < inputs_size - 1; ++i) { + int embedding_shape_t[MAX_SHAPE_SIZE]; + size_t embedding_shape_t_size = 0; + ShapeSet(embedding_shape_t, &embedding_shape_t_size, inputs[i]->shape_, inputs[i]->shape_size_); + ShapeErase(embedding_shape_t, &embedding_shape_t_size, 0); + bool t_equal = ShapeEqual(embedding_shape_t, embedding_shape_t_size, embedding_shape, embedding_shape_size); + if (!t_equal) { + return NNACL_INPUT_TENSOR_ERROR; + } + } + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/embedding_lookup_infer.h b/mindspore/lite/nnacl/infer/embedding_lookup_infer.h new file mode 100644 index 0000000000..642cf2e65a --- /dev/null +++ b/mindspore/lite/nnacl/infer/embedding_lookup_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
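EmbeddingLookupInferShape above concatenates the ids shape with the embedding shape minus its leading (vocabulary) dimension; a standalone sketch with made-up shapes:

#include <stdio.h>

int main(void) {
  const int params[2] = {100, 16}; /* made-up vocab x embedding_dim */
  const int ids[2] = {4, 7};       /* made-up indices shape */
  /* output = ids shape ++ params shape without dim 0 */
  printf("output shape: [%d, %d, %d]\n", ids[0], ids[1], params[1]); /* [4, 7, 16] */
  return 0;
}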
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H
+#define MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int EmbeddingLookupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                              OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_EMBEDDING_LOOKUP_INFER_H
diff --git a/mindspore/lite/nnacl/infer/expand_dims_infer.c b/mindspore/lite/nnacl/infer/expand_dims_infer.c
new file mode 100644
index 0000000000..08d1691eac
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/expand_dims_infer.c
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/expand_dims_infer.h"
+
+int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                         OpParameter *parameter) {
+  int check_ret = CheckAugmentNullOutputSize(inputs, inputs_size, outputs, outputs_size, parameter, 1);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  // Guard the unchecked access: the axis is read from the second input's data,
+  // which must exist and be constant at infer time.
+  if (inputs_size < 2 || inputs[1]->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  int dim = ((int32_t *)(inputs[1]->data_))[0];
+  if (dim < 0) {
+    dim += input->shape_size_ + 1;
+  }
+  if (dim > (int)(input->shape_size_)) {
+    return NNACL_INPUT_TENSOR_ERROR;
+  }
+
+  ShapeSet(output->shape_, &(output->shape_size_), input->shape_, input->shape_size_);
+  ShapeInsert(output->shape_, &(output->shape_size_), dim, 1);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/expand_dims_infer.h b/mindspore/lite/nnacl/infer/expand_dims_infer.h
new file mode 100644
index 0000000000..9005d75d13
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/expand_dims_infer.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
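ExpandDimsInferShape above normalizes a negative axis against the expanded rank before inserting the new size-1 dimension; a standalone sketch with made-up values:

#include <stdio.h>

int main(void) {
  int shape[8] = {3, 4}; /* made-up input shape, rank 2 */
  int rank = 2;
  int dim = -1; /* made-up axis: negative counts from the expanded end */
  if (dim < 0) {
    dim += rank + 1; /* -1 -> 2 */
  }
  for (int i = rank; i > dim; --i) {
    shape[i] = shape[i - 1]; /* shift to make room for the new axis */
  }
  shape[dim] = 1;
  ++rank;
  printf("output shape: [%d, %d, %d]\n", shape[0], shape[1], shape[2]); /* [3, 4, 1] */
  return 0;
}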
+ */ +#ifndef MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H +#define MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ExpandDimsInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_EXPAND_DIMS_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_imag_infer.c b/mindspore/lite/nnacl/infer/fft_imag_infer.c new file mode 100644 index 0000000000..81bf648e7a --- /dev/null +++ b/mindspore/lite/nnacl/infer/fft_imag_infer.c @@ -0,0 +1,22 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/fft_imag_infer.h" + +int FftImagInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + return FftInferShape(inputs, inputs_size, outputs, outputs_size, parameter); +} diff --git a/mindspore/lite/nnacl/infer/fft_imag_infer.h b/mindspore/lite/nnacl/infer/fft_imag_infer.h new file mode 100644 index 0000000000..df816e6397 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fft_imag_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H +#define MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FftImagInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FFT_IMAG_INFER_H diff --git a/mindspore/lite/nnacl/infer/fft_real_infer.c b/mindspore/lite/nnacl/infer/fft_real_infer.c new file mode 100644 index 0000000000..fcd4cc1a50 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fft_real_infer.c @@ -0,0 +1,22 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/fft_real_infer.h" + +int FftRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + return FftInferShape(inputs, inputs_size, outputs, outputs_size, parameter); +} diff --git a/mindspore/lite/nnacl/infer/fft_real_infer.h b/mindspore/lite/nnacl/infer/fft_real_infer.h new file mode 100644 index 0000000000..b3410ead4d --- /dev/null +++ b/mindspore/lite/nnacl/infer/fft_real_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H +#define MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FftRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FFT_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/fill_infer.c b/mindspore/lite/nnacl/infer/fill_infer.c new file mode 100644 index 0000000000..8f68e035e6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fill_infer.c @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/fill_infer.h" + +int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + const TensorC *dst_shape_tensor = inputs[1]; + const int32_t *dst_shape = (int32_t *)(dst_shape_tensor->data_); + size_t num_dims = 1; + for (size_t i = 0; i < dst_shape_tensor->shape_size_; ++i) { + num_dims *= dst_shape_tensor->shape_[i]; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (size_t i = 0; i < num_dims; i++) { + ShapePush(output_shape, &output_shape_size, dst_shape[i]); + } + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/fill_infer.h b/mindspore/lite/nnacl/infer/fill_infer.h new file mode 100644 index 0000000000..535a7d84a3 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fill_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FILL_INFER_H +#define MINDSPORE_LITE_NNACL_FILL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FillInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FILL_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_grad_infer.c b/mindspore/lite/nnacl/infer/flatten_grad_infer.c new file mode 100644 index 0000000000..96d96f59ec --- /dev/null +++ b/mindspore/lite/nnacl/infer/flatten_grad_infer.c @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/flatten_grad_infer.h" + +int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int output_shape[2]; + size_t output_shape_size = 2; + output_shape[0] = input->shape_[0]; + output_shape[1] = 1; + for (size_t i = 1; i < input->shape_size_; i++) { + output_shape[1] *= input->shape_[i]; + } + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/flatten_grad_infer.h b/mindspore/lite/nnacl/infer/flatten_grad_infer.h new file mode 100644 index 0000000000..532ebe591d --- /dev/null +++ b/mindspore/lite/nnacl/infer/flatten_grad_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H +#define MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FlattenGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FLATTEN_GRAD_INFER_INFER_H diff --git a/mindspore/lite/nnacl/infer/flatten_infer.c b/mindspore/lite/nnacl/infer/flatten_infer.c new file mode 100644 index 0000000000..217ce0c62e --- /dev/null +++ b/mindspore/lite/nnacl/infer/flatten_infer.c @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/flatten_infer.h" + +int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int input_shape[MAX_SHAPE_SIZE]; + size_t input_shape_size = 0; + ShapeSet(input_shape, &input_shape_size, input->shape_, input->shape_size_); + int output_shape[2]; + output_shape[0] = input_shape[0]; + output_shape[1] = 1; + for (size_t i = 1; i < input_shape_size; i++) { + output_shape[1] *= input_shape[i]; + } + SetShapeArray(output, output_shape, 2); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/flatten_infer.h b/mindspore/lite/nnacl/infer/flatten_infer.h new file mode 100644 index 0000000000..f71e25829d --- /dev/null +++ b/mindspore/lite/nnacl/infer/flatten_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_FLATTEN_INFER_H +#define MINDSPORE_LITE_NNACL_FLATTEN_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FlattenInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FLATTEN_INFER_H diff --git a/mindspore/lite/nnacl/infer/full_connection_infer.c b/mindspore/lite/nnacl/infer/full_connection_infer.c new file mode 100644 index 0000000000..6e3e9c2382 --- /dev/null +++ b/mindspore/lite/nnacl/infer/full_connection_infer.c @@ -0,0 +1,74 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/full_connection_infer.h" + +int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input0 = inputs[0]; + const TensorC *input1 = inputs[1]; + TensorC *output = outputs[0]; + MatMulParameter *param = (MatMulParameter *)parameter; + SetDataTypeFormat(output, input0); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if ((param->has_bias_ && inputs_size != 3) || (!param->has_bias_ && inputs_size != 2)) { + return NNACL_INPUT_TENSOR_ERROR; + } + if (param->use_axis_ && (param->axis_ < 1 || param->axis_ > (int)(input0->shape_size_))) { + return NNACL_ERR; + } + int new_k = 1; + if (param->use_axis_) { + for (size_t i = param->axis_; i < input0->shape_size_; ++i) { + new_k *= input0->shape_[i]; + } + if (new_k != input1->shape_[1]) { + return NNACL_INPUT_TENSOR_ERROR; + } + } else { + new_k = input1->shape_[1]; + } + if (param->has_bias_) { + if (inputs[2]->shape_[0] != input1->shape_[0]) { + return NNACL_INPUT_TENSOR_ERROR; + } + } + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + ShapeSet(out_shape, &out_shape_size, inputs[0]->shape_, inputs[0]->shape_size_); + if (param->use_axis_) { + out_shape_size = param->axis_ + 1; + out_shape[param->axis_] = input1->shape_[0]; + } else { + int total = 1; + for (size_t i = 0; i < input0->shape_size_; ++i) { + total *= input0->shape_[i]; + } + out_shape_size = 2; + int batch_size = total / new_k; + out_shape[0] = batch_size; + out_shape[1] = input1->shape_[0]; + } + SetShapeArray(output, out_shape, out_shape_size); + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/full_connection_infer.h b/mindspore/lite/nnacl/infer/full_connection_infer.h new file mode 100644 index 0000000000..dc3ef3cfa8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/full_connection_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H +#define MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int FullConnectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_FULL_CONNECTION_INFER_H diff --git a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c new file mode 100644 index 0000000000..d3428bf440 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.c @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/fused_batchnorm_infer.h" + +int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + for (size_t i = 0; i < inputs_size; i++) { + if (outputs_size <= i) { + break; + } + SetShapeTensor(outputs[i], inputs[i]); + SetDataTypeFormat(outputs[i], inputs[i]); + } + if (outputs_size > 5) { + SetDataTypeFormat(outputs[5], inputs[0]); + outputs[5]->shape_size_ = 1; + outputs[5]->shape_[0] = 1; + } + return 0; +} diff --git a/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h new file mode 100644 index 0000000000..a90de7f459 --- /dev/null +++ b/mindspore/lite/nnacl/infer/fused_batchnorm_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H
+#define MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int FusedBatchNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                             OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_FUSED_BATCHNORM_INFER_H
diff --git a/mindspore/lite/nnacl/infer/gather_infer.c b/mindspore/lite/nnacl/infer/gather_infer.c
new file mode 100644
index 0000000000..4bd9fb5a9f
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/gather_infer.c
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/gather_infer.h"
+
+int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter) {
+  if (inputs_size < 2 || outputs_size != 1) {
+    return NNACL_ERR;
+  }
+  const TensorC *input = inputs[0];
+  const TensorC *indices = inputs[1];
+  TensorC *output = outputs[0];
+  output->data_type_ = input->data_type_;
+  GatherParameter *param = (GatherParameter *)parameter;
+  if (param->quant_type_ == QuantType_WeightQuant) {
+    output->data_type_ = kNumberTypeFloat32;
+  }
+  output->format_ = input->format_;
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  // Guard the unchecked access: the axis is read from the third input's data,
+  // which must exist and be constant at infer time.
+  if (inputs_size < 3 || inputs[2]->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  int axis = *((int *)inputs[2]->data_);
+  if (axis < 0) {
+    axis += input->shape_size_;
+  }
+  int indices_shape[MAX_SHAPE_SIZE];
+  size_t indices_shape_size = 0;
+  ShapeSet(indices_shape, &indices_shape_size, indices->shape_, indices->shape_size_);
+  int indices_rank = indices_shape_size;
+  int in_shape[MAX_SHAPE_SIZE];
+  size_t in_shape_size = 0;
+  ShapeSet(in_shape, &in_shape_size, input->shape_, input->shape_size_);
+  int in_rank = in_shape_size;
+  if (in_rank < axis + 1) {
+    return NNACL_ERR;
+  }
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  ShapeSet(out_shape, &out_shape_size, in_shape, in_shape_size);
+  ShapeErase(out_shape, &out_shape_size, axis);
+  for (int i = indices_rank - 1; i >= 0; --i) {
+    ShapeInsert(out_shape, &out_shape_size, axis, indices_shape[i]);
+  }
+  SetShapeArray(output, out_shape, out_shape_size);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/gather_infer.h b/mindspore/lite/nnacl/infer/gather_infer.h
new file mode 100644
index 0000000000..b83028addb
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/gather_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
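GatherInferShape above erases the axis dimension of the input and splices the whole indices shape in its place; a standalone sketch with made-up shapes and axis = 1:

#include <stdio.h>

int main(void) {
  const int in[3] = {10, 20, 30}; /* made-up input shape */
  const int indices[2] = {5, 6};  /* made-up indices shape */
  /* erase in[axis], then insert the indices dims at that position */
  printf("output shape: [%d, %d, %d, %d]\n",
         in[0], indices[0], indices[1], in[2]); /* [10, 5, 6, 30] */
  return 0;
}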
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_GATHER_INFER_H +#define MINDSPORE_LITE_NNACL_GATHER_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/gather_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GatherInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_GATHER_INFER_H diff --git a/mindspore/lite/nnacl/infer/gather_nd_infer.c b/mindspore/lite/nnacl/infer/gather_nd_infer.c new file mode 100644 index 0000000000..98ac806526 --- /dev/null +++ b/mindspore/lite/nnacl/infer/gather_nd_infer.c @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/gather_nd_infer.h" + +int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + const TensorC *indices = inputs[1]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int in_rank = input->shape_size_; + int indices_rank = indices->shape_size_; + if (indices->shape_[indices_rank - 1] > in_rank) { + return NNACL_OK; + } + int i = 0; + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + for (i = 0; i < indices_rank - 1; ++i) { + ShapePush(out_shape, &out_shape_size, indices->shape_[i]); + } + for (i = indices->shape_[indices_rank - 1]; i < in_rank; ++i) { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/gather_nd_infer.h b/mindspore/lite/nnacl/infer/gather_nd_infer.h new file mode 100644 index 0000000000..69c804f1d0 --- /dev/null +++ b/mindspore/lite/nnacl/infer/gather_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
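GatherNdInferShape above keeps the leading indices dimensions, while the last indices dimension says how many input dimensions each index tuple consumes; the leftover input dimensions are appended. A standalone sketch with made-up shapes:

#include <stdio.h>

int main(void) {
  const int in[3] = {4, 5, 6};      /* made-up input shape */
  const int indices[2] = {3, 2};    /* made-up: 3 index tuples of length 2 */
  const int tuple_len = indices[1]; /* consumes the first 2 input dims */
  printf("output shape: [%d", indices[0]);
  for (int i = tuple_len; i < 3; ++i) {
    printf(", %d", in[i]);
  }
  printf("]\n"); /* [3, 6] */
  return 0;
}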
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H +#define MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/gatherNd_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GatherNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_GATHER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c new file mode 100644 index 0000000000..012eb49808 --- /dev/null +++ b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.c @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/group_conv2d_grad_input_infer.h" + +int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + if (inputs_size < 2 || outputs_size != 1) { + return NNACL_ERR; + } + + const TensorC *in0 = inputs[0]; + TensorC *out = outputs[0]; + + SetDataTypeFormat(out, in0); + + size_t shape_size_ = in0->shape_size_; + int shape_[MAX_SHAPE_SIZE]; + for (int i = 0; i < shape_size_; i++) { + shape_[i] = in0->shape_[i]; + } + SetShapeArray(out, shape_, shape_size_); + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h new file mode 100644 index 0000000000..672924a092 --- /dev/null +++ b/mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H +#define MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/conv_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GroupConv2dGradInputInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_GROUP_CONV2D_GRAD_INPUT_INFER_H diff --git a/mindspore/lite/nnacl/infer/gru_infer.c b/mindspore/lite/nnacl/infer/gru_infer.c new file mode 100644 index 0000000000..61bd5c1eeb --- /dev/null +++ b/mindspore/lite/nnacl/infer/gru_infer.c @@ -0,0 +1,82 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/gru_infer.h" + +int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if ((inputs_size != 5 && inputs_size != 6) || outputs_size != 2) { + return NNACL_INPUT_TENSOR_ERROR; + } + const TensorC *input = inputs[0]; + const TensorC *weight_gate = inputs[1]; + const TensorC *weight_recurrence = inputs[2]; + const TensorC *bias = inputs[3]; + TensorC *output = outputs[0]; + for (int i = 0; i < 2; i++) { + SetDataTypeFormat(outputs[i], input); + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + const int *in_shape = input->shape_; // seq_len, batch, input_size + const int *w_gate_shape = weight_gate->shape_; // num_direction, hidden_size * 3, input_size + const int *w_recu_shape = weight_recurrence->shape_; // num_direction, hidden_size * 3, hidden_size + const int *bias_shape = bias->shape_; // num_direction, hidden_size * 6 + if (input->shape_size_ != 3 || weight_gate->shape_size_ != 3 || weight_recurrence->shape_size_ != 3) { + return NNACL_ERR; + } + if (w_gate_shape[1] != w_recu_shape[1] || w_recu_shape[1] * 2 != bias_shape[1]) { + return NNACL_ERR; + } + if (inputs_size == 6) { + const int *seq_len_shape = inputs[5]->shape_; + if (seq_len_shape[0] > 1) { + return NNACL_ERR; + } + if (inputs[5]->shape_size_ != 1 && seq_len_shape[0] != in_shape[1]) { + return NNACL_ERR; + } + } + + int hidden_size = w_gate_shape[1] / 3; + // set output + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + ShapeSet(out_shape, &out_shape_size, in_shape, input->shape_size_); + out_shape[2] = hidden_size; + + GruParameter *param = (GruParameter *)parameter; + if (param->bidirectional_) { + ShapeInsert(out_shape, &out_shape_size, 1, 2); + } else { + ShapeInsert(out_shape, &out_shape_size, 1, 1); + } + SetShapeArray(output, out_shape, out_shape_size); + // set hidden state + int state_shape[MAX_SHAPE_SIZE]; + size_t state_shape_size = 0; + ShapeSet(state_shape, &state_shape_size, in_shape, input->shape_size_); + 
state_shape[0] = param->bidirectional_ ? 2 : 1; + state_shape[2] = hidden_size; + SetShapeArray(outputs[1], state_shape, state_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/gru_infer.h b/mindspore/lite/nnacl/infer/gru_infer.h new file mode 100644 index 0000000000..448c49ca94 --- /dev/null +++ b/mindspore/lite/nnacl/infer/gru_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_GRU_INFER_H +#define MINDSPORE_LITE_NNACL_GRU_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/gru_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int GruInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_GRU_INFER_H diff --git a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c new file mode 100644 index 0000000000..a01525c4e9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.c @@ -0,0 +1,41 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/hashtable_lookup_infer.h" + +int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + const TensorC *values = inputs[2]; + TensorC *output = outputs[0]; + TensorC *hits = outputs[1]; + + output->data_type_ = values->data_type_; + output->format_ = input->format_; + hits->shape_size_ = 1; + hits->shape_[0] = GetDimensionSize(input, 0); + hits->data_type_ = kNumberTypeUInt8; + hits->format_ = input->format_; + + if (input->data_ == NULL) { + return NNACL_INFER_INVALID; + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h new file mode 100644 index 0000000000..304e97a3e2 --- /dev/null +++ b/mindspore/lite/nnacl/infer/hashtable_lookup_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
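GruInferShape above derives hidden_size from the gate weight rows (three gates per direction) and inserts a direction axis into the output, while the hidden state keeps the batch dimension; a standalone sketch with made-up sizes:

#include <stdio.h>

int main(void) {
  const int seq_len = 10, batch = 2;       /* made-up input dims */
  const int w_gate_rows = 192;             /* made-up: 3 gates x hidden_size */
  const int hidden_size = w_gate_rows / 3; /* 64, as derived above */
  const int num_direction = 1;             /* bidirectional_ ? 2 : 1 */
  printf("output: [%d, %d, %d, %d]\n", seq_len, num_direction, batch, hidden_size);
  printf("state:  [%d, %d, %d]\n", num_direction, batch, hidden_size);
  return 0;
}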
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H +#define MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int HashtableLoopupInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_HASHTABLE_LOOKUP_INFER_H diff --git a/mindspore/lite/nnacl/infer/invert_permutation_infer.c b/mindspore/lite/nnacl/infer/invert_permutation_infer.c new file mode 100644 index 0000000000..d7448eecb1 --- /dev/null +++ b/mindspore/lite/nnacl/infer/invert_permutation_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/invert_permutation_infer.h" + +int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (input->data_type_ != kNumberTypeInt32) { + return NNACL_ERR; + } + if (input->shape_size_ != 1) { + return NNACL_ERR; + } + SetShapeTensor(output, input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/invert_permutation_infer.h b/mindspore/lite/nnacl/infer/invert_permutation_infer.h new file mode 100644 index 0000000000..fb2f71a9bb --- /dev/null +++ b/mindspore/lite/nnacl/infer/invert_permutation_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H +#define MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int InvertPermutationInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INVERT_PERMUTATION_INFER_H diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.c b/mindspore/lite/nnacl/infer/layer_norm_infer.c new file mode 100644 index 0000000000..bfa9e51d67 --- /dev/null +++ b/mindspore/lite/nnacl/infer/layer_norm_infer.c @@ -0,0 +1,35 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/layer_norm_infer.h" + +int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 1, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + LayerNormParameter *param = (LayerNormParameter *)parameter; + if (!param->op_parameter_.infer_flag_) { + return NNACL_INFER_INVALID; + } + + SetShapeTensor(output, input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/layer_norm_infer.h b/mindspore/lite/nnacl/infer/layer_norm_infer.h new file mode 100644 index 0000000000..bbc87f7db6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/layer_norm_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H +#define MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/layer_norm_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LayerNormInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_LAYER_NORM_INFER_H diff --git a/mindspore/lite/nnacl/infer/lin_space_infer.c b/mindspore/lite/nnacl/infer/lin_space_infer.c new file mode 100644 index 0000000000..5a3d1c3bf6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/lin_space_infer.c @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/lin_space_infer.h" + +int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + output->data_type_ = input->data_type_; + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int *num = (int *)(inputs[2]->data_); + if (num == NULL) { + return NNACL_INFER_INVALID; + } + output->shape_size_ = 1; + output->shape_[0] = num[0]; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/lin_space_infer.h b/mindspore/lite/nnacl/infer/lin_space_infer.h new file mode 100644 index 0000000000..0568040914 --- /dev/null +++ b/mindspore/lite/nnacl/infer/lin_space_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H +#define MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LinSpaceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_LIN_SPACE_INFER_H diff --git a/mindspore/lite/nnacl/infer/lsh_projection_infer.c b/mindspore/lite/nnacl/infer/lsh_projection_infer.c new file mode 100644 index 0000000000..b5e170874a --- /dev/null +++ b/mindspore/lite/nnacl/infer/lsh_projection_infer.c @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/lsh_projection_infer.h" + +int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 2, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *in_hash = inputs[0]; + if (in_hash->shape_size_ != 2 || GetDimensionSize(in_hash, 1) > 32) { + return NNACL_ERR; + } + TensorC *out_tensor = outputs[0]; + out_tensor->data_type_ = kNumberTypeInt32; + out_tensor->format_ = Format_NHWC; + + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + LshProjectionParameter *param = (LshProjectionParameter *)parameter; + switch (param->lsh_type_) { + case LshProjectionType_SPARSE: + ShapePush(out_shape, &out_shape_size, GetDimensionSize(in_hash, 0)); + break; + case LshProjectionType_DENSE: + ShapePush(out_shape, &out_shape_size, GetDimensionSize(in_hash, 0) * GetDimensionSize(in_hash, 1)); + break; + default: + return NNACL_ERR; + } + SetShapeArray(out_tensor, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/lsh_projection_infer.h b/mindspore/lite/nnacl/infer/lsh_projection_infer.h new file mode 100644 index 0000000000..ffba1443f8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/lsh_projection_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#ifndef MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H
+#define MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/lsh_projection_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int LshProjectionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                            OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_LSH_PROJECTION_INFER_H
diff --git a/mindspore/lite/nnacl/infer/lstm_infer.c b/mindspore/lite/nnacl/infer/lstm_infer.c
new file mode 100644
index 0000000000..2f30704260
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/lstm_infer.c
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/lstm_infer.h"
+
+int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter) {
+  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 6, 3);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  const TensorC *weight_i = inputs[1];
+  TensorC *output = outputs[0];
+  for (int i = 0; i < 3; i++) {
+    SetDataTypeFormat(outputs[i], input);
+  }
+
+  LstmParameter *param = (LstmParameter *)parameter;
+  if (!param->op_parameter_.infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  if (input->shape_size_ != 3 || weight_i->shape_size_ != 3) {
+    return NNACL_ERR;
+  }
+
+  // weight_i is laid out as [num_directions, 4 * hidden_size, input_size]
+  int hidden_size = weight_i->shape_[1] / 4;
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  ShapeSet(out_shape, &out_shape_size, input->shape_, input->shape_size_);
+  out_shape[2] = hidden_size;
+  if (param->bidirectional_) {
+    ShapeInsert(out_shape, &out_shape_size, 1, 2);
+  } else {
+    ShapeInsert(out_shape, &out_shape_size, 1, 1);
+  }
+  SetShapeArray(output, out_shape, out_shape_size);
+  int state_shape[MAX_SHAPE_SIZE];
+  size_t state_shape_size = 0;
+  ShapeSet(state_shape, &state_shape_size, input->shape_, input->shape_size_);
+  state_shape[0] = param->bidirectional_ ? 2 : 1;
+  state_shape[2] = hidden_size;
+  SetShapeArray(outputs[1], state_shape, state_shape_size);
+  SetShapeArray(outputs[2], state_shape, state_shape_size);
+
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/lstm_infer.h b/mindspore/lite/nnacl/infer/lstm_infer.h
new file mode 100644
index 0000000000..ea51f01b28
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/lstm_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_LSTM_INFER_H +#define MINDSPORE_LITE_NNACL_LSTM_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/lstm_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int LstmInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_LSTM_INFER_H diff --git a/mindspore/lite/nnacl/infer/matmul_infer.c b/mindspore/lite/nnacl/infer/matmul_infer.c new file mode 100644 index 0000000000..aff275b15f --- /dev/null +++ b/mindspore/lite/nnacl/infer/matmul_infer.c @@ -0,0 +1,83 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/matmul_infer.h" + +int MatmulInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + TensorC *input0 = (TensorC *)inputs[0]; + TensorC *input1 = (TensorC *)inputs[1]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input0); + MatMulParameter *param = (MatMulParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int a_shape[MAX_SHAPE_SIZE]; + size_t a_shape_size = 0; + ShapeSet(a_shape, &a_shape_size, input0->shape_, input0->shape_size_); + int b_shape[MAX_SHAPE_SIZE]; + size_t b_shape_size = 0; + ShapeSet(b_shape, &b_shape_size, input1->shape_, input1->shape_size_); + + if (a_shape_size == 4 && a_shape[2] == 1 && a_shape[3] == 1) { + a_shape_size = 2; + SetShapeArray(input0, a_shape, a_shape_size); + } + + bool del_start = false; + bool del_end = false; + if (a_shape_size == 1) { + ShapeInsert(a_shape, &a_shape_size, 0, 1); + SetShapeArray(input0, a_shape, a_shape_size); + del_start = true; + } + if (b_shape_size == 1) { + ShapePush(b_shape, &b_shape_size, 1); + SetShapeArray(input1, b_shape, b_shape_size); + del_end = true; + } + for (size_t i = 0; i < (a_shape_size - 2) && i < (b_shape_size - 2); ++i) { + if (a_shape[a_shape_size - 3 - i] != b_shape[b_shape_size - 3 - i]) { + return NNACL_INPUT_TENSOR_ERROR; + } + } + + if (param->a_transpose_) { + iswap(&a_shape[a_shape_size - 1], &a_shape[a_shape_size - 2]); + } + if (param->b_transpose_) { + iswap(&b_shape[b_shape_size - 1], &b_shape[b_shape_size - 2]); + } + int c_shape[MAX_SHAPE_SIZE]; + size_t c_shape_size = 0; + ShapeSet(c_shape, &c_shape_size, a_shape, 
a_shape_size); + c_shape[c_shape_size - 1] = b_shape[b_shape_size - 1]; + if (del_start) { + ShapeErase(c_shape, &c_shape_size, 0); + } + if (del_end) { + c_shape_size--; + } + SetShapeArray(output, c_shape, c_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/matmul_infer.h b/mindspore/lite/nnacl/infer/matmul_infer.h new file mode 100644 index 0000000000..9091f4e0f4 --- /dev/null +++ b/mindspore/lite/nnacl/infer/matmul_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_MATMUL_INFER_H +#define MINDSPORE_LITE_NNACL_MATMUL_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/matmul_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MatmulInferShape(const TensorC *const *const inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_MATMUL_INFER_H diff --git a/mindspore/lite/nnacl/infer/maximum_grad_infer.c b/mindspore/lite/nnacl/infer/maximum_grad_infer.c new file mode 100644 index 0000000000..f35561795f --- /dev/null +++ b/mindspore/lite/nnacl/infer/maximum_grad_infer.c @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/maximum_grad_infer.h" + +int MaximumGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 2); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *x1 = inputs[0]; + const TensorC *x2 = inputs[1]; + const TensorC *dy = inputs[2]; + TensorC *dx1 = outputs[0]; + TensorC *dx2 = outputs[1]; + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + MaximumGradParameter *param = (MaximumGradParameter *)parameter; + param->ndim_ = dy->shape_size_; + param->x1_shape_size_ = param->ndim_; + param->x2_shape_size_ = param->ndim_; + param->dy_shape_size_ = param->ndim_; + int fillDimNum0 = dy->shape_size_ - x1->shape_size_; + int fillDimNum1 = dy->shape_size_ - x2->shape_size_; + int j0 = 0; + int j1 = 0; + for (unsigned int i = 0; i < dy->shape_size_; i++) { + param->x1_shape_[i] = (i < fillDimNum0) ? 1 : x1->shape_[j0++]; + param->x2_shape_[i] = (i < fillDimNum1) ? 
1 : x2->shape_[j1++]; + param->dy_shape_[i] = dy->shape_[i]; + } + + SetShapeTensor(dx1, x1); + SetShapeTensor(dx2, x2); + SetDataTypeFormat(dx1, dy); + SetDataTypeFormat(dx2, dy); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/maximum_grad_infer.h b/mindspore/lite/nnacl/infer/maximum_grad_infer.h new file mode 100644 index 0000000000..e76c5e9350 --- /dev/null +++ b/mindspore/lite/nnacl/infer/maximum_grad_infer.h @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_MAXIMUM_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_MAXIMUM_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct MaximumGradParameter { + OpParameter op_parameter_; + int ndim_; + int x1_shape_[MAX_SHAPE_SIZE]; + size_t x1_shape_size_; + int x2_shape_[MAX_SHAPE_SIZE]; + size_t x2_shape_size_; + int dy_shape_[MAX_SHAPE_SIZE]; + size_t dy_shape_size_; +} MaximumGradParameter; + +int MaximumGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_MAXIMUM_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/mean_infer.c b/mindspore/lite/nnacl/infer/mean_infer.c new file mode 100644 index 0000000000..1b22283b80 --- /dev/null +++ b/mindspore/lite/nnacl/infer/mean_infer.c @@ -0,0 +1,67 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/mean_infer.h" + +int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + ReduceParameter *param = (ReduceParameter *)parameter; + bool keep_dims = (bool)(param->keep_dims_); + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + int *axes = param->axes_; + int num_axes = param->num_axes_; + // reduce on all axes + if (num_axes == 0) { + if (keep_dims) { + for (size_t i = 0; i < input->shape_size_; i++) { + ShapePush(out_shape, &out_shape_size, 1); + } + } + SetShapeArray(output, out_shape, out_shape_size); + output->data_type_ = input->data_type_; + return NNACL_OK; + } + // reduce on selected axes + for (size_t i = 0; i < input->shape_size_; i++) { + bool reduce_axis = false; + for (size_t idx = 0; idx < num_axes; ++idx) { + if (((size_t)(axes[idx])) == i) { + reduce_axis = true; + break; + } + } + if (reduce_axis) { + if (keep_dims) { + ShapePush(out_shape, &out_shape_size, 1); + } + } else { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + } + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/mean_infer.h b/mindspore/lite/nnacl/infer/mean_infer.h new file mode 100644 index 0000000000..ab83182eb8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/mean_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_MEAN_INFER_H +#define MINDSPORE_LITE_NNACL_MEAN_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int MeanInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_MEAN_INFER_H diff --git a/mindspore/lite/nnacl/infer/merge_infer.c b/mindspore/lite/nnacl/infer/merge_infer.c new file mode 100644 index 0000000000..5af32daf81 --- /dev/null +++ b/mindspore/lite/nnacl/infer/merge_infer.c @@ -0,0 +1,93 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/merge_infer.h"
+#include <string.h>
+
+int MergeAbleToInfer(const TensorC *const *inputs, size_t inputs_size) {
+  for (size_t i = 0; i < inputs_size; i++) {
+    if (inputs[i]->shape_size_ == 0) {
+      return HasZeroShape;
+    }
+    if (inputs[i]->data_ == NULL) {
+      return NotAble;
+    }
+  }
+  return Able;
+}
+
+int MergeInfer(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size) {
+  for (size_t i = 0; i < inputs_size; i++) {
+    SetDataTypeFormat(outputs[i], inputs[i]);
+    if (((TensorListC *)inputs[i])->data_type_ == kObjectTypeTensorType) {
+      TensorListC *input_tensorlist = (TensorListC *)inputs[i];
+      TensorListC *output_tensorlist = (TensorListC *)outputs[i];
+      ShapeSet(output_tensorlist->element_shape_, &output_tensorlist->element_shape_size_,
+               input_tensorlist->element_shape_, input_tensorlist->element_shape_size_);
+      output_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_;
+      output_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_;
+
+      output_tensorlist->element_num_ = input_tensorlist->element_num_;
+      for (size_t j = 0; j < output_tensorlist->element_num_; j++) {
+        memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC));
+      }
+    } else {
+      SetShapeTensor(outputs[i], inputs[i]);
+    }
+  }
+  return NNACL_OK;
+}
+
+int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                    OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  if (inputs_size != 2 * outputs_size) {
+    return NNACL_ERR;
+  }
+  for (size_t i = 0; i < outputs_size; ++i) {
+    outputs[i]->data_type_ = inputs[i]->data_type_;
+  }
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  // The inputs carry two alternative groups of candidates: the first half and
+  // the second half. Shapes are taken from whichever group is already fully
+  // determined; if both groups report zero shapes, fall back to the first one.
+  const TensorC *const *left_part_inputs = inputs;
+  size_t left_part_inputs_size = inputs_size / 2;
+
+  const TensorC *const *right_part_inputs = inputs + left_part_inputs_size;
+  size_t right_part_inputs_size = inputs_size / 2;
+
+  if (MergeAbleToInfer(left_part_inputs, left_part_inputs_size) == Able) {
+    return MergeInfer(left_part_inputs, left_part_inputs_size, outputs, outputs_size);
+  }
+
+  if (MergeAbleToInfer(right_part_inputs, right_part_inputs_size) == Able) {
+    return MergeInfer(right_part_inputs, right_part_inputs_size, outputs, outputs_size);
+  }
+
+  if (MergeAbleToInfer(left_part_inputs, left_part_inputs_size) == HasZeroShape &&
+      MergeAbleToInfer(right_part_inputs, right_part_inputs_size) == HasZeroShape) {
+    return MergeInfer(left_part_inputs, left_part_inputs_size, outputs, outputs_size);
+  }
+
+  return NNACL_INFER_INVALID;
+}
diff --git a/mindspore/lite/nnacl/infer/merge_infer.h b/mindspore/lite/nnacl/infer/merge_infer.h
new file mode 100644
index 0000000000..57b40e07df
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/merge_infer.h
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_MERGE_INFER_H +#define MINDSPORE_LITE_NNACL_MERGE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +enum InferStatus { Able, NotAble, HasZeroShape }; + +int MergeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_MERGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/mfcc_infer.c b/mindspore/lite/nnacl/infer/mfcc_infer.c new file mode 100644 index 0000000000..f47e849b36 --- /dev/null +++ b/mindspore/lite/nnacl/infer/mfcc_infer.c @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/mfcc_infer.h" + +int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (input->shape_size_ != 3) { + return NNACL_ERR; + } + if (GetElementNum(inputs[1]) != 1) { + return NNACL_ERR; + } + output->shape_size_ = 3; + output->shape_[0] = input->shape_[0]; + output->shape_[1] = input->shape_[1]; + MfccParameter *param = (MfccParameter *)parameter; + output->shape_[2] = param->dct_coeff_num_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/mfcc_infer.h b/mindspore/lite/nnacl/infer/mfcc_infer.h new file mode 100644 index 0000000000..358deb46a9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/mfcc_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_MFCC_INFER_H +#define MINDSPORE_LITE_NNACL_MFCC_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct MfccParameter { + OpParameter op_parameter_; + int dct_coeff_num_; +} MfccParameter; + +int MfccInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_MFCC_INFER_H diff --git a/mindspore/lite/nnacl/infer/non_max_suppression_infer.c b/mindspore/lite/nnacl/infer/non_max_suppression_infer.c new file mode 100644 index 0000000000..740e958a87 --- /dev/null +++ b/mindspore/lite/nnacl/infer/non_max_suppression_infer.c @@ -0,0 +1,30 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/non_max_suppression_infer.h" + +int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + output->data_type_ = kNumberTypeInt32; + output->format_ = input->format_; + return NNACL_INFER_INVALID; +} diff --git a/mindspore/lite/nnacl/infer/non_max_suppression_infer.h b/mindspore/lite/nnacl/infer/non_max_suppression_infer.h new file mode 100644 index 0000000000..bb0cc24d1a --- /dev/null +++ b/mindspore/lite/nnacl/infer/non_max_suppression_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H +#define MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int NonMaxSuppressionInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_NON_MAX_SUPPRESSION_INFER_H diff --git a/mindspore/lite/nnacl/infer/one_hot_infer.c b/mindspore/lite/nnacl/infer/one_hot_infer.c new file mode 100644 index 0000000000..aec2a7d161 --- /dev/null +++ b/mindspore/lite/nnacl/infer/one_hot_infer.c @@ -0,0 +1,54 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/one_hot_infer.h" + +int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (inputs_size != 4 && inputs_size != 3) { + return NNACL_INPUT_TENSOR_ERROR; + } + + const TensorC *input = inputs[0]; + const TensorC *depth_tensor = inputs[1]; + const TensorC *on_value = inputs[2]; + TensorC *output = outputs[0]; + const int *depth = (int *)(depth_tensor->data_); + if (depth == NULL) { + return NNACL_NULL_PTR; + } + SetDataTypeFormat(output, on_value); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + OneHotParameter *param = (OneHotParameter *)parameter; + int axis = param->axis_; + int input_rank = (int)(input->shape_size_); + if (axis < 0) { + axis += input_rank + 1; + } + ShapeSet(output->shape_, &(output->shape_size_), input->shape_, input->shape_size_); + int res_insert = ShapeInsert(output->shape_, &output->shape_size_, axis, *depth); + if (res_insert == NNACL_ERR) { + return NNACL_ERR; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/one_hot_infer.h b/mindspore/lite/nnacl/infer/one_hot_infer.h new file mode 100644 index 0000000000..3e0305e158 --- /dev/null +++ b/mindspore/lite/nnacl/infer/one_hot_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H +#define MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/one_hot_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int OneHotInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ONE_HOT_INFER_H diff --git a/mindspore/lite/nnacl/infer/pad_infer.c b/mindspore/lite/nnacl/infer/pad_infer.c new file mode 100644 index 0000000000..c2cd67d8ce --- /dev/null +++ b/mindspore/lite/nnacl/infer/pad_infer.c @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/pad_infer.h" + +int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + PadParameter *param = (PadParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + const TensorC *paddings = inputs[1]; + int size = GetElementNum(paddings); + if (size > MAX_PAD_SIZE) { + return NNACL_PARAM_INVALID; + } + if (paddings->data_ == NULL) { + return NNACL_INFER_INVALID; + } + param->padding_length = size; + for (int i = 0; i < size; ++i) { + param->paddings_[i] = ((int *)paddings->data_)[i]; + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + if (input->shape_size_ > 4) { + return NNACL_INPUT_TENSOR_ERROR; + } + for (size_t i = 0; i < input->shape_size_; i++) { + int shape = input->shape_[i] + param->paddings_[2 * i] + param->paddings_[2 * i + 1]; + ShapePush(output_shape, &output_shape_size, shape); + } + + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/pad_infer.h b/mindspore/lite/nnacl/infer/pad_infer.h new file mode 100644 index 0000000000..b97bea4b52 --- /dev/null +++ b/mindspore/lite/nnacl/infer/pad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_PAD_INFER_H +#define MINDSPORE_LITE_NNACL_PAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pad_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PadInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_PAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/partial_infer.c b/mindspore/lite/nnacl/infer/partial_infer.c new file mode 100644 index 0000000000..5fa89a3b8e --- /dev/null +++ b/mindspore/lite/nnacl/infer/partial_infer.c @@ -0,0 +1,22 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/partial_infer.h" + +int PartialInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/partial_infer.h b/mindspore/lite/nnacl/infer/partial_infer.h new file mode 100644 index 0000000000..7d9adbe8ca --- /dev/null +++ b/mindspore/lite/nnacl/infer/partial_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_PARTIAL_INFER_H +#define MINDSPORE_LITE_NNACL_PARTIAL_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PartialInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_PARTIAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_grad_infer.c b/mindspore/lite/nnacl/infer/pooling_grad_infer.c new file mode 100644 index 0000000000..c5b0d3e2b9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/pooling_grad_infer.c @@ -0,0 +1,59 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/pooling_grad_infer.h" +#include <math.h> + +int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + int input_h = input->shape_[1]; + int input_w = input->shape_[2]; + + PoolingParameter *param = (PoolingParameter *)parameter; + int window_h = param->window_h_; + int window_w = param->window_w_; + if (param->global_) { + window_h = input_h; + window_w = input_w; + } + + if (param->pad_mode_ == Pad_same) { + int output_w = ceil((float)(input_w) / (float)(param->stride_w_)); + int output_h = ceil((float)(input_h) / (float)(param->stride_h_)); + int pad_h_all = ((output_h - 1) * param->stride_h_ + (window_h - 1) + 1 - input_h); + int pad_w_all = ((output_w - 1) * param->stride_w_ + (window_w - 1) + 1 - input_w); + if (pad_h_all < 0) { + param->pad_u_ = param->pad_d_ = 0; + } else { + param->pad_u_ = pad_h_all / 2; + param->pad_d_ = pad_h_all - param->pad_u_; + } + if (pad_w_all < 0) { + param->pad_l_ = param->pad_r_ = 0; + } else { + param->pad_l_ = pad_w_all / 2; + param->pad_r_ = pad_w_all - param->pad_l_; + } + } + SetDataTypeFormat(outputs[0], input); + SetShapeTensor(outputs[0], input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/pooling_grad_infer.h b/mindspore/lite/nnacl/infer/pooling_grad_infer.h new file mode 100644 index 0000000000..d8104f35e8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/pooling_grad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pooling_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PoolingGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_POOLING_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/pooling_infer.c b/mindspore/lite/nnacl/infer/pooling_infer.c new file mode 100644 index 0000000000..d301854f96 --- /dev/null +++ b/mindspore/lite/nnacl/infer/pooling_infer.c @@ -0,0 +1,80 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/pooling_infer.h"
+#include <math.h>
+
+int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                      OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  PoolingParameter *param = (PoolingParameter *)parameter;
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  int input_h = input->shape_[1];
+  int input_w = input->shape_[2];
+
+  int window_h = param->window_h_;
+  int window_w = param->window_w_;
+  if (param->global_) {
+    window_h = input_h;
+    window_w = input_w;
+  }
+  int output_h = 0;
+  int output_w = 0;
+  if (param->pad_mode_ == Pad_same) {  // SAME padding: output size depends on stride only; paddings derived below
+    output_w = ceil((float)(input_w) / (float)(param->stride_w_));
+    output_h = ceil((float)(input_h) / (float)(param->stride_h_));
+    int pad_h_all = ((output_h - 1) * param->stride_h_ + (window_h - 1) + 1 - input_h);
+    int pad_w_all = ((output_w - 1) * param->stride_w_ + (window_w - 1) + 1 - input_w);
+    if (pad_h_all < 0) {
+      param->pad_u_ = param->pad_d_ = 0;
+    } else {
+      param->pad_u_ = pad_h_all / 2;
+      param->pad_d_ = pad_h_all - param->pad_u_;
+    }
+    if (pad_w_all < 0) {
+      param->pad_l_ = param->pad_r_ = 0;
+    } else {
+      param->pad_l_ = pad_w_all / 2;
+      param->pad_r_ = pad_w_all - param->pad_l_;
+    }
+  } else {
+    int round_mode = (RoundMode)param->round_mode_;
+    if (round_mode == RoundMode_Floor) {
+      output_h = floor((float)(input_h + param->pad_u_ + param->pad_d_ - window_h) / param->stride_h_) + 1;
+      output_w = floor((float)(input_w + param->pad_l_ + param->pad_r_ - window_w) / param->stride_w_) + 1;
+    } else if (round_mode == RoundMode_Ceil) {
+      output_h = ceil((float)(input_h + param->pad_u_ + param->pad_d_ - window_h) / param->stride_h_) + 1;
+      output_w = ceil((float)(input_w + param->pad_l_ + param->pad_r_ - window_w) / param->stride_w_) + 1;
+    } else {
+      return NNACL_ERR;
+    }
+  }
+  int input_shape[MAX_SHAPE_SIZE];
+  size_t input_shape_size = 0;
+  ShapeSet(input_shape, &input_shape_size, input->shape_, input->shape_size_);
+  input_shape[1] = output_h > 0 ? output_h : 1;
+  input_shape[2] = output_w > 0 ? output_w : 1;
+  SetShapeArray(output, input_shape, input_shape_size);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/pooling_infer.h b/mindspore/lite/nnacl/infer/pooling_infer.h
new file mode 100644
index 0000000000..1f30eeaebb
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/pooling_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_POOLING_INFER_H +#define MINDSPORE_LITE_NNACL_POOLING_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/pooling_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/power_infer.c b/mindspore/lite/nnacl/infer/power_infer.c new file mode 100644 index 0000000000..aa45b38167 --- /dev/null +++ b/mindspore/lite/nnacl/infer/power_infer.c @@ -0,0 +1,51 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/power_infer.h" + +int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *x_tensor = inputs[0]; + TensorC *exp_tensor = NULL; + if (inputs_size == 2) { + exp_tensor = (TensorC *)inputs[1]; + PowerParameter *param = (PowerParameter *)parameter; + float *exp_data = (float *)(exp_tensor->data_); + if (exp_data == NULL) { + return NNACL_INFER_INVALID; + } + param->power_ = *exp_data; + } + TensorC *output_tensor = outputs[0]; + + SetDataTypeFormat(output_tensor, x_tensor); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (exp_tensor != NULL) { + bool exp_x_equal = ShapeEqual(exp_tensor->shape_, exp_tensor->shape_size_, x_tensor->shape_, x_tensor->shape_size_); + if (!exp_x_equal && GetElementNum(exp_tensor) != 1) { + return NNACL_INPUT_TENSOR_ERROR; + } + } + + SetShapeTensor(output_tensor, x_tensor); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/power_infer.h b/mindspore/lite/nnacl/infer/power_infer.h new file mode 100644 index 0000000000..10823b6ee4 --- /dev/null +++ b/mindspore/lite/nnacl/infer/power_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_POWER_INFER_H +#define MINDSPORE_LITE_NNACL_POWER_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/power_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PowerInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_POWER_INFER_H diff --git a/mindspore/lite/nnacl/infer/prior_box_infer.c b/mindspore/lite/nnacl/infer/prior_box_infer.c new file mode 100644 index 0000000000..cba8185204 --- /dev/null +++ b/mindspore/lite/nnacl/infer/prior_box_infer.c @@ -0,0 +1,74 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/prior_box_infer.h" +#include <math.h> + +int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + output->data_type_ = kNumberTypeFloat32; + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + float different_aspect_ratios[MAX_SHAPE_SIZE * 2 + 1]; // NOTE: flip double the number + different_aspect_ratios[0] = 1.0; + size_t different_aspect_ratios_size = 1; + + PriorBoxParameter *param = (PriorBoxParameter *)parameter; + float *aspect_ratios = param->aspect_ratios; + size_t aspect_ratios_size = param->aspect_ratios_size; + for (size_t i = 0; i < aspect_ratios_size; i++) { + float ratio = aspect_ratios[i]; + bool exist = false; + for (size_t j = 0; j < different_aspect_ratios_size; j++) { + if (fabsf(ratio - different_aspect_ratios[j]) < 1e-6) { + exist = true; + break; + } + } + if (!exist) { + different_aspect_ratios[different_aspect_ratios_size] = ratio; + different_aspect_ratios_size++; + if (param->flip) { + different_aspect_ratios[different_aspect_ratios_size] = 1.0f / ratio; + different_aspect_ratios_size++; + } + } + } + + size_t min_sizes_size = param->min_sizes_size; + size_t max_sizes_size = param->max_sizes_size; + int32_t num_priors_box = min_sizes_size * different_aspect_ratios_size + max_sizes_size; + int kPriorBoxPoints = 4; + int kPriorBoxN = 1; + int kPriorBoxW = 1; + int kPriorBoxC = 2; + + int32_t h = GetHeight(input) * GetWidth(input) * num_priors_box * kPriorBoxPoints; + output->shape_size_ = 4; + output->shape_[0] 
= kPriorBoxN; + output->shape_[1] = h; + output->shape_[2] = kPriorBoxW; + output->shape_[3] = kPriorBoxC; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/prior_box_infer.h b/mindspore/lite/nnacl/infer/prior_box_infer.h new file mode 100644 index 0000000000..1803485263 --- /dev/null +++ b/mindspore/lite/nnacl/infer/prior_box_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H +#define MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/prior_box_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int PriorBoxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_PRIOR_BOX_INFER_H diff --git a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c new file mode 100644 index 0000000000..c904a066ce --- /dev/null +++ b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/quant_dtype_cast_infer.h" + +int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + QuantDtypeCastParameter *param = (QuantDtypeCastParameter *)parameter; + if (input->data_type_ != param->srcT_) { + return NNACL_ERR; + } + output->data_type_ = param->dstT_; + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(output, input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h new file mode 100644 index 0000000000..b1fb1ca101 --- /dev/null +++ b/mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
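For readers tracing PriorBoxInferShape above, here is a self-contained C sketch of the same shape arithmetic. The feature-map size, the min/max size counts, and the ratio list are illustrative values, not anything taken from the patch:

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

int main(void) {
  /* Mirrors PriorBoxInferShape for a hypothetical 19x19 feature map,
     3 min_sizes, 3 max_sizes, aspect_ratios = {2, 3}, flip = true. */
  const float aspect_ratios[] = {2.0f, 3.0f};
  const bool flip = true;
  float distinct[8] = {1.0f};  /* ratio 1.0 is always present */
  size_t distinct_size = 1;
  for (size_t i = 0; i < sizeof(aspect_ratios) / sizeof(aspect_ratios[0]); i++) {
    bool exist = false;
    for (size_t j = 0; j < distinct_size; j++) {
      if (fabsf(aspect_ratios[i] - distinct[j]) < 1e-6f) { exist = true; break; }
    }
    if (!exist) {
      distinct[distinct_size++] = aspect_ratios[i];
      if (flip) distinct[distinct_size++] = 1.0f / aspect_ratios[i];  /* flip adds 1/r */
    }
  }
  size_t min_sizes_size = 3, max_sizes_size = 3;
  size_t num_priors = min_sizes_size * distinct_size + max_sizes_size;  /* 3*5+3 = 18 */
  int h = 19 * 19 * (int)num_priors * 4;  /* every prior contributes 4 box coordinates */
  printf("output shape: [1, %d, 1, 2]\n", h);  /* [1, 25992, 1, 2] */
  return 0;
}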
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H +#define MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct QuantDtypeCastParameter { + OpParameter op_parameter_; + int srcT_; + int dstT_; +} QuantDtypeCastParameter; + +int QuantDtypeCastInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_QUANT_DTYPE_CAST_INFER_H diff --git a/mindspore/lite/nnacl/infer/random_standard_normal_infer.c b/mindspore/lite/nnacl/infer/random_standard_normal_infer.c new file mode 100644 index 0000000000..d3b175fc06 --- /dev/null +++ b/mindspore/lite/nnacl/infer/random_standard_normal_infer.c @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/random_standard_normal_infer.h" + +int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int32_t *input_data = (int32_t *)(inputs[0]->data_); + if (input_data == NULL) { + return NNACL_INFER_INVALID; + } + int input_num = GetElementNum(inputs[0]); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (int i = 0; i < input_num; i++) { + ShapePush(output_shape, &output_shape_size, input_data[i]); + } + SetShapeArray(outputs[0], output_shape, output_shape_size); + outputs[0]->data_type_ = kNumberTypeFloat32; + outputs[0]->format_ = Format_NHWC; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/random_standard_normal_infer.h b/mindspore/lite/nnacl/infer/random_standard_normal_infer.h new file mode 100644 index 0000000000..d91ba863ce --- /dev/null +++ b/mindspore/lite/nnacl/infer/random_standard_normal_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
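RandomStandardNormalInferShape above copies the requested output shape straight out of the first input's int32 data. A minimal standalone sketch of that copy, with a stand-in shape buffer:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t shape_data[] = {2, 3, 4};  /* stand-in for inputs[0]->data_ */
  int out_shape[8];
  size_t out_shape_size = 0;
  for (size_t i = 0; i < sizeof(shape_data) / sizeof(shape_data[0]); i++) {
    out_shape[out_shape_size++] = shape_data[i];  /* ShapePush equivalent */
  }
  /* the output becomes a float32 NHWC tensor of shape [2, 3, 4] */
  for (size_t i = 0; i < out_shape_size; i++) printf("%d ", out_shape[i]);
  printf("\n");
  return 0;
}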
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H +#define MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int RandomStandardNormalInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_RANDOM_STANDARD_NORMAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/range_infer.c b/mindspore/lite/nnacl/infer/range_infer.c new file mode 100644 index 0000000000..51b06ba2a8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/range_infer.c @@ -0,0 +1,74 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/range_infer.h" +#include <math.h> + +int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + if (input == NULL || output == NULL) { + return NNACL_NULL_PTR; + } + + if (inputs_size == 3) { + output->data_type_ = input->data_type_; + } else { + output->data_type_ = kNumberTypeInt32; + } + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int shape_size = 0; + if (inputs_size == 3) { + if ((inputs[0]->data_ == NULL) || (inputs[1]->data_ == NULL) || (inputs[2]->data_ == NULL)) { + return NNACL_INFER_INVALID; + } + switch (inputs[0]->data_type_) { + case kNumberTypeInt: + case kNumberTypeInt32: { + int start = *(int *)(inputs[0]->data_); + int limit = *(int *)(inputs[1]->data_); + int delta = *(int *)(inputs[2]->data_); + shape_size = imax((int)(ceil((float)(limit - start) / delta)), 0); + } break; + case kNumberTypeFloat32: + case kNumberTypeFloat: { + float start = *(float *)(inputs[0]->data_); + float limit = *(float *)(inputs[1]->data_); + float delta = *(float *)(inputs[2]->data_); + shape_size = imax((int)(ceil((float)(limit - start) / delta)), 0); + } break; + default: { + return NNACL_ERR; + } + } + } else { + RangeParameter *param = (RangeParameter *)parameter; + shape_size = ceil((float)(param->limit_ - param->start_) / param->delta_); + } + + output->shape_size_ = 1; + output->shape_[0] = shape_size; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/range_infer.h 
b/mindspore/lite/nnacl/infer/range_infer.h new file mode 100644 index 0000000000..c52e8cc406 --- /dev/null +++ b/mindspore/lite/nnacl/infer/range_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RANGE_INFER_H +#define MINDSPORE_LITE_NNACL_RANGE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/range_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int RangeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_RANGE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rank_infer.c b/mindspore/lite/nnacl/infer/rank_infer.c new file mode 100644 index 0000000000..56c53920c8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/rank_infer.c @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/rank_infer.h" + +int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + output->shape_size_ = 1; + output->shape_[0] = 1; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/rank_infer.h b/mindspore/lite/nnacl/infer/rank_infer.h new file mode 100644 index 0000000000..ce162ed35b --- /dev/null +++ b/mindspore/lite/nnacl/infer/rank_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
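RangeInferShape above (range_infer.c) reduces to one formula in both the integer and float paths: the element count is max(ceil((limit - start) / delta), 0). A small sketch with illustrative endpoints:

#include <math.h>
#include <stdio.h>

static int range_len(float start, float limit, float delta) {
  int n = (int)ceilf((limit - start) / delta);
  return n > 0 ? n : 0;  /* same clamp as imax(..., 0) */
}

int main(void) {
  printf("%d\n", range_len(0.0f, 10.0f, 3.0f));  /* ceil(10/3) = 4 -> [0, 3, 6, 9] */
  printf("%d\n", range_len(5.0f, 1.0f, 1.0f));   /* negative span clamps to 0 */
  return 0;
}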
+ */
+#ifndef MINDSPORE_LITE_NNACL_RANK_INFER_H
+#define MINDSPORE_LITE_NNACL_RANK_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int RankInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_RANK_INFER_H
diff --git a/mindspore/lite/nnacl/infer/reduce_infer.c b/mindspore/lite/nnacl/infer/reduce_infer.c
new file mode 100644
index 0000000000..7d1d744e8b
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/reduce_infer.c
@@ -0,0 +1,101 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/reduce_infer.h"
+
+int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter) {
+  int check_ret = CheckAugmentNullOutputSize(inputs, inputs_size, outputs, outputs_size, parameter, 1);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  if (inputs_size < 2) {  // the axes tensor (inputs[1]) is read below
+    return NNACL_INPUT_TENSOR_ERROR;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  ReduceParameter *param = (ReduceParameter *)parameter;
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  bool keep_dims = param->keep_dims_;
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  // get axes from input tensor
+  const TensorC *axes_input = inputs[1];
+  int *axes = (int *)axes_input->data_;
+  if (axes == NULL) {
+    return NNACL_NULL_PTR;
+  }
+  size_t num_axes;
+  if (axes_input->shape_size_ == 1) {
+    num_axes = axes_input->shape_[0];
+  } else if (axes_input->shape_size_ == 0) {
+    num_axes = 1;
+  } else {
+    return NNACL_ERR;
+  }
+
+  int rank = (int)(input->shape_size_);
+  int actual_axes[MAX_SHAPE_SIZE];
+  size_t actual_axes_size = 0;
+  ShapeSet(actual_axes, &actual_axes_size, axes, num_axes);
+
+  if (param->reduce_to_end_) {
+    if (num_axes != 1) {
+      return NNACL_ERR;
+    }
+
+    int begin_axis = axes[0] < 0 ?
axes[0] + rank : axes[0]; + for (size_t i = begin_axis + 1; i < rank; ++i) { + ShapePush(actual_axes, &actual_axes_size, i); + } + num_axes = rank - begin_axis; + keep_dims = false; + } + // reduce on all axes + if (num_axes == 0) { + if (keep_dims) { + for (size_t i = 0; i < input->shape_size_; i++) { + ShapePush(out_shape, &out_shape_size, 1); + } + } + SetShapeArray(output, out_shape, out_shape_size); + output->data_type_ = input->data_type_; + return NNACL_OK; + } + // reduce on selected axes + for (size_t i = 0; i < input->shape_size_; i++) { + bool reduce_axis = false; + for (size_t idx = 0; idx < num_axes; ++idx) { + if ((size_t)(actual_axes[idx]) == i || (size_t)(actual_axes[idx] + input->shape_size_) == i) { + reduce_axis = true; + break; + } + } + if (reduce_axis) { + if (keep_dims) { + ShapePush(out_shape, &out_shape_size, 1); + } + } else { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + } + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/reduce_infer.h b/mindspore/lite/nnacl/infer/reduce_infer.h new file mode 100644 index 0000000000..8bec1eb2ba --- /dev/null +++ b/mindspore/lite/nnacl/infer/reduce_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_REDUCE_INFER_H +#define MINDSPORE_LITE_NNACL_REDUCE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reduce_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReduceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_REDUCE_INFER_H diff --git a/mindspore/lite/nnacl/infer/reshape_infer.c b/mindspore/lite/nnacl/infer/reshape_infer.c new file mode 100644 index 0000000000..81516fac1d --- /dev/null +++ b/mindspore/lite/nnacl/infer/reshape_infer.c @@ -0,0 +1,176 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
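The reduce_to_end path in ReduceInferShape above expands a single axis into every axis from that point through the last one and forces keep_dims off. A standalone sketch with a hypothetical [2, 3, 4, 5] input and axes {1}:

#include <stdio.h>

int main(void) {
  int in_shape[] = {2, 3, 4, 5};
  int rank = 4;
  int axes[4] = {1};  /* reduce_to_end expands this single entry */
  int num_axes = 1;
  int begin_axis = axes[0] < 0 ? axes[0] + rank : axes[0];
  for (int i = begin_axis + 1; i < rank; ++i) axes[num_axes++] = i;  /* axes -> {1, 2, 3} */
  /* keep_dims is forced to false, so only non-reduced dims survive */
  for (int i = 0; i < rank; ++i) {
    int reduced = 0;
    for (int j = 0; j < num_axes; ++j) {
      if (axes[j] == i) { reduced = 1; break; }
    }
    if (!reduced) printf("%d ", in_shape[i]);  /* prints "2": output shape is [2] */
  }
  printf("\n");
  return 0;
}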
+ */ + +#include "nnacl/infer/reshape_infer.h" + +void CalShape(int *data, const TensorC *const *inputs, int *out_shape, size_t *out_shape_size, int shape_size) { + int input_count = GetElementNum(inputs[0]); + int index = 0; + int size = 1; + for (int i = 0; i < shape_size; i++) { + if ((int)(data[i]) == -1) { + index = i; + } else if ((int)(data[i]) == 0) { + size *= inputs[0]->shape_[i]; + } else { + size *= data[i]; + } + ShapePush(out_shape, out_shape_size, data[i]); + } + if ((int)(data[index]) == -1) { + out_shape[index] = input_count / size; + } +} + +int CalNewShape(const TensorC *in_tensor, int *out_shape, size_t out_shape_size) { + size_t in_shape_size = 1; + for (size_t i = 0; i < in_tensor->shape_size_; i++) { + in_shape_size *= in_tensor->shape_[i]; + } + int64_t infer_index = -1; + size_t out_shape_size_new = 1; + for (size_t i = 0; i < out_shape_size; i++) { + if (out_shape[i] == -1) { + if (infer_index == -1) { + infer_index = i; + } else { + return NNACL_ERR; + } + } else if (out_shape[i] < 0) { + return NNACL_ERR; + } else if (out_shape[i] == 0) { + if (GetElementNum(in_tensor) != 0) { + out_shape[i] = in_tensor->shape_[i]; + out_shape_size_new *= out_shape[i]; + } else { + out_shape_size_new = 0; + break; + } + } else { + out_shape_size_new *= out_shape[i]; + } + } + if (infer_index == -1 && out_shape_size_new != in_shape_size) { + return NNACL_ERR; + } + if (infer_index != -1) { + out_shape[infer_index] = in_shape_size / out_shape_size_new; + } + return NNACL_OK; +} + +int CalShapeByType(const TensorC *const *inputs, size_t shape_size, int *out_shape, size_t *out_shape_size) { + const TensorC *shape_tensor = inputs[1]; + switch (shape_tensor->data_type_) { + case kNumberTypeInt8: { + int8_t *data = (int8_t *)(shape_tensor->data_); + int *data_int = (int *)malloc(sizeof(int) * shape_size); + for (size_t i = 0; i < shape_size; i++) { + data_int[i] = data[i]; + } + CalShape(data_int, inputs, out_shape, out_shape_size, shape_size); + free(data_int); + } break; + case kNumberTypeInt32: { + int32_t *data = (int32_t *)(shape_tensor->data_); + int *data_int = (int *)malloc(sizeof(int) * shape_size); + for (size_t i = 0; i < shape_size; i++) { + data_int[i] = data[i]; + } + CalShape(data_int, inputs, out_shape, out_shape_size, shape_size); + free(data_int); + } break; + case kNumberTypeInt64: { + int64_t *data = (int64_t *)(shape_tensor->data_); + int *data_int = (int *)malloc(sizeof(int) * shape_size); + for (size_t i = 0; i < shape_size; i++) { + data_int[i] = data[i]; + } + CalShape(data_int, inputs, out_shape, out_shape_size, shape_size); + free(data_int); + } break; + case kNumberTypeFloat: { + float *data = (float *)(shape_tensor->data_); + int *data_int = (int *)malloc(sizeof(int) * shape_size); + for (size_t i = 0; i < shape_size; i++) { + data_int[i] = data[i]; + } + CalShape(data_int, inputs, out_shape, out_shape_size, shape_size); + free(data_int); + } break; + case kNumberTypeUInt32: { + uint32_t *data = (uint32_t *)(shape_tensor->data_); + int *data_int = (int *)malloc(sizeof(int) * shape_size); + for (size_t i = 0; i < shape_size; i++) { + data_int[i] = data[i]; + } + CalShape(data_int, inputs, out_shape, out_shape_size, shape_size); + free(data_int); + } break; + default: { + return NNACL_ERR; + } + } + return NNACL_OK; +} + +int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if 
(check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + ReshapeParameter *param = (ReshapeParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + if (inputs_size == 2) { + const TensorC *shape_tensor = inputs[1]; + if (GetElementNum(input) == 1 && input->shape_size_ == 0) { + if (shape_tensor->data_ == NULL || (shape_tensor->shape_size_ == 1 && shape_tensor->shape_[0] == 0)) { + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; + } + } + + if (shape_tensor->data_ == NULL) { + return NNACL_INFER_INVALID; + } + size_t shape_size = GetElementNum(shape_tensor); + int calRet = CalShapeByType(inputs, shape_size, out_shape, &out_shape_size); + if (calRet != NNACL_OK) { + return calRet; + } + } else if (inputs_size == 1) { + for (size_t i = 0; i < param->shape_dim_; ++i) { + ShapePush(out_shape, &out_shape_size, param->shape_[i]); + } + } else { + return NNACL_ERR; + } + int ret = CalNewShape(inputs[0], out_shape, out_shape_size); + if (ret != NNACL_OK) { + return ret; + } + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/reshape_infer.h b/mindspore/lite/nnacl/infer/reshape_infer.h new file mode 100644 index 0000000000..adc01b9dac --- /dev/null +++ b/mindspore/lite/nnacl/infer/reshape_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RESHAPE_INFER_H +#define MINDSPORE_LITE_NNACL_RESHAPE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/reshape_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ReshapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_RESHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/resize_infer.c b/mindspore/lite/nnacl/infer/resize_infer.c new file mode 100644 index 0000000000..71c077af70 --- /dev/null +++ b/mindspore/lite/nnacl/infer/resize_infer.c @@ -0,0 +1,137 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
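The -1/0 conventions in CalNewShape above deserve a worked example: 0 copies the corresponding input dimension and a single -1 is inferred from the remaining element count. A simplified standalone sketch (it omits the zero-element special case handled in the patch):

#include <stdio.h>

/* Mirrors the core of CalNewShape: returns 0 on success, -1 on error. */
static int cal_new_shape(const int *in_shape, int in_rank, int *out_shape, int out_rank) {
  long in_count = 1;
  for (int i = 0; i < in_rank; ++i) in_count *= in_shape[i];
  int infer_index = -1;
  long known = 1;
  for (int i = 0; i < out_rank; ++i) {
    if (out_shape[i] == -1) {
      if (infer_index != -1) return -1;  /* at most one -1 allowed */
      infer_index = i;
    } else if (out_shape[i] == 0) {
      out_shape[i] = in_shape[i];  /* 0 copies the input dim */
      known *= out_shape[i];
    } else if (out_shape[i] < 0) {
      return -1;
    } else {
      known *= out_shape[i];
    }
  }
  if (infer_index == -1) return known == in_count ? 0 : -1;
  out_shape[infer_index] = (int)(in_count / known);  /* infer the -1 dim */
  return 0;
}

int main(void) {
  int in_shape[] = {2, 3, 4};  /* 24 elements */
  int out_shape[] = {0, -1};   /* keep dim 0, infer the rest */
  if (cal_new_shape(in_shape, 3, out_shape, 2) == 0) {
    printf("[%d, %d]\n", out_shape[0], out_shape[1]);  /* [2, 12] */
  }
  return 0;
}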
+ */ + +#include "nnacl/infer/resize_infer.h" + +int CalculateNewHeightAndWidth(const TensorC *const *inputs, size_t inputs_size, ResizeParameter *param) { + const TensorC *input = inputs[0]; + if (inputs_size == 2) { + const TensorC *shape_tensor = inputs[1]; + if (shape_tensor->data_ == NULL) { + return NNACL_INFER_INVALID; + } + size_t shape_size = GetElementNum(shape_tensor); + switch (shape_size) { + case 4: { + if (shape_tensor->data_type_ == kNumberTypeInt32) { + int32_t *data = (int32_t *)(shape_tensor->data_); + if (data == NULL) { + return NNACL_INFER_INVALID; + } + switch (shape_tensor->format_) { + case Format_NCHW: + param->new_height_ = data[2]; + param->new_width_ = data[3]; + break; + case Format_NHWC: + param->new_height_ = data[1]; + param->new_width_ = data[2]; + break; + default: + return NNACL_INFER_INVALID; + } + } else if (shape_tensor->data_type_ == kNumberTypeFloat32) { + float *data = (float *)(shape_tensor->data_); + if (data == NULL) { + return NNACL_INFER_INVALID; + } + switch (shape_tensor->format_) { + case Format_NCHW: + param->new_height_ = data[2] * GetHeight(input); + param->new_width_ = data[3] * GetWidth(input); + break; + case Format_NHWC: + param->new_height_ = data[1] * GetHeight(input); + param->new_width_ = data[2] * GetWidth(input); + break; + default: + return NNACL_INFER_INVALID; + } + } + break; + } + case 2: { + int32_t *data = (int32_t *)(shape_tensor->data_); + if (data == NULL) { + return NNACL_INFER_INVALID; + } + param->new_height_ = data[0]; + param->new_width_ = data[1]; + break; + } + case 1: { + // caffe zoom_factor + int scale; + if (shape_tensor->data_type_ == kNumberTypeInt32) { + int *data = (int *)(shape_tensor->data_); + if (data == NULL) { + return NNACL_INFER_INVALID; + } + scale = data[0]; + } else { + return NNACL_ERR; + } + param->new_height_ = GetHeight(input) + (GetHeight(input) - 1) * (scale - 1); + param->new_width_ = GetWidth(input) + (GetWidth(input) - 1) * (scale - 1); + break; + } + default: { + return NNACL_ERR; + } + } + } else if (inputs_size == 1) { + } else if (inputs_size == 4) { + if (inputs[3]->data_ == NULL) { + return NNACL_INFER_INVALID; + } + param->new_height_ = ((int *)(inputs[3]->data_))[0]; + param->new_width_ = ((int *)(inputs[3]->data_))[1]; + } else { + return NNACL_ERR; + } + return NNACL_OK; +} + +int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + if (input->shape_size_ != 0 && input->shape_size_ != 4) { + return NNACL_ERR; + } + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + ResizeParameter *param = (ResizeParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapePush(output_shape, &output_shape_size, GetBatch(input)); + int ret = CalculateNewHeightAndWidth(inputs, inputs_size, param); + if (ret == NNACL_OK) { + ShapePush(output_shape, &output_shape_size, param->new_height_); + ShapePush(output_shape, &output_shape_size, param->new_width_); + ShapePush(output_shape, &output_shape_size, GetChannel(input)); + SetShapeArray(output, output_shape, output_shape_size); + } + return ret; +} diff --git a/mindspore/lite/nnacl/infer/resize_infer.h b/mindspore/lite/nnacl/infer/resize_infer.h new file mode 
100644 index 0000000000..50ad390ab6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/resize_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RESIZE_INFER_H +#define MINDSPORE_LITE_NNACL_RESIZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/resize_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ResizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_RESIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/rfft_infer.c b/mindspore/lite/nnacl/infer/rfft_infer.c new file mode 100644 index 0000000000..093ff691de --- /dev/null +++ b/mindspore/lite/nnacl/infer/rfft_infer.c @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/rfft_infer.h" +int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + output->data_type_ = kNumberTypeComplex64; + output->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + ShapeSet(output->shape_, &(output->shape_size_), input->shape_, input->shape_size_); + RfftParameter *param = (RfftParameter *)parameter; + output->shape_[input->shape_size_ - 1] = param->fft_length_ / 2 + 1; + ShapePush(output->shape_, &(output->shape_size_), 2); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/rfft_infer.h b/mindspore/lite/nnacl/infer/rfft_infer.h new file mode 100644 index 0000000000..c430cb342b --- /dev/null +++ b/mindspore/lite/nnacl/infer/rfft_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
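RfftInferShape above implements the usual one-sided real FFT layout: the last axis shrinks to fft_length / 2 + 1 bins and a trailing axis of size 2 holds the (re, im) pair of each complex64 value. A sketch with an illustrative 400-sample frame:

#include <stdio.h>

int main(void) {
  int in_shape[] = {1, 400};  /* e.g. one frame of 400 samples */
  int fft_length = 512;       /* stand-in for RfftParameter::fft_length_ */
  int out_shape[3];
  out_shape[0] = in_shape[0];
  out_shape[1] = fft_length / 2 + 1;  /* one-sided spectrum: 257 bins */
  out_shape[2] = 2;                   /* complex64 stored as (re, im) */
  printf("[%d, %d, %d]\n", out_shape[0], out_shape[1], out_shape[2]);  /* [1, 257, 2] */
  return 0;
}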
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_RFFT_INFER_H +#define MINDSPORE_LITE_NNACL_RFFT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct RfftParameter { + OpParameter op_parameter_; + int fft_length_; +} RfftParameter; + +int RfftInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_RFFT_INFER_H diff --git a/mindspore/lite/nnacl/infer/roi_pooling_infer.c b/mindspore/lite/nnacl/infer/roi_pooling_infer.c new file mode 100644 index 0000000000..488364771d --- /dev/null +++ b/mindspore/lite/nnacl/infer/roi_pooling_infer.c @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/roi_pooling_infer.h" + +int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (inputs_size != 2) { + return NNACL_INPUT_TENSOR_ERROR; + } + const TensorC *input = inputs[0]; + const TensorC *roi = inputs[1]; + TensorC *output = outputs[0]; + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + ROIPoolingParameter *param = (ROIPoolingParameter *)parameter; + output->shape_size_ = 4; + output->shape_[0] = roi->shape_[0]; + output->shape_[1] = param->pooledH_; + output->shape_[2] = param->pooledW_; + output->shape_[3] = GetChannel(input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/roi_pooling_infer.h b/mindspore/lite/nnacl/infer/roi_pooling_infer.h new file mode 100644 index 0000000000..7fb99468c0 --- /dev/null +++ b/mindspore/lite/nnacl/infer/roi_pooling_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
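ROIPoolingInferShape above assembles the output as [num_rois, pooledH, pooledW, channels], taking the ROI count from the second input and the channel count from the NHWC feature map. With illustrative numbers:

#include <stdio.h>

int main(void) {
  int num_rois = 64;              /* shape_[0] of the ROI tensor (inputs[1]) */
  int pooled_h = 7, pooled_w = 7; /* ROIPoolingParameter fields */
  int channels = 256;             /* channel dim of the NHWC feature map */
  printf("[%d, %d, %d, %d]\n", num_rois, pooled_h, pooled_w, channels);  /* [64, 7, 7, 256] */
  return 0;
}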
+ */ +#ifndef MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H +#define MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/roi_pooling_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ROIPoolingInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_ROI_POOLING_INFER_H diff --git a/mindspore/lite/nnacl/infer/scatter_nd_infer.c b/mindspore/lite/nnacl/infer/scatter_nd_infer.c new file mode 100644 index 0000000000..f40edf5320 --- /dev/null +++ b/mindspore/lite/nnacl/infer/scatter_nd_infer.c @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/scatter_nd_infer.h" + +int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *shape = inputs[0]; + if (shape->data_ == NULL) { + return NNACL_INFER_INVALID; + } + const TensorC *update = inputs[2]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, update); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int *shape_data = (int *)(shape->data_); + SetShapeArray(output, shape_data, GetElementNum(shape)); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/scatter_nd_infer.h b/mindspore/lite/nnacl/infer/scatter_nd_infer.h new file mode 100644 index 0000000000..5ee5acdaad --- /dev/null +++ b/mindspore/lite/nnacl/infer/scatter_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
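ScatterNdInferShape above takes the output shape verbatim from the data of the shape tensor (inputs[0]), while dtype and format come from the update tensor (inputs[2]). A toy version of that copy:

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t shape_data[] = {4, 4, 4};  /* stand-in for the shape tensor's data */
  size_t n = sizeof(shape_data) / sizeof(shape_data[0]);
  /* SetShapeArray(output, shape_data, GetElementNum(shape)) equivalent */
  printf("output rank %zu: [", n);
  for (size_t i = 0; i < n; i++) printf(i + 1 < n ? "%d, " : "%d", shape_data[i]);
  printf("]\n");
  return 0;
}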
+ */ +#ifndef MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H +#define MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ScatterNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SCATTER_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/select_infer.c b/mindspore/lite/nnacl/infer/select_infer.c new file mode 100644 index 0000000000..311af22f7d --- /dev/null +++ b/mindspore/lite/nnacl/infer/select_infer.c @@ -0,0 +1,53 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/select_infer.h" +#include <string.h> + +int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = + CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 2 * outputs_size + 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + for (size_t i = 0; i < outputs_size; i++) { + const TensorC *input = inputs[i + 1]; + TensorC *output = outputs[i]; + SetDataTypeFormat(output, input); + if (input->data_type_ == kObjectTypeTensorType) { + TensorListC *input_tensorlist = (TensorListC *)(input); + TensorListC *output_tensorlist = (TensorListC *)(output); + output_tensorlist->element_shape_size_ = input_tensorlist->element_shape_size_; + for (size_t j = 0; j < input_tensorlist->element_shape_size_; j++) { + output_tensorlist->element_shape_[j] = input_tensorlist->element_shape_[j]; + } + output_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_; + output_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_; + output_tensorlist->element_num_ = input_tensorlist->element_num_; + + for (size_t j = 0; j < output_tensorlist->element_num_; j++) { + memcpy(output_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + } + } else { + SetShapeTensor(output, input); + } + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/select_infer.h b/mindspore/lite/nnacl/infer/select_infer.h new file mode 100644 index 0000000000..1b95ebf830 --- /dev/null +++ b/mindspore/lite/nnacl/infer/select_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
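SelectInferShape above explains its own input-count check: one condition tensor plus two branch candidates for every output, hence inputs_size = 2 * outputs_size + 1, with outputs[i] cloning inputs[i + 1] (the first candidate set). In sketch form:

#include <stdio.h>

int main(void) {
  /* Select inputs: {cond, branch_a[0..n-1], branch_b[0..n-1]} */
  size_t outputs_size = 3;
  size_t inputs_size = 2 * outputs_size + 1;  /* 7 tensors expected */
  /* each outputs[i] copies shape/dtype/format from inputs[i + 1] */
  printf("outputs: %zu, expected inputs: %zu\n", outputs_size, inputs_size);
  return 0;
}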
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SELECT_INFER_H +#define MINDSPORE_LITE_NNACL_SELECT_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SelectInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SELECT_INFER_H diff --git a/mindspore/lite/nnacl/infer/sgd_infer.c b/mindspore/lite/nnacl/infer/sgd_infer.c new file mode 100644 index 0000000000..71da6844d2 --- /dev/null +++ b/mindspore/lite/nnacl/infer/sgd_infer.c @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/sgd_infer.h" + +int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullInputSize(inputs, inputs_size, outputs, outputs_size, parameter, 6); + if (check_ret != NNACL_OK) { + return check_ret; + } + + if (GetElementNum(inputs[0]) != GetElementNum(inputs[1]) || GetElementNum(inputs[0]) != GetElementNum(inputs[3]) || + GetElementNum(inputs[2]) != 1 || GetElementNum(inputs[4]) != 1) { + return NNACL_INPUT_TENSOR_ERROR; + } + if (outputs_size != 0) { + TensorC *out = outputs[0]; + SetDataTypeFormat(out, inputs[0]); + out->shape_size_ = 1; + out->shape_[0] = 1; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/sgd_infer.h b/mindspore/lite/nnacl/infer/sgd_infer.h new file mode 100644 index 0000000000..8d47efdcda --- /dev/null +++ b/mindspore/lite/nnacl/infer/sgd_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
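SgdInferShape above only validates element counts and emits a scalar status output; which operand sits at which index is an assumption in the sketch below, inferred from the matching pattern of the checks (the dense tensors at indices 0, 1, 3 must agree, and indices 2 and 4 must be scalars):

#include <stdio.h>

int main(void) {
  /* Hypothetical element counts for the six SGD inputs, order assumed:
     0: weight, 1: accumulate, 2: learning rate, 3: gradient, 4: momentum, 5: stat */
  int count[6] = {128, 128, 1, 128, 1, 1};
  int ok = count[0] == count[1] && count[0] == count[3] && count[2] == 1 && count[4] == 1;
  printf(ok ? "valid: status output shape [1]\n" : "NNACL_INPUT_TENSOR_ERROR\n");
  return 0;
}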
+ */ +#ifndef MINDSPORE_LITE_NNACL_SGD_INFER_H +#define MINDSPORE_LITE_NNACL_SGD_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SgdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SGD_INFER_H diff --git a/mindspore/lite/nnacl/infer/shape_infer.c b/mindspore/lite/nnacl/infer/shape_infer.c new file mode 100644 index 0000000000..b2110b4a8c --- /dev/null +++ b/mindspore/lite/nnacl/infer/shape_infer.c @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/shape_infer.h" + +int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *in_tensor = inputs[0]; + TensorC *out_tensor = outputs[0]; + + out_tensor->data_type_ = kNumberTypeInt32; + out_tensor->format_ = in_tensor->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + out_tensor->shape_size_ = 1; + out_tensor->shape_[0] = (int)(in_tensor->shape_size_); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/shape_infer.h b/mindspore/lite/nnacl/infer/shape_infer.h new file mode 100644 index 0000000000..30be218bc6 --- /dev/null +++ b/mindspore/lite/nnacl/infer/shape_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_SHAPE_INFER_H +#define MINDSPORE_LITE_NNACL_SHAPE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int ShapeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SHAPE_INFER_H diff --git a/mindspore/lite/nnacl/infer/size_infer.c b/mindspore/lite/nnacl/infer/size_infer.c new file mode 100644 index 0000000000..f110ce0014 --- /dev/null +++ b/mindspore/lite/nnacl/infer/size_infer.c @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/size_infer.h" + +int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *in_tensor = inputs[0]; + TensorC *out_tensor = outputs[0]; + out_tensor->data_type_ = kNumberTypeInt32; + out_tensor->format_ = in_tensor->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + out_tensor->shape_size_ = 1; + out_tensor->shape_[0] = 1; + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/size_infer.h b/mindspore/lite/nnacl/infer/size_infer.h new file mode 100644 index 0000000000..b299c0e1ad --- /dev/null +++ b/mindspore/lite/nnacl/infer/size_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_SIZE_INFER_H +#define MINDSPORE_LITE_NNACL_SIZE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SizeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SIZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/skip_gram_infer.c b/mindspore/lite/nnacl/infer/skip_gram_infer.c new file mode 100644 index 0000000000..5517abfd80 --- /dev/null +++ b/mindspore/lite/nnacl/infer/skip_gram_infer.c @@ -0,0 +1,34 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/skip_gram_infer.h" + +int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (input->data_ == NULL) { + return NNACL_INFER_INVALID; + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/skip_gram_infer.h b/mindspore/lite/nnacl/infer/skip_gram_infer.h new file mode 100644 index 0000000000..6b54fc1c9a --- /dev/null +++ b/mindspore/lite/nnacl/infer/skip_gram_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H +#define MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SkipGramInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SKIP_GRAM_INFER_H diff --git a/mindspore/lite/nnacl/infer/slice_infer.c b/mindspore/lite/nnacl/infer/slice_infer.c new file mode 100644 index 0000000000..390902d83f --- /dev/null +++ b/mindspore/lite/nnacl/infer/slice_infer.c @@ -0,0 +1,81 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/slice_infer.h"
+
+int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                    OpParameter *parameter) {
+  if (inputs_size < 3 || outputs_size != 1) {  // inputs[1] (begin) and inputs[2] (size) are read below
+    return NNACL_PARAM_INVALID;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  SliceParameter *param = (SliceParameter *)parameter;
+  param->param_length_ = input->shape_size_;
+  output->shape_size_ = input->shape_size_;
+
+  /* init begin parameter */
+  int slice_begin_size = GetElementNum(inputs[1]);
+  int *begin_ptr = (int *)(inputs[1]->data_);
+  if (slice_begin_size != param->param_length_ || begin_ptr == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  for (int i = 0; i < slice_begin_size; i++) {
+    param->begin_[i] = begin_ptr[i];
+  }
+
+  /* init size parameter */
+  int slice_size_size = GetElementNum(inputs[2]);
+  int *size_ptr = (int *)(inputs[2]->data_);
+  if (slice_size_size != param->param_length_ || size_ptr == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  for (int i = 0; i < slice_size_size; i++) {
+    param->size_[i] = size_ptr[i];
+  }
+
+  /* infer output shape information */
+  int begin[MAX_SHAPE_SIZE];
+  int size[MAX_SHAPE_SIZE];
+  for (int i = 0; i < param->param_length_; ++i) {
+    begin[param->axis_[i]] = param->begin_[i];
+    size[param->axis_[i]] = param->size_[i];
+  }
+
+  for (int i = 0; i < param->param_length_; ++i) {
+    if (size[i] < 0 && size[i] != -1) {
+      return NNACL_PARAM_INVALID;
+    }
+    if (begin[i] < 0) {
+      return NNACL_PARAM_INVALID;
+    }
+    if (input->shape_[i] <= begin[i]) {
+      return NNACL_PARAM_INVALID;
+    }
+    if (size[i] > (input->shape_[i] - begin[i])) {
+      return NNACL_PARAM_INVALID;
+    }
+
+    output->shape_[i] = size[i] < 0 ? input->shape_[i] - begin[i] : size[i];
+  }
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/slice_infer.h b/mindspore/lite/nnacl/infer/slice_infer.h
new file mode 100644
index 0000000000..0aa3f79ce3
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/slice_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
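The size semantics in SliceInferShape above are worth a worked example: size[i] == -1 keeps everything from begin[i] to the end of axis i, and every other value is bounds-checked against the input. A standalone sketch with illustrative shapes:

#include <stdio.h>

int main(void) {
  int in_shape[] = {4, 8, 8, 3};
  int begin[] = {0, 2, 2, 0};
  int size[] = {-1, 4, 4, -1};  /* -1 keeps everything from begin[i] onward */
  int out_shape[4];
  for (int i = 0; i < 4; ++i) {
    if (begin[i] < 0 || begin[i] >= in_shape[i]) return 1;           /* NNACL_PARAM_INVALID */
    if (size[i] >= 0 && size[i] > in_shape[i] - begin[i]) return 1;  /* slice overruns the axis */
    out_shape[i] = size[i] < 0 ? in_shape[i] - begin[i] : size[i];
  }
  printf("[%d, %d, %d, %d]\n", out_shape[0], out_shape[1], out_shape[2], out_shape[3]);  /* [4, 4, 4, 3] */
  return 0;
}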
+ */ +#ifndef MINDSPORE_LITE_NNACL_SLICE_INFER_H +#define MINDSPORE_LITE_NNACL_SLICE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c new file mode 100644 index 0000000000..5b78c4d102 --- /dev/null +++ b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.c @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/softmax_cross_entropy_infer.h" + +int SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (1 > outputs_size) { + return NNACL_INPUT_TENSOR_ERROR; + } + const TensorC *in0 = inputs[0]; + TensorC *out = outputs[0]; + + out->shape_size_ = 2; + out->shape_[0] = in0->shape_[0]; + out->shape_[1] = 1; + SetDataTypeFormat(out, in0); + + if (1 < outputs_size) { + TensorC *grads = outputs[1]; + SetShapeTensor(grads, in0); + SetDataTypeFormat(grads, in0); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h new file mode 100644 index 0000000000..b66aa8d7ef --- /dev/null +++ b/mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
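SoftmaxCrossEntropyInferShape above always emits a [batch, 1] loss tensor and, when a second output is present, mirrors the logits shape into it. A minimal sketch of that bookkeeping, assuming hypothetical sizes:

#include <assert.h>

/* Shape bookkeeping of SoftmaxCrossEntropyInferShape for logits [32, 10]. */
void softmax_ce_shape_example(void) {
  int logits[2] = {32, 10};
  int loss[2] = {logits[0], 1};          /* one loss value per batch row */
  int grads[2] = {logits[0], logits[1]}; /* optional second output mirrors logits */
  assert(loss[0] == 32 && loss[1] == 1 && grads[1] == 10);
}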
+ */
+#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H
+#define MINDSPORE_LITE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int SoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                  size_t outputs_size, OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_SOFTMAX_CROSS_ENTROPY_INFER_H
diff --git a/mindspore/lite/nnacl/infer/softmax_infer.c b/mindspore/lite/nnacl/infer/softmax_infer.c
new file mode 100644
index 0000000000..39fd8ff999
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/softmax_infer.c
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/softmax_infer.h"
+
+int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                      OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+
+  output->data_type_ = input->data_type_;
+  output->format_ = input->format_;
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  if (input->shape_size_ > 5) {
+    return NNACL_ERR;
+  }
+  SetShapeTensor(output, input);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/softmax_infer.h b/mindspore/lite/nnacl/infer/softmax_infer.h
new file mode 100644
index 0000000000..ba22743fea
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/softmax_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#ifndef MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H +#define MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SoftMaxInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SOFTMAX_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_infer.c b/mindspore/lite/nnacl/infer/space_to_batch_infer.c new file mode 100644 index 0000000000..0eb146052b --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_batch_infer.c @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/space_to_batch_infer.h" + +int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + if (input->format_ != Format_NHWC) { + return NNACL_ERR; + } + SetDataTypeFormat(outputs[0], input); + SpaceToBatchParameter *param = (SpaceToBatchParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (input->shape_size_ != 4) { + return NNACL_ERR; + } + + int *block_shape = param->block_sizes_; + size_t block_shape_size = param->m_; + int *paddings = param->paddings_; + int padding_left = 0; + int padding_right = 0; + int block_w = 1; + if (block_shape_size == 2) { + padding_left = paddings[2]; + padding_right = paddings[3]; + block_w = block_shape[1]; + } + + outputs[0]->shape_[kNHWC_N] = input->shape_[kNHWC_N] * (block_shape[0] * block_w); + outputs[0]->shape_[kNHWC_H] = (input->shape_[kNHWC_H] + paddings[0] + paddings[1]) / block_shape[0]; + outputs[0]->shape_[kNHWC_W] = (input->shape_[kNHWC_W] + padding_left + padding_right) / block_w; + outputs[0]->shape_[kNHWC_C] = input->shape_[kNHWC_C]; + outputs[0]->shape_size_ = input->shape_size_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/space_to_batch_infer.h b/mindspore/lite/nnacl/infer/space_to_batch_infer.h new file mode 100644 index 0000000000..e6e8743222 --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_batch_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
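In SpaceToBatchInferShape above, the batch dimension grows by the block product while the padded spatial dimensions shrink by their block sizes. A worked sketch with hypothetical numbers (not part of the patch):

#include <assert.h>

/* SpaceToBatch shape math for input [1, 4, 4, 3], block {2, 2}, no padding. */
void space_to_batch_shape_example(void) {
  int in[4] = {1, 4, 4, 3};
  int block[2] = {2, 2};
  int pad[4] = {0, 0, 0, 0}; /* top, bottom, left, right */
  int out[4];
  out[0] = in[0] * block[0] * block[1];
  out[1] = (in[1] + pad[0] + pad[1]) / block[0];
  out[2] = (in[2] + pad[2] + pad[3]) / block[1];
  out[3] = in[3];
  assert(out[0] == 4 && out[1] == 2 && out[2] == 2 && out[3] == 3);
}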
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H +#define MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToBatchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c new file mode 100644 index 0000000000..0cc350a96c --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.c @@ -0,0 +1,132 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/space_to_batch_nd_infer.h" +#include <limits.h> + +int SpaceSetOutputShapeFromParam(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + const TensorC *input = inputs[0]; + if (input->shape_size_ != 4) { + return NNACL_ERR; + } + SpaceToBatchParameter *param = (SpaceToBatchParameter *)parameter; + int *block_shape = param->block_sizes_; + size_t block_shape_size = param->m_; + int *padding = param->paddings_; + int padding_left = 0; + int padding_right = 0; + int block_w = 1; + if (block_shape_size == 2) { + padding_left = padding[2]; + padding_right = padding[3]; + block_w = block_shape[1]; + } + if (block_shape[0] * block_w > INT_MAX / input->shape_[kNHWC_N]) { + return NNACL_ERR; + } + outputs[0]->shape_[kNHWC_N] = input->shape_[kNHWC_N] * block_shape[0] * block_w; + if (padding[0] + padding[1] > INT_MAX - input->shape_[kNHWC_H]) { + return NNACL_ERR; + } + outputs[0]->shape_[kNHWC_H] = (input->shape_[kNHWC_H] + padding[0] + padding[1]) / block_shape[0]; + if (padding_left + padding_right > INT_MAX - input->shape_[kNHWC_W]) { + return NNACL_ERR; + } + outputs[0]->shape_[kNHWC_W] = (input->shape_[kNHWC_W] + padding_left + padding_right) / block_w; + if (input->shape_size_ > 3) { + outputs[0]->shape_[kNHWC_C] = input->shape_[kNHWC_C]; + } + outputs[0]->shape_size_ = input->shape_size_; + return NNACL_OK; +} + +int SpaceSetOutputShapeFromInput(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + const TensorC *input = inputs[0]; + if (input->shape_size_ != 4) { + return NNACL_ERR; + } + if (GetElementNum(inputs[2]) != 4) { + return NNACL_ERR; + } + int *block_shape = (int *)(inputs[1]->data_); + int *padding = (int *)(inputs[2]->data_); + int padding_left = 0; + int padding_right = 0; + int block_w = 1; + if (GetElementNum(inputs[1]) == 2) { + padding_left = padding[2]; + padding_right = padding[3]; + block_w = block_shape[1]; + } + int32_t output_shape[MAX_SHAPE_SIZE]; + size_t 
output_shape_size = input->shape_size_; + if (block_shape[0] * block_w > INT_MAX / input->shape_[kNHWC_N]) { + return NNACL_ERR; + } + output_shape[kNHWC_N] = input->shape_[kNHWC_N] * block_shape[0] * block_w; + if (padding[0] + padding[1] > INT_MAX - input->shape_[kNHWC_H]) { + return NNACL_ERR; + } + output_shape[kNHWC_H] = (input->shape_[kNHWC_H] + padding[0] + padding[1]) / block_shape[0]; + if (padding_left + padding_right > INT_MAX - input->shape_[kNHWC_W]) { + return NNACL_ERR; + } + output_shape[kNHWC_W] = (input->shape_[kNHWC_W] + padding_left + padding_right) / block_w; + if (input->shape_size_ > 3) { + output_shape[kNHWC_C] = input->shape_[kNHWC_C]; + } + SetShapeArray(outputs[0], output_shape, output_shape_size); + return NNACL_OK; +} + +int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (outputs_size != 1 || (inputs_size != 1 && inputs_size != 3)) { + return 1; + } + + const TensorC *input = inputs[0]; + if (input->format_ != Format_NHWC) { + return NNACL_ERR; + } + outputs[0]->data_type_ = input->data_type_; + outputs[0]->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + if (inputs_size == 1) { + int ret = SpaceSetOutputShapeFromParam(inputs, inputs_size, outputs, outputs_size, parameter); + if (ret != NNACL_OK) { + return ret; + } + } + if (inputs_size == 3) { + if (inputs[0]->data_ == NULL) { + return NNACL_INFER_INVALID; + } + if (inputs[1]->data_ == NULL || inputs[2]->data_ == NULL) { + return NNACL_ERR; + } + int ret = SpaceSetOutputShapeFromInput(inputs, inputs_size, outputs, outputs_size, parameter); + if (ret != NNACL_OK) { + return ret; + } + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h new file mode 100644 index 0000000000..c8bc25e2c4 --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
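The guards above of the form a * b > INT_MAX / c reject a product before it can overflow. The same pattern in isolation, a sketch with illustrative names that assumes the block sizes themselves are small positive values, as validated shape dims are:

#include <limits.h>
#include <stdbool.h>

/* Returns true when n * block_h * block_w would exceed INT_MAX; all
 * operands are assumed positive. */
bool mul_would_overflow(int n, int block_h, int block_w) {
  return block_h * block_w > INT_MAX / n;
}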
+ */ +#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H +#define MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/fp32/space_to_batch_fp32.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToBatchNdInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SPACE_TO_BATCH_ND_INFER_H diff --git a/mindspore/lite/nnacl/infer/space_to_depth_infer.c b/mindspore/lite/nnacl/infer/space_to_depth_infer.c new file mode 100644 index 0000000000..7baaaaade9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_depth_infer.c @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/space_to_depth_infer.h" +#include <limits.h> + +int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (outputs_size != 1 || inputs_size != 1) { + return NNACL_ERR; + } + + const TensorC *input = inputs[0]; + if (input->format_ != Format_NHWC) { + return NNACL_ERR; + } + SetDataTypeFormat(outputs[0], input); + SpaceToDepthParameter *param = (SpaceToDepthParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + if (input->shape_size_ != 4) { + return NNACL_ERR; + } + + int32_t block_size = param->block_size_; + if (block_size == 0) { + return NNACL_ERR; + } + if (input->shape_[kNHWC_H] % block_size != 0 || input->shape_[kNHWC_H] == 0 || + input->shape_[kNHWC_W] % block_size != 0 || input->shape_[kNHWC_W] == 0) { + return NNACL_ERR; + } + outputs[0]->shape_[kNHWC_N] = input->shape_[kNHWC_N]; + outputs[0]->shape_[kNHWC_H] = input->shape_[kNHWC_H] / block_size; + outputs[0]->shape_[kNHWC_W] = input->shape_[kNHWC_W] / block_size; + if (block_size * block_size > INT_MAX / input->shape_[kNHWC_C]) { + return NNACL_ERR; + } + outputs[0]->shape_[kNHWC_C] = input->shape_[kNHWC_C] * (block_size * block_size); + outputs[0]->shape_size_ = input->shape_size_; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/space_to_depth_infer.h b/mindspore/lite/nnacl/infer/space_to_depth_infer.h new file mode 100644 index 0000000000..65dfefdd2e --- /dev/null +++ b/mindspore/lite/nnacl/infer/space_to_depth_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
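SpaceToDepthInferShape above shrinks H and W by block_size and grows C by block_size squared. A minimal sketch with hypothetical numbers:

#include <assert.h>

/* SpaceToDepth shape math for input [1, 4, 4, 3] with block_size 2. */
void space_to_depth_shape_example(void) {
  int in[4] = {1, 4, 4, 3};
  int block = 2;
  int out[4] = {in[0], in[1] / block, in[2] / block, in[3] * block * block};
  assert(out[0] == 1 && out[1] == 2 && out[2] == 2 && out[3] == 12);
}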
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H +#define MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/space_to_depth_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpaceToDepthInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SPACE_TO_DEPTH_INFER_H diff --git a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.c b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.c new file mode 100644 index 0000000000..5d8b7991b7 --- /dev/null +++ b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/sparse_softmax_cross_entropy_infer.h" + +int SparseSoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *in0 = inputs[0]; + TensorC *out = outputs[0]; + + SparseSoftmaxCrossEntropyParameter *param = (SparseSoftmaxCrossEntropyParameter *)parameter; + if (param->is_grad_ != 0) { + SetShapeTensor(out, in0); + SetDataTypeFormat(out, in0); + } else { + out->shape_size_ = 1; + out->shape_[0] = 1; + SetDataTypeFormat(out, in0); + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.h b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.h new file mode 100644 index 0000000000..56322e3533 --- /dev/null +++ b/mindspore/lite/nnacl/infer/sparse_softmax_cross_entropy_infer.h @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_INFER_H +#define MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct SparseSoftmaxCrossEntropyParameter { + OpParameter op_parameter_; + bool is_grad_; +} SparseSoftmaxCrossEntropyParameter; + +int SparseSoftmaxCrossEntropyInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SPARSE_SOFTMAX_CROSS_ENTROPY_INFER_H diff --git a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c new file mode 100644 index 0000000000..486df3ef19 --- /dev/null +++ b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.c @@ -0,0 +1,40 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/sparse_to_dense_infer.h" + +int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + TensorC *output = outputs[0]; + const TensorC *input1 = inputs[1]; + const TensorC *input2 = inputs[2]; + SetDataTypeFormat(output, input2); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int *input1_data = (int *)(input1->data_); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (int i = 0; i < GetElementNum(input1); i++) { + ShapePush(output_shape, &output_shape_size, input1_data[i]); + } + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h new file mode 100644 index 0000000000..1e274247e2 --- /dev/null +++ b/mindspore/lite/nnacl/infer/sparse_to_dense_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
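SparseToDenseInferShape above copies the dense shape straight out of the second input's data buffer, so the output rank equals that tensor's element count. A sketch with hypothetical buffer contents:

#include <assert.h>

/* The dense shape is whatever inputs[1] holds, e.g. {4, 5} gives a 4 x 5 output. */
void sparse_to_dense_shape_example(void) {
  int shape_data[2] = {4, 5}; /* stand-in for (int *)(inputs[1]->data_) */
  int out_shape[2];
  for (int i = 0; i < 2; ++i) {
    out_shape[i] = shape_data[i];
  }
  assert(out_shape[0] == 4 && out_shape[1] == 5);
}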
+ */
+#ifndef MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_INFER_H
+#define MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int SparseToDenseInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                            OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_SPARSE_TO_DENSE_INFER_H
diff --git a/mindspore/lite/nnacl/infer/splice_infer.c b/mindspore/lite/nnacl/infer/splice_infer.c
new file mode 100644
index 0000000000..74468f743b
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/splice_infer.c
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/splice_infer.h"
+
+int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                     OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret == NNACL_NULL_PTR) {
+    return NNACL_NULL_PTR;
+  }
+  if (inputs_size != 1) {
+    return NNACL_ERR;
+  }
+  if (outputs_size != 1) {
+    return NNACL_ERR;
+  }
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  size_t max_dims = input->shape_size_;
+  size_t max_dims_idx = 0;
+
+  // determine max_dims
+  for (size_t i = 1; i < inputs_size; ++i) {
+    if (inputs[i]->shape_size_ > max_dims) {
+      max_dims = inputs[i]->shape_size_;
+      max_dims_idx = i;
+    }
+  }
+  SpliceParameter *param = (SpliceParameter *)parameter;
+  if (param == NULL) {
+    return NNACL_NULL_PTR;
+  }
+  int context_size = param->context_dim_;
+  int out_dim = param->output_dim_;
+  ShapeSet(output->shape_, &output->shape_size_, inputs[max_dims_idx]->shape_, inputs[max_dims_idx]->shape_size_);
+  output->shape_[1] = input->shape_[1] - context_size + 1;
+  output->shape_[2] = out_dim;
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/splice_infer.h b/mindspore/lite/nnacl/infer/splice_infer.h
new file mode 100644
index 0000000000..bb7584cc19
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/splice_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
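In SpliceInferShape above, each output frame is spliced from context_dim_ consecutive input frames, so the frame axis shrinks to in[1] - context_dim_ + 1 while the feature axis becomes output_dim_. A sketch with hypothetical dimensions:

#include <assert.h>

/* Splice shape math for input [1, 100, 40], context_dim_ 3, output_dim_ 128:
 * 100 frames combined 3 at a time leave 100 - 3 + 1 = 98 output frames. */
void splice_shape_example(void) {
  int in[3] = {1, 100, 40};
  int context_dim = 3;
  int output_dim = 128;
  int out[3] = {in[0], in[1] - context_dim + 1, output_dim};
  assert(out[1] == 98 && out[2] == 128);
}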
+ */ + +#ifndef MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ +#define MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ +#include "nnacl/infer/common_infer.h" +#include "nnacl/splice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SpliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INFER_SPLICE_INFER_H_ diff --git a/mindspore/lite/nnacl/infer/split_infer.c b/mindspore/lite/nnacl/infer/split_infer.c new file mode 100644 index 0000000000..8a4f139325 --- /dev/null +++ b/mindspore/lite/nnacl/infer/split_infer.c @@ -0,0 +1,77 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/split_infer.h" + +int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + if (inputs_size < 1) { + return NNACL_ERR; + } + if (outputs_size == 0) { + return NNACL_ERR; + } + for (size_t i = 0; i < outputs_size; i++) { + SetDataTypeFormat(outputs[i], input); + } + + SplitParameter *param = (SplitParameter *)parameter; + + size_t num_split_ = param->num_split_ == 0 ? (int)(outputs_size) : param->num_split_; + param->num_split_ = num_split_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + size_t split_dim = param->split_dim_ < 0 ? 
input->shape_size_ + param->split_dim_ : param->split_dim_; + if (split_dim > input->shape_size_) { + return NNACL_ERR; + } + if ((int)(outputs_size) != num_split_) { + return NNACL_ERR; + } + if (param->split_count_ == 0) { + if (input->shape_[split_dim] % num_split_ != 0) { + return NNACL_ERR; + } + for (int i = 0; i < num_split_; ++i) { + param->split_sizes_[i] = input->shape_[split_dim] / num_split_; + } + } + for (int i = 0; i < num_split_; ++i) { + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapeSet(output_shape, &output_shape_size, input->shape_, input->shape_size_); + int split_dim_i = input->shape_[split_dim]; + if (i == num_split_ - 1 && param->split_sizes_[i] == -1) { + for (size_t j = 0; j < param->num_split_ - 1; ++j) { + split_dim_i -= param->split_sizes_[j]; + } + param->split_sizes_[i] = split_dim_i; + } else { + split_dim_i = param->split_sizes_[i]; + } + output_shape[split_dim] = split_dim_i; + SetShapeArray(outputs[i], output_shape, output_shape_size); + SetDataTypeFormat(outputs[i], input); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/split_infer.h b/mindspore/lite/nnacl/infer/split_infer.h new file mode 100644 index 0000000000..7745fd26cb --- /dev/null +++ b/mindspore/lite/nnacl/infer/split_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SPLIT_INFER_H +#define MINDSPORE_LITE_NNACL_SPLIT_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/split_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SplitInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SPLIT_INFER_H diff --git a/mindspore/lite/nnacl/infer/squeeze_infer.c b/mindspore/lite/nnacl/infer/squeeze_infer.c new file mode 100644 index 0000000000..2ed2f52eaa --- /dev/null +++ b/mindspore/lite/nnacl/infer/squeeze_infer.c @@ -0,0 +1,59 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
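SplitInferShape above sizes the outputs two ways: with no explicit sizes the split dimension must divide evenly, and a trailing -1 size absorbs whatever the earlier pieces leave over. A sketch with hypothetical numbers:

#include <assert.h>

/* Split sizing: a dim of 12 split 3 ways evenly, and with sizes {2, 4, -1}. */
void split_sizes_example(void) {
  int dim = 12;
  assert(dim % 3 == 0 && dim / 3 == 4); /* even split gives {4, 4, 4} */
  int sizes[3] = {2, 4, -1};
  sizes[2] = dim - sizes[0] - sizes[1]; /* trailing -1 resolves to 12 - 2 - 4 = 6 */
  assert(sizes[2] == 6);
}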
+ */ + +#include "nnacl/infer/squeeze_infer.h" + +int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + SqueezeParameter *param = (SqueezeParameter *)parameter; + SetDataTypeFormat(outputs[0], input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + + for (size_t i = 0; i < param->axis_size_; i++) { + param->axis_[i] = param->axis_[i] >= 0 ? param->axis_[i] : param->axis_[i] + input->shape_size_; + } + + if (param->axis_size_ == 0) { + for (size_t i = 0; i < input->shape_size_; i++) { + if (input->shape_[i] != 1) { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + } + } else { + size_t axisIdx = 0; + for (size_t i = 0; i < input->shape_size_; i++) { + if (axisIdx < param->axis_size_ && param->axis_[axisIdx] == (int)(i)) { + if (input->shape_[i] != 1) return NNACL_PARAM_INVALID; + axisIdx++; + continue; + } else { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + } + } + SetShapeArray(outputs[0], out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/squeeze_infer.h b/mindspore/lite/nnacl/infer/squeeze_infer.h new file mode 100644 index 0000000000..9b7409ab28 --- /dev/null +++ b/mindspore/lite/nnacl/infer/squeeze_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H +#define MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/squeeze_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/stack_infer.c b/mindspore/lite/nnacl/infer/stack_infer.c new file mode 100644 index 0000000000..c135a2f8e9 --- /dev/null +++ b/mindspore/lite/nnacl/infer/stack_infer.c @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
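SqueezeInferShape above first normalizes negative axes with axis += rank and then drops the named dimensions, failing if any of them is not 1. A sketch of the normalization, with hypothetical values:

#include <assert.h>

/* Axis normalization used by SqueezeInferShape: -2 on a rank-4 shape is axis 2,
 * so [1, 3, 1, 5] squeezed on axes {0, -2} leaves [3, 5]. */
void squeeze_axis_example(void) {
  int rank = 4;
  int axis = -2;
  if (axis < 0) {
    axis += rank;
  }
  assert(axis == 2);
}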
+ */ + +#include "nnacl/infer/stack_infer.h" + +int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (outputs_size != 1) { + return NNACL_PARAM_INVALID; + } + if (inputs_size < 1) { + return NNACL_PARAM_INVALID; + } + const TensorC *input = inputs[0]; + SetDataTypeFormat(outputs[0], input); + StackParameter *param = (StackParameter *)parameter; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int32_t output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapeSet(output_shape, &output_shape_size, input->shape_, input->shape_size_); + int axis = param->axis_ < 0 ? param->axis_ + input->shape_size_ + 1 : param->axis_; + if (axis < 0 || axis > input->shape_size_) { + return NNACL_PARAM_INVALID; + } + + for (size_t i = 1; i < inputs_size; ++i) { + if (inputs[i]->shape_size_ != input->shape_size_) { + return NNACL_PARAM_INVALID; + } + for (size_t j = 0; j < input->shape_size_; ++j) { + if (inputs[i]->shape_[j] != input->shape_[j]) { + return NNACL_PARAM_INVALID; + } + } + if (inputs[i]->data_type_ != input->data_type_) { + return NNACL_PARAM_INVALID; + } + } + ShapeInsert(output_shape, &output_shape_size, axis, inputs_size); + SetShapeArray(outputs[0], output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/stack_infer.h b/mindspore/lite/nnacl/infer/stack_infer.h new file mode 100644 index 0000000000..40e47158e5 --- /dev/null +++ b/mindspore/lite/nnacl/infer/stack_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_STACK_INFER_H +#define MINDSPORE_LITE_NNACL_STACK_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/stack_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c new file mode 100644 index 0000000000..6de2337a29 --- /dev/null +++ b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.c @@ -0,0 +1,140 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
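StackInferShape above inserts the input count at the stack axis; a negative axis is normalized with rank + 1 because the output has one more dimension than each input. A sketch with hypothetical values:

#include <assert.h>

/* Stacking five [3, 4] tensors on axis -1: axis becomes -1 + 2 + 1 = 2,
 * and the count 5 is inserted there, giving [3, 4, 5]. */
void stack_axis_example(void) {
  int rank = 2;
  int axis = -1;
  int n_inputs = 5;
  if (axis < 0) {
    axis += rank + 1;
  }
  assert(axis == 2);
  int out[3] = {3, 4, n_inputs}; /* input count inserted at position 2 */
  assert(out[2] == 5);
}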
+ */
+
+#include "nnacl/infer/strided_slice_grad_infer.h"
+
+bool StridedSliceCheckInputs(const TensorC *const *inputs, size_t inputs_size) {
+  for (size_t i = 1; i < inputs_size; ++i) {
+    if (inputs[i]->data_ == NULL) {
+      return false;
+    }
+  }
+
+  return true;  // note: the original implementation also checked ndim_ <= in_shape_size here
+}
+
+int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                               size_t outputs_size, OpParameter *parameter) {
+  int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 5, 1);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+
+  const TensorC *input = inputs[0];
+  SetDataTypeFormat(outputs[0], input);
+  bool inferflag = parameter->infer_flag_;
+
+  int in_shape_[MAX_SHAPE_SIZE];
+  size_t in_shape_size = 0;
+  if (inferflag) {
+    ShapeSet(in_shape_, &in_shape_size, input->shape_, input->shape_size_);
+  }
+  int begins_[MAX_SHAPE_SIZE];
+  size_t begins_size = 0;
+  int ends_[MAX_SHAPE_SIZE];
+  size_t ends_size = 0;
+  int strides_[MAX_SHAPE_SIZE];
+  size_t strides_size = 0;
+
+  if (!StridedSliceCheckInputs(inputs, inputs_size)) {
+    return NNACL_INFER_INVALID;
+  }
+
+  // input order: dy, shapex, begins, ends, strides.
+  const TensorC *begin_tensor = inputs[2];
+  int *begin_data = (int *)(begin_tensor->data_);
+  int *end_data = (int *)(inputs[3]->data_);
+  int *stride_data = (int *)(inputs[4]->data_);
+  if (begin_data == NULL || end_data == NULL || stride_data == NULL) {
+    return NNACL_ERR;
+  }
+  size_t ndim_ = GetElementNum(begin_tensor);
+  for (size_t i = 0; i < ndim_; ++i) {
+    ShapePush(begins_, &begins_size, begin_data[i]);
+    ShapePush(ends_, &ends_size, end_data[i]);
+    ShapePush(strides_, &strides_size, stride_data[i]);
+  }
+
+  // set all mask to original input shape
+  uint32_t begins_mask_[MAX_SHAPE_SIZE];
+  uint32_t ends_mask_[MAX_SHAPE_SIZE];
+  uint32_t ellipsis_mask_[MAX_SHAPE_SIZE];
+  uint32_t new_axis_mask_[MAX_SHAPE_SIZE];
+
+  StridedSliceParameter *param = (StridedSliceParameter *)parameter;
+  for (size_t i = 0; i < ndim_; i++) {
+    begins_mask_[i] = (uint32_t)(param->begins_mask_) & (1 << i);
+    ends_mask_[i] = (uint32_t)(param->ends_mask_) & (1 << i);
+    ellipsis_mask_[i] = (uint32_t)(param->ellipsisMask_) & (1 << i);
+    new_axis_mask_[i] = (uint32_t)(param->newAxisMask_) & (1 << i);
+  }
+
+  // ApplyNewAxisMask();
+  for (size_t i = 0; i < ndim_; i++) {
+    if (new_axis_mask_[i]) {
+      ndim_ += 1;
+      ShapeInsert(in_shape_, &in_shape_size, i, 1);
+      begins_[i] = 0;
+      ends_[i] = 1;
+      strides_[i] = 1;
+
+      ShapePush(begins_, &begins_size, 0);
+      ShapePush(ends_, &ends_size, in_shape_[ndim_ - 1]);
+      ShapePush(strides_, &strides_size, 1);
+
+      begins_mask_[i] = false;
+      ends_mask_[i] = false;
+      ellipsis_mask_[i] = false;
+    }
+  }
+  // ApplyBeginMask();
+  for (size_t i = 0; i < ndim_; i++) {
+    if (begins_mask_[i]) {
+      begins_[i] = 0;
+    }
+  }
+  // ApplyEndMask();
+  for (size_t i = 0; i < ndim_; i++) {
+    if (ends_mask_[i]) {
+      ends_[i] = in_shape_[i];
+    }
+  }
+  // ApplyEllipsisMask();
+  for (size_t i = 0; i < ndim_; i++) {
+    if (ellipsis_mask_[i]) {
+      begins_[i] = 0;
+      ends_[i] = in_shape_[i];
+      break;
+    }
+  }
+
+  if (!inferflag) {
+    return NNACL_OK;
+  }
+
+  size_t output_size = inputs[1]->shape_[0];
+  int output_shape[MAX_SHAPE_SIZE];
+  size_t output_shape_size = 0;
+  if (inputs[1]->data_ == NULL) {
+    return NNACL_ERR;
+  }
+
+  for (size_t i = 0; i < output_size; i++) {
+    ShapePush(output_shape, &output_shape_size, ((int *)(inputs[1]->data_))[i]);
+  }
+  SetShapeArray(outputs[0], output_shape, output_shape_size);
+  return NNACL_OK;
+}
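The mask handling above unpacks one packed bitmask into per-axis flags with (uint32_t)mask & (1 << i). Casting the mask to bool before the AND would collapse it to a single bit, so the full-width cast matters. A minimal sketch:

#include <assert.h>
#include <stdint.h>

/* Unpacking a packed mask: 0b101 marks axes 0 and 2. */
void unpack_mask_example(void) {
  int packed = 5; /* binary 101 */
  uint32_t flags[3];
  for (int i = 0; i < 3; ++i) {
    flags[i] = (uint32_t)packed & (1u << i);
  }
  assert(flags[0] != 0 && flags[1] == 0 && flags[2] != 0);
}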
diff --git a/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h new file mode 100644 index 0000000000..9e4ed8ea56 --- /dev/null +++ b/mindspore/lite/nnacl/infer/strided_slice_grad_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H +#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StridedSliceGradInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_GRAD_INFER_H diff --git a/mindspore/lite/nnacl/infer/strided_slice_infer.c b/mindspore/lite/nnacl/infer/strided_slice_infer.c new file mode 100644 index 0000000000..ad37aa247b --- /dev/null +++ b/mindspore/lite/nnacl/infer/strided_slice_infer.c @@ -0,0 +1,328 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/strided_slice_infer.h" + +const size_t kStridedSliceOutputNum = 1; +const size_t kStridedSliceInputNum = 1; +const size_t kStridedSliceMultiInputNumMin = 3; +const size_t kStridedSliceMultiInputNumMax = 5; + +bool CheckInputs(const TensorC *const *inputs, size_t inputs_size) { + for (size_t i = 1; i < inputs_size; ++i) { + if (inputs[i]->data_ == NULL) { + return false; + } + } + return true; +} + +int HandleAxesCheckNull(const TensorC *input_tensor, const TensorC *begin_tensor, int *begin_data, + const TensorC *end_tensor, int *end_data) { + if (input_tensor == NULL || begin_tensor == NULL || end_tensor == NULL || begin_data == NULL || end_data == NULL) { + return NNACL_NULL_PTR; + } + return NNACL_OK; +} + +int HandleAxesInputExist(const TensorC *const *inputs, int *ndim_, int *in_shape_, int *begins_, int *strides_, + int *ends_) { + const TensorC *input_tensor = inputs[0]; + const TensorC *begin_tensor = inputs[1]; + int *begin_data = (int *)(begin_tensor->data_); + const TensorC *end_tensor = inputs[2]; + int *end_data = (int *)(end_tensor->data_); + + int handle_check_ret = HandleAxesCheckNull(input_tensor, begin_tensor, begin_data, end_tensor, end_data); + if (handle_check_ret != NNACL_OK) { + return handle_check_ret; + } + + // when input contains axes, begins, ends, strides will be expand to the same length as input rank + *ndim_ = (int)(input_tensor->shape_size_); + int begin_ndim = GetElementNum(begin_tensor); + + int *axes_data = NULL; + const TensorC *axes_tensor = inputs[3]; + if (GetElementNum(axes_tensor) != 0) { + if (GetElementNum(axes_tensor) != begin_ndim) { + return NNACL_ERR; + } + axes_data = (int *)(axes_tensor->data_); + if (axes_data == NULL) { + return NNACL_NULL_PTR; + } + } + + int *stride_data = NULL; + const TensorC *stride_tensor = inputs[4]; + if (GetElementNum(stride_tensor) != 0) { + if (GetElementNum(stride_tensor) != begin_ndim) { + return NNACL_ERR; + } + stride_data = (int *)(stride_tensor->data_); + if (stride_data == NULL) { + return NNACL_ERR; + } + } + + int axes[MAX_SHAPE_SIZE]; + if (axes_data == NULL) { + for (int i = 0; i < begin_ndim; ++i) { + axes[i] = i; + } + } else { + for (size_t i = 0; i < begin_ndim; i++) { + axes[i] = axes_data[i]; + } + for (int i = 0; i < begin_ndim; ++i) { + if (axes[i] < 0) { + axes[i] += *ndim_; + } + } + } + + for (size_t i = 0; i < *ndim_; i++) { + in_shape_[i] = 0; + begins_[i] = 0; + strides_[i] = 0; + } + for (size_t i = 0; i < *ndim_; ++i) { + in_shape_[i] = input_tensor->shape_[i]; + } + for (size_t i = 0; i < *ndim_; ++i) { + int axes_it = 0; + for (size_t j = 0; j < begin_ndim; j++) { + if (axes[j] == i) { + axes_it = j; + break; + } else { + axes_it++; + } + } + if (axes_it != begin_ndim) { + int axis = axes_it; + // begins or ends exceed limit will be set to limit + begins_[i] = imax(imin(begin_data[axis], input_tensor->shape_[i] - 1), -input_tensor->shape_[i]); + ends_[i] = imax(imin(end_data[axis], input_tensor->shape_[i]), -input_tensor->shape_[i] - 1); + strides_[i] = stride_data[axis]; + } else { + begins_[i] = 0; + ends_[i] = input_tensor->shape_[i]; + strides_[i] = 1; + } + } + return NNACL_OK; +} + +// note: begin, end, stride length are equal, but may less than rank of input +int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (outputs_size != kStridedSliceOutputNum) { + return NNACL_PARAM_INVALID; + } + if (inputs_size != kStridedSliceInputNum && + !(inputs_size <= 
kStridedSliceMultiInputNumMax && inputs_size >= kStridedSliceMultiInputNumMin)) { + return NNACL_PARAM_INVALID; + } + if (parameter == NULL || outputs[0] == NULL || inputs[0] == NULL) { + return NNACL_NULL_PTR; + } + const TensorC *input = inputs[0]; + SetDataTypeFormat(outputs[0], inputs[0]); + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int in_shape_[MAX_SHAPE_SIZE]; + int begins_[MAX_SHAPE_SIZE]; + int ends_[MAX_SHAPE_SIZE]; + size_t in_shape_size_ = 0; + if (parameter->infer_flag_) { + ShapeSet(in_shape_, &in_shape_size_, input->shape_, input->shape_size_); + } + size_t begins_size_ = 0; + size_t ends_size_ = 0; + int strides_[MAX_SHAPE_SIZE]; + size_t strides_size_ = 0; + int begins_mask_[MAX_SHAPE_SIZE]; + int ends_mask_[MAX_SHAPE_SIZE]; + int ellipsis_mask_[MAX_SHAPE_SIZE]; + size_t ellipsis_mask_size_ = 0; + int new_axis_mask_[MAX_SHAPE_SIZE]; + size_t new_axis_mask_size_ = 0; + int shrink_axis_mask_[MAX_SHAPE_SIZE]; + size_t shrink_axis_mask_size_ = 0; + + StridedSliceParameter *param = (StridedSliceParameter *)parameter; + param->num_axes_ = in_shape_size_; + param->in_shape_length_ = in_shape_size_; + + int ndim_ = 0; + if (inputs_size == kStridedSliceInputNum) { + ndim_ = (int)(param->num_axes_); + + for (int i = 0; i < ndim_; i++) { + ShapePush(begins_, &begins_size_, param->begins_[i]); + ShapePush(ends_, &ends_size_, param->ends_[i]); + ShapePush(strides_, &strides_size_, param->strides_[i]); + } + } + if (!CheckInputs(inputs, inputs_size)) { + return NNACL_INFER_INVALID; + } + if (inputs_size == 4) { + const TensorC *begin_tensor = inputs[1]; + int *begin_data = (int *)(begin_tensor->data_); + const TensorC *end_tensor = inputs[2]; + int *end_data = (int *)(end_tensor->data_); + const TensorC *stride_tensor = inputs[3]; + int *stride_data = (int *)(stride_tensor->data_); + if (begin_data == NULL || end_data == NULL || stride_data == NULL) { + return NNACL_ERR; + } + ndim_ = GetElementNum(begin_tensor); + for (int i = 0; i < ndim_; ++i) { + ShapePush(begins_, &begins_size_, begin_data[i]); + ShapePush(ends_, &ends_size_, end_data[i]); + ShapePush(strides_, &strides_size_, stride_data[i]); + } + } + if (inputs_size == 5) { + int ret = HandleAxesInputExist(inputs, &ndim_, in_shape_, begins_, strides_, ends_); + if (ret != NNACL_OK) { + return ret; + } + } + + // set all mask to original input shape + ellipsis_mask_size_ = ndim_; + new_axis_mask_size_ = ndim_; + shrink_axis_mask_size_ = ndim_; + begins_size_ = ndim_; + ends_size_ = ndim_; + strides_size_ = ndim_; + + // convert bit to vector + for (int i = 0; i < ndim_; i++) { + begins_mask_[i] = (uint32_t)(param->begins_mask_) & (1 << i); + ends_mask_[i] = (uint32_t)(param->ends_mask_) & (1 << i); + ellipsis_mask_[i] = (uint32_t)(param->ellipsisMask_) & (1 << i); + new_axis_mask_[i] = (uint32_t)(param->newAxisMask_) & (1 << i); + shrink_axis_mask_[i] = (uint32_t)(param->shrinkAxisMask_) & (1 << i); + } + + // ApplyNewAxisMask(); + for (size_t i = 0; i < new_axis_mask_size_; i++) { + if (new_axis_mask_[i]) { + ndim_ += 1; + ShapeInsert(in_shape_, &in_shape_size_, i, 1); + begins_[i] = 0; + ends_[i] = 1; + strides_[i] = 1; + + ShapePush(begins_, &begins_size_, 0); + ShapePush(ends_, &ends_size_, in_shape_[ndim_ - 1]); + ShapePush(strides_, &strides_size_, 1); + + begins_mask_[i] = false; + ends_mask_[i] = false; + ellipsis_mask_[i] = false; + shrink_axis_mask_[i] = false; + } + } + // ApplyBeginMask(); + for (int i = 0; i < ndim_; i++) { + if (begins_mask_[i]) { + begins_[i] = 0; + } + } + // 
ApplyEndMask(); + for (int i = 0; i < ndim_; i++) { + if (ends_mask_[i]) { + ends_[i] = in_shape_[i]; + } + } + // ApplyEllipsisMask(); + for (size_t i = 0; i < ellipsis_mask_size_; i++) { + if (ellipsis_mask_[i]) { + begins_[i] = 0; + ends_[i] = in_shape_[i]; + break; + } + } + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapeSet(output_shape, &output_shape_size, in_shape_, in_shape_size_); + + // TransIndexToPositive(); + for (int i = 0; i < (int)(begins_size_); ++i) { + if (begins_[i] < 0) { + begins_[i] += in_shape_[i]; + } + if (ends_[i] < 0) { + ends_[i] += in_shape_[i]; + } + } + + for (int i = 0; i < ndim_; i++) { + if (strides_[i] == 0) { + return NNACL_ERR; + } + output_shape[i] = (ends_[i] - begins_[i] + strides_[i] + (strides_[i] < 0 ? 1 : -1)) / strides_[i]; + } + + // ApplyShrinkMask + int old_out_shape[MAX_SHAPE_SIZE]; + size_t old_out_shape_size = 0; + ShapeSet(old_out_shape, &old_out_shape_size, output_shape, output_shape_size); + output_shape_size = 0; + for (size_t i = 0; i < shrink_axis_mask_size_; i++) { + if (shrink_axis_mask_[i]) { + ends_[i] = begins_[i] + 1; + strides_[i] = 1; + } else { + ShapePush(output_shape, &output_shape_size, old_out_shape[i]); + } + } + for (size_t i = shrink_axis_mask_size_; i < old_out_shape_size; i++) { + ShapePush(output_shape, &output_shape_size, old_out_shape[i]); + } + + SetShapeArray(outputs[0], output_shape, output_shape_size); + + for (int i = 0; i < ndim_; i++) { + param->begins_[i] = begins_[i]; + param->ends_[i] = ends_[i]; + param->in_shape_[i] = in_shape_[i]; + param->strides_[i] = strides_[i]; + } + + for (int i = ndim_; i < param->in_shape_length_; i++) { + param->begins_[i] = 0; + param->ends_[i] = in_shape_[i]; + param->in_shape_[i] = in_shape_[i]; + param->strides_[i] = 1; + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/strided_slice_infer.h b/mindspore/lite/nnacl/infer/strided_slice_infer.h new file mode 100644 index 0000000000..1c9792eb43 --- /dev/null +++ b/mindspore/lite/nnacl/infer/strided_slice_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
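The per-axis output length used above, (ends - begins + strides + (strides < 0 ? 1 : -1)) / strides, is a ceiling division that works for both stride signs. A sketch checking two cases:

#include <assert.h>

/* Output length of a strided slice over [begin, end) with the given stride. */
int strided_slice_len(int begin, int end, int stride) {
  return (end - begin + stride + (stride < 0 ? 1 : -1)) / stride;
}

void strided_slice_len_example(void) {
  assert(strided_slice_len(0, 10, 3) == 4);  /* indices 0, 3, 6, 9 */
  assert(strided_slice_len(9, -1, -2) == 5); /* indices 9, 7, 5, 3, 1 */
}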
+ */ +#ifndef MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H +#define MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/strided_slice_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int StridedSliceInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_INFER_H diff --git a/mindspore/lite/nnacl/infer/switch_infer.c b/mindspore/lite/nnacl/infer/switch_infer.c new file mode 100644 index 0000000000..b51af00b92 --- /dev/null +++ b/mindspore/lite/nnacl/infer/switch_infer.c @@ -0,0 +1,103 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/switch_infer.h" +#include <string.h> + +int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (2 * (inputs_size - 1) != outputs_size) { + return NNACL_ERR; + } + + for (size_t i = 0; i < outputs_size / 2; i++) { + const TensorC *input = inputs[i + 1]; + TensorC *output_true = outputs[i]; + TensorC *output_false = outputs[i + outputs_size / 2]; + + SetDataTypeFormat(output_false, input); + SetDataTypeFormat(output_true, input); + + if (input->data_type_ == kObjectTypeTensorType) { + TensorListC *input_tensorlist = (TensorListC *)(input); + TensorListC *output_true_tensorlist = (TensorListC *)(output_true); + TensorListC *output_false_tensorlist = (TensorListC *)(output_false); + + ShapeSet(output_true_tensorlist->element_shape_, &output_true_tensorlist->element_shape_size_, + input_tensorlist->element_shape_, input_tensorlist->element_shape_size_); + ShapeSet(output_false_tensorlist->element_shape_, &output_false_tensorlist->element_shape_size_, + input_tensorlist->element_shape_, input_tensorlist->element_shape_size_); + output_true_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_; + output_false_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_; + output_true_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_; + output_false_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_; + + // note: need delete below? 
+ for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) { + memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + } + + } else { + } + } + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + for (size_t i = 0; i < outputs_size / 2; i++) { + const TensorC *input = inputs[i + 1]; + TensorC *output_true = outputs[i]; + TensorC *output_false = outputs[i + outputs_size / 2]; + + SetDataTypeFormat(output_false, input); + SetDataTypeFormat(output_true, input); + + if (input->data_type_ == kObjectTypeTensorType) { + TensorListC *input_tensorlist = (TensorListC *)(input); + TensorListC *output_true_tensorlist = (TensorListC *)(output_true); + TensorListC *output_false_tensorlist = (TensorListC *)(output_false); + + ShapeSet(output_true_tensorlist->element_shape_, &output_true_tensorlist->element_shape_size_, + input_tensorlist->element_shape_, input_tensorlist->element_shape_size_); + ShapeSet(output_false_tensorlist->element_shape_, &output_false_tensorlist->element_shape_size_, + input_tensorlist->element_shape_, input_tensorlist->element_shape_size_); + output_true_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_; + output_false_tensorlist->max_elements_num_ = input_tensorlist->max_elements_num_; + output_true_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_; + output_false_tensorlist->tensors_data_type_ = input_tensorlist->tensors_data_type_; + + output_false_tensorlist->element_num_ = input_tensorlist->element_num_; + output_true_tensorlist->element_num_ = input_tensorlist->element_num_; + + for (size_t j = 0; j < output_false_tensorlist->element_num_; j++) { + memcpy(output_true_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + memcpy(output_false_tensorlist->tensors_[j], input_tensorlist->tensors_[j], sizeof(TensorC)); + } + + } else { + SetShapeTensor(output_true, input); + SetShapeTensor(output_false, input); + } + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/switch_infer.h b/mindspore/lite/nnacl/infer/switch_infer.h new file mode 100644 index 0000000000..673d1efa63 --- /dev/null +++ b/mindspore/lite/nnacl/infer/switch_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
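SwitchInferShape above relies on a fixed output layout: input 0 is presumably the switch predicate, and each data input i + 1 feeds outputs[i] (true branch) and outputs[i + outputs_size / 2] (false branch), which is why outputs_size must equal 2 * (inputs_size - 1). A small sketch of that index pairing, with hypothetical sizes:

    #include <stdio.h>

    int main(void) {
      const size_t inputs_size = 4;                      /* 1 predicate + 3 data tensors */
      const size_t outputs_size = 2 * (inputs_size - 1); /* must be 6 */
      for (size_t i = 0; i < outputs_size / 2; i++) {
        printf("inputs[%zu] -> outputs[%zu] (true) / outputs[%zu] (false)\n",
               i + 1, i, i + outputs_size / 2);
      }
      return 0;
    }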
+ */ +#ifndef MINDSPORE_LITE_NNACL_SWITCH_INFER_H +#define MINDSPORE_LITE_NNACL_SWITCH_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/softmax_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int SwitchInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_SWITCH_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c new file mode 100644 index 0000000000..694491f819 --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.c @@ -0,0 +1,75 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/tensorlist_fromtensor_infer.h" + +int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + const TensorC *input0 = inputs[0]; + + if (input0->shape_size_ < 1) { + return NNACL_ERR; + } + int dim0 = input0->shape_[0]; + if (dim0 < 0) { + return NNACL_ERR; + } + const TensorC *input1 = inputs[1]; + if (input1->data_ == NULL) { + return NNACL_NULL_PTR; + } + int *ele_shape_ptr = (int *)(input1->data_); + TensorListC *output = (TensorListC *)(outputs[0]); + + vvector *tensor_shape = (vvector *)malloc(sizeof(vvector)); + if (tensor_shape == NULL) { + return NNACL_NULL_PTR; + } + tensor_shape->size_ = dim0; + tensor_shape->shape_ = (int **)malloc(tensor_shape->size_ * sizeof(int *)); + if (tensor_shape->shape_ == NULL) { + free(tensor_shape); + return NNACL_NULL_PTR; + } + tensor_shape->shape_size_ = (int *)malloc(tensor_shape->size_ * sizeof(int)); + if (tensor_shape->shape_size_ == NULL) { + free(tensor_shape->shape_); + free(tensor_shape); + return NNACL_NULL_PTR; + } + + for (size_t i = 0; i < dim0; i++) { + tensor_shape->shape_[i] = (int *)(input0->shape_ + 1); + tensor_shape->shape_size_[i] = input0->shape_size_ - 1; + } + + ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input1)); + output->element_num_ = dim0; + output->data_type_ = kObjectTypeTensorType; + output->format_ = Format_NHWC; + MallocTensorListData(output, input0->data_type_, tensor_shape); + free(tensor_shape->shape_); + free(tensor_shape->shape_size_); + free(tensor_shape); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h new file mode 100644 index 0000000000..9ac106cc22 --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under 
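TensorListFromTensorInferShape above splits the leading axis: a tensor of shape [d0, d1, ..., dn] becomes a list of d0 elements, every one of which aliases the trailing shape [d1, ..., dn]. A sketch of that aliasing with a hypothetical [4, 2, 3] input:

    #include <stdio.h>

    int main(void) {
      int shape[] = {4, 2, 3};
      size_t shape_size = 3;
      int dim0 = shape[0]; /* 4 list elements */
      for (int i = 0; i < dim0; i++) {
        int *elem_shape = shape + 1;       /* every element points at shape_[1..] */
        size_t elem_rank = shape_size - 1; /* rank 2 -> [2, 3] */
        printf("element %d: rank %zu, dims [%d, %d]\n", i, elem_rank,
               elem_shape[0], elem_shape[1]);
      }
      return 0;
    }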
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H +#define MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListFromTensorInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TENSORLIST_FROMTENSOR_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c new file mode 100644 index 0000000000..9c84fc65fe --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.c @@ -0,0 +1,80 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "nnacl/infer/tensorlist_getitem_infer.h"
+
+int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                size_t outputs_size, OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+  TensorListC *input0 = (TensorListC *)(inputs[0]);
+  const TensorC *get_index = inputs[1];
+  if (GetElementNum(get_index) != 1) {
+    return NNACL_ERR;
+  }
+  if (get_index->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  int index = ((int *)(get_index->data_))[0];
+  if (index < 0 || index >= (int)(input0->element_num_)) {
+    return NNACL_ERR;
+  }
+  TensorC *tensor_index = input0->tensors_[index];
+  TensorC *output = outputs[0];
+  if (tensor_index->data_type_ != kTypeUnknown) {
+    output->data_type_ = tensor_index->data_type_;
+    ShapeSet(output->shape_, &(output->shape_size_), tensor_index->shape_, tensor_index->shape_size_);
+  } else {
+    const TensorC *input2 = inputs[2];
+    if (input2->data_ == NULL) {
+      return NNACL_NULL_PTR;
+    }
+    int *ele_shape_data = (int *)(input2->data_);
+    int element_shape[MAX_SHAPE_SIZE];
+    size_t element_shape_size = 0;
+    for (int i = 0; i < GetElementNum(input2); ++i) {
+      ShapePush(element_shape, &element_shape_size, ele_shape_data[i]);
+    }
+    int status =
+      TensorListMergeShape(element_shape, &element_shape_size, input0->element_shape_, input0->element_shape_size_);
+    if (status != NNACL_OK) {
+      return NNACL_ERR;
+    }
+    if (!TensorListIsFullyDefined(element_shape, element_shape_size)) {
+      for (int i = 0; i < (int)(input0->element_num_); ++i) {
+        TensorC *input = input0->tensors_[i];
+        if (input->data_type_ != kTypeUnknown) {
+          status = TensorListMergeShape(element_shape, &element_shape_size, input->shape_, input->shape_size_);
+          if (status != NNACL_OK) {
+            return NNACL_ERR;
+          }
+        }
+      }
+    }
+    if (!TensorListIsFullyDefined(element_shape, element_shape_size)) {  // shape must be fully defined after merging
+      return NNACL_ERR;
+    }
+    output->data_type_ = input0->tensors_data_type_;
+    SetShapeArray(output, element_shape, element_shape_size);
+  }
+  output->format_ = input0->tensors_[index]->format_;
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h
new file mode 100644
index 0000000000..663a626a04
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
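The merge step above only succeeds when every known dimension agrees; unknown dimensions adopt the other side's value. An illustrative re-statement of the per-dimension rule, assuming nnacl's -1-for-unknown convention (this is a sketch, not the nnacl implementation):

    #include <assert.h>

    /* -1 marks an unknown dimension; known dimensions must match exactly. */
    static int merge_dim(int a, int b, int *out) {
      if (a == -1) { *out = b; return 0; }
      if (b == -1 || a == b) { *out = a; return 0; }
      return -1; /* conflicting known dimensions */
    }

    int main(void) {
      int d;
      assert(merge_dim(-1, 3, &d) == 0 && d == 3);
      assert(merge_dim(2, 2, &d) == 0 && d == 2);
      assert(merge_dim(2, 3, &d) != 0);
      return 0;
    }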
+ */
+#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H
+#define MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/tensorlist_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int TensorListGetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                size_t outputs_size, OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_TENSORLIST_GETITEM_INFER_H
diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c
new file mode 100644
index 0000000000..cf6adc1a24
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.c
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/tensorlist_reserve_infer.h"
+
+int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
+                                size_t outputs_size, OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input0 = inputs[0];
+  int ele_shape_type = input0->data_type_;
+  if (ele_shape_type != kNumberTypeInt && ele_shape_type != kNumberTypeInt32) {
+    return NNACL_ERR;
+  }
+  if (input0->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  int *ele_shape_ptr = (int *)(input0->data_);
+
+  const TensorC *input1 = inputs[1];
+  int num_ele_type = input1->data_type_;
+  if (num_ele_type != kNumberTypeInt && num_ele_type != kNumberTypeInt32) {
+    return NNACL_ERR;
+  }
+  if (GetElementNum(input1) != 1) {
+    return NNACL_ERR;
+  }
+  if (input1->data_ == NULL) {
+    return NNACL_INFER_INVALID;
+  }
+  int num_elements = ((int *)(input1->data_))[0];
+  TensorListC *output = (TensorListC *)(outputs[0]);
+  output->data_type_ = kObjectTypeTensorType;
+  output->format_ = Format_NHWC;
+  ShapeSet(output->element_shape_, &(output->element_shape_size_), ele_shape_ptr, GetElementNum(input0));
+  output->element_num_ = num_elements;
+
+  vvector *tmp_shape = (vvector *)malloc(sizeof(vvector));
+  if (tmp_shape == NULL) {
+    return NNACL_NULL_PTR;
+  }
+  tmp_shape->size_ = num_elements;
+  tmp_shape->shape_ = (int **)malloc(tmp_shape->size_ * sizeof(int *));
+  if (tmp_shape->shape_ == NULL) {
+    free(tmp_shape);
+    return NNACL_NULL_PTR;
+  }
+  tmp_shape->shape_size_ = (int *)malloc(tmp_shape->size_ * sizeof(int));
+  if (tmp_shape->shape_size_ == NULL) {
+    free(tmp_shape->shape_);
+    free(tmp_shape);
+    return NNACL_NULL_PTR;
+  }
+
+  for (int i = 0; i < num_elements; i++) {
+    tmp_shape->shape_size_[i] = 0;
+    tmp_shape->shape_[i] = NULL;
+  }
+  MallocTensorListData(output, kTypeUnknown, tmp_shape);
+  free(tmp_shape->shape_size_);
+  free(tmp_shape->shape_);
+  free(tmp_shape);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h
new
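The vvector filled above describes one shape per reserved element; NULL shapes of rank 0 mark placeholders whose dtype stays kTypeUnknown until an item is set. A minimal sketch of that layout, where vvector_sketch is a stand-in mirroring nnacl's vvector fields:

    #include <stdlib.h>

    typedef struct {
      size_t size_;
      int **shape_;
      int *shape_size_;
    } vvector_sketch; /* stand-in for nnacl's vvector */

    int main(void) {
      int num_elements = 3;
      vvector_sketch v;
      v.size_ = (size_t)num_elements;
      v.shape_ = (int **)malloc(v.size_ * sizeof(int *));
      v.shape_size_ = (int *)malloc(v.size_ * sizeof(int));
      if (v.shape_ == NULL || v.shape_size_ == NULL) return 1;
      for (int i = 0; i < num_elements; i++) {
        v.shape_[i] = NULL;   /* no concrete shape yet */
        v.shape_size_[i] = 0; /* rank 0 marks an undefined placeholder */
      }
      free(v.shape_);
      free(v.shape_size_);
      return 0;
    }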
file mode 100644 index 0000000000..4cd2c453e2 --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H +#define MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListReserveInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TENSORLIST_RESERVE_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c new file mode 100644 index 0000000000..e7ee7d310e --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.c @@ -0,0 +1,128 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/tensorlist_setitem_infer.h" + +int PreJudge(const TensorC *get_index, TensorListC *input0, const TensorC *value_tensor) { + if (get_index->data_ == NULL || value_tensor->data_ == NULL) { + return NNACL_INFER_INVALID; + } + + if (get_index->data_type_ != kNumberTypeInt && get_index->data_type_ != kNumberTypeInt32) { + return NNACL_ERR; + } + if (GetElementNum(get_index) != 1) { + return NNACL_ERR; + } + if (get_index->data_ == NULL) { + return NNACL_NULL_PTR; + } + return NNACL_OK; +} + +int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + TensorListC *input0 = (TensorListC *)(inputs[0]); + const TensorC *get_index = inputs[1]; + const TensorC *value_tensor = inputs[2]; + TensorListC *output0 = (TensorListC *)(outputs[0]); + output0->data_type_ = input0->data_type_; + output0->format_ = input0->format_; + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int judge_ret = PreJudge(get_index, input0, value_tensor); + if (judge_ret != NNACL_OK) { + return judge_ret; + } + + int index = ((int *)(get_index->data_))[0]; + if (index < 0 || (index >= ((int)(input0->element_num_)) && index != 0)) { + return NNACL_ERR; + } + + output0->max_elements_num_ = input0->max_elements_num_; + + if (input0->element_num_ == 0 && input0->element_shape_size_ == 0 && index == 0) { + ShapeSet(input0->element_shape_, &(input0->element_shape_size_), value_tensor->shape_, value_tensor->shape_size_); + ShapeSet(output0->element_shape_, &(output0->element_shape_size_), value_tensor->shape_, value_tensor->shape_size_); + } else { + ShapeSet(output0->element_shape_, &(output0->element_shape_size_), input0->element_shape_, + input0->element_shape_size_); + } + + vvector *out_shape = (vvector *)malloc(sizeof(vvector)); + if (out_shape == NULL) { + return NNACL_NULL_PTR; + } + out_shape->size_ = 0; + out_shape->shape_ = (int **)malloc((input0->element_num_ + 1) * sizeof(int *)); + if (out_shape->shape_ == NULL) { + free(out_shape); + return NNACL_NULL_PTR; + } + out_shape->shape_size_ = (int *)malloc((input0->element_num_ + 1) * sizeof(int)); + if (out_shape->shape_size_ == NULL) { + free(out_shape->shape_); + free(out_shape); + return NNACL_NULL_PTR; + } + + if (index == 0 && input0->element_num_ == 0) { // uninitialized tensorlist + out_shape->shape_[out_shape->size_] = (int *)(value_tensor->shape_); + out_shape->shape_size_[out_shape->size_] = value_tensor->shape_size_; + out_shape->size_++; + output0->element_num_ = 1; + } else { + output0->element_num_ = input0->element_num_; + for (int i = 0; i < input0->element_num_; ++i) { + TensorC *src_ptr = input0->tensors_[i]; + if (src_ptr == NULL) { + free(out_shape->shape_); + free(out_shape->shape_size_); + free(out_shape); + return NNACL_ERR; + } + if (src_ptr->data_type_ != kTypeUnknown) { + out_shape->shape_[out_shape->size_] = src_ptr->shape_; + out_shape->shape_size_[out_shape->size_] = src_ptr->shape_size_; + out_shape->size_++; + } else { + out_shape->shape_[out_shape->size_] = NULL; + out_shape->shape_size_[out_shape->size_] = 0; + out_shape->size_++; + } + } + } + + if (input0->tensors_data_type_ == kTypeUnknown) { + input0->tensors_data_type_ = value_tensor->data_type_; + } + + out_shape->shape_[index] = (int *)(value_tensor->shape_); + out_shape->shape_size_[index] = 
value_tensor->shape_size_; + MallocTensorListData(output0, input0->tensors_data_type_, out_shape); + free(out_shape->shape_); + free(out_shape->shape_size_); + free(out_shape); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h new file mode 100644 index 0000000000..d7b6b20d10 --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H +#define MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListSetItemInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TENSORLIST_SETITEM_INFER_H diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c new file mode 100644 index 0000000000..0881cb67ff --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.c @@ -0,0 +1,68 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/tensorlist_stack_infer.h" + +int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + TensorListC *input0 = (TensorListC *)(inputs[0]); + if (input0->element_num_ == 0) { + return NNACL_ERR; + } + const TensorC *ele_shape = inputs[1]; // element shape + if (ele_shape->data_ == NULL) { + return NNACL_NULL_PTR; + } + int *ele_shape_ptr = (int *)(ele_shape->data_); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (int i = 0; i < GetElementNum(ele_shape); ++i) { + ShapePush(output_shape, &output_shape_size, ele_shape_ptr[i]); + } + + int status = + TensorListMergeShape(output_shape, &output_shape_size, input0->element_shape_, input0->element_shape_size_); + if (status == NNACL_ERR) { + return NNACL_ERR; + } + if (!TensorListIsFullyDefined(output_shape, output_shape_size)) { + return NNACL_ERR; + } + if (!TensorListIsFullyDefined(input0->element_shape_, input0->element_shape_size_)) { + for (int i = 0; i < input0->element_num_; ++i) { + TensorC *tensor_ele = input0->tensors_[i]; + if (tensor_ele->data_type_ != kTypeUnknown) { + status = TensorListMergeShape(output_shape, &output_shape_size, tensor_ele->shape_, tensor_ele->shape_size_); + if (status == NNACL_ERR) { + return NNACL_ERR; + } + } + } + } + TensorC *output = outputs[0]; + output->data_type_ = input0->tensors_data_type_; + output->format_ = input0->format_; + ShapeInsert(output_shape, &output_shape_size, 0, input0->element_num_); + SetShapeArray(output, output_shape, output_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h new file mode 100644 index 0000000000..38d6ce0cfd --- /dev/null +++ b/mindspore/lite/nnacl/infer/tensorlist_stack_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H +#define MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TensorListStackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TENSORLIST_STACK_INFER_H diff --git a/mindspore/lite/nnacl/infer/tile_infer.c b/mindspore/lite/nnacl/infer/tile_infer.c new file mode 100644 index 0000000000..c4a6e1954b --- /dev/null +++ b/mindspore/lite/nnacl/infer/tile_infer.c @@ -0,0 +1,109 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/tile_infer.h" +#include <limits.h> + +void TileParamCaffe2Tflite(TileParameter *param, size_t out_shape_size) { + if (param->dims_size_ != 0) { + int multiples_size_tmp[5] = {0}; + for (size_t i = 0; i < out_shape_size; i++) { + multiples_size_tmp[i] = 1; + } + for (size_t i = 0; i < param->dims_size_; i++) { + multiples_size_tmp[param->dims_[i]] = param->multiples_[i]; + } + for (size_t i = 0; i < 5; i++) { + param->multiples_[i] = multiples_size_tmp[i]; + } + } +} + +int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + TileParameter *param = (TileParameter *)parameter; + + size_t multiples_size = 0; + if (inputs_size != 2) { + return NNACL_ERR; + } + int data_num = GetElementNum(inputs[1]); + if (data_num > (int)(input->shape_size_)) { + return NNACL_INPUT_TENSOR_ERROR; + } + multiples_size = data_num; + int *input1_data = inputs[1]->data_; + if (input1_data == NULL) { + return NNACL_INFER_INVALID; + } + for (size_t i = 0; i < data_num; i++) { + param->multiples_[i] = input1_data[i]; + } + +#ifdef SUPPORT_TRAIN + const size_t in_dims = input->shape_size_; + const size_t delta_dims = in_dims - multiples_size; + + size_t i = 0; + for (; i < delta_dims; ++i) { + int tmp = input->shape_[i]; + ShapePush(out_shape, &out_shape_size, tmp); + } + for (; i < in_dims; ++i) { + int tmp = input->shape_[i] * (param->multiples_[i - delta_dims]); + ShapePush(out_shape, &out_shape_size, tmp); + } +#else + int *dims = param->dims_; + size_t dims_size = param->dims_size_; + if (dims_size == 0) { + for (int dim = 0; dim < GetElementNum(inputs[1]); ++dim) { + ShapePush(dims, &dims_size, dim); + } + param->dims_size_ = dims_size; + } + if (multiples_size != dims_size) { + return NNACL_ERR; + } + for (size_t i = 0; i < input->shape_size_; ++i) { + 
ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + for (size_t i = 0; i < dims_size; ++i) { + if (input->shape_[dims[i]] != 0 && param->multiples_[i] > INT_MAX / input->shape_[dims[i]]) { + return NNACL_ERR; + } + out_shape[dims[i]] = input->shape_[dims[i]] * (param->multiples_[i]); + } + // change caffe param format to tflite + TileParamCaffe2Tflite(param, out_shape_size); +#endif + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/tile_infer.h b/mindspore/lite/nnacl/infer/tile_infer.h new file mode 100644 index 0000000000..f5200949da --- /dev/null +++ b/mindspore/lite/nnacl/infer/tile_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_TILE_INFER_H +#define MINDSPORE_LITE_NNACL_TILE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/base/tile_base.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TILE_INFER_H diff --git a/mindspore/lite/nnacl/infer/topk_infer.c b/mindspore/lite/nnacl/infer/topk_infer.c new file mode 100644 index 0000000000..8b851e50e5 --- /dev/null +++ b/mindspore/lite/nnacl/infer/topk_infer.c @@ -0,0 +1,50 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
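In the non-train branch above, every tiled dimension is the input dimension times its multiple, guarded against int overflow before the multiply. A worked sketch with a hypothetical shape [2, 3] and multiples [4, 1]:

    #include <assert.h>
    #include <limits.h>

    int main(void) {
      int in_shape[2] = {2, 3};
      int multiples[2] = {4, 1};
      int out_shape[2];
      for (int i = 0; i < 2; i++) {
        /* same overflow guard as above: reject multiples > INT_MAX / dim */
        assert(in_shape[i] == 0 || multiples[i] <= INT_MAX / in_shape[i]);
        out_shape[i] = in_shape[i] * multiples[i];
      }
      assert(out_shape[0] == 8 && out_shape[1] == 3);
      return 0;
    }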
+ */
+
+#include "nnacl/infer/topk_infer.h"
+
+int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter) {
+  int check_ret = CheckAugmentNullSizeInputTwo(inputs, inputs_size, outputs, outputs_size, parameter, 1, 2, 2);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+  const TensorC *input = inputs[0];
+  if (input->shape_size_ == 4 && input->format_ != Format_NHWC) {
+    return NNACL_ERR;
+  }
+  TensorC *output0 = outputs[0];
+  TensorC *output1 = outputs[1];
+  SetDataTypeFormat(output0, input);
+  output1->data_type_ = kNumberTypeInt32;
+  output1->format_ = input->format_;
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  TopkParameter *param = (TopkParameter *)parameter;
+  const TensorC *input_k_tensor = inputs[1];
+  if (input_k_tensor->data_ == NULL) {  // k is not known until runtime
+    return NNACL_INFER_INVALID;
+  }
+  param->k_ = ((int32_t *)input_k_tensor->data_)[0];
+
+  int out_shape[MAX_SHAPE_SIZE];
+  size_t out_shape_size = 0;
+  ShapeSet(out_shape, &out_shape_size, input->shape_, input->shape_size_);
+  out_shape[out_shape_size - 1] = param->k_;
+
+  SetShapeArray(output0, out_shape, out_shape_size);
+  SetShapeArray(output1, out_shape, out_shape_size);
+  return NNACL_OK;
+}
diff --git a/mindspore/lite/nnacl/infer/topk_infer.h b/mindspore/lite/nnacl/infer/topk_infer.h
new file mode 100644
index 0000000000..791cabdf8f
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/topk_infer.h
@@ -0,0 +1,32 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_TOPK_INFER_H
+#define MINDSPORE_LITE_NNACL_TOPK_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/fp32/topk_fp32.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int TopKInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                   OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_TOPK_INFER_H
diff --git a/mindspore/lite/nnacl/infer/transpose_infer.c b/mindspore/lite/nnacl/infer/transpose_infer.c
new file mode 100644
index 0000000000..2780fcc38a
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/transpose_infer.c
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
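TopKInferShape above copies the input shape and shrinks only the last axis to k, for both the values output and the int32 indices output. For example, with a hypothetical [8, 10] input and k = 3:

    #include <assert.h>

    int main(void) {
      int in_shape[2] = {8, 10};
      int k = 3;
      int out_shape[2] = {in_shape[0], in_shape[1]};
      out_shape[1] = k; /* only the last axis changes: [8, 10] -> [8, 3] */
      assert(out_shape[0] == 8 && out_shape[1] == 3);
      return 0;
    }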
+ */ + +#include "nnacl/infer/transpose_infer.h" + +bool CheckPermTransFormat(const int *perm, const int *perm_transformat, const size_t size) { + for (size_t i = 0; i < size; ++i) { + if (perm[i] != perm_transformat[i]) { + return false; + } + } + return true; +} + +int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 2, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + const TensorC *perm_tensor = inputs[1]; + const int32_t *perm_data = (int32_t *)perm_tensor->data_; + const size_t perms_num = (size_t)perm_tensor->shape_[0]; + if (perm_tensor->shape_size_ == 0) { + return NNACL_INFER_INVALID; + } + int perm[MAX_SHAPE_SIZE]; + size_t perm_size = 0; + for (size_t i = 0; i < perms_num; i++) { + ShapePush(perm, &perm_size, perm_data[i]); + } + int out_shape[MAX_SHAPE_SIZE]; + if (input->shape_size_ != 4 && perms_num == 4) { + for (size_t i = 0; i < input->shape_size_; ++i) { + out_shape[i] = input->shape_[i]; + } + SetShapeArray(output, out_shape, input->shape_size_); + return NNACL_OK; + } + const int nchw2nhwc[4] = {0, 2, 3, 1}; + const int nhwc2nchw[4] = {0, 3, 1, 2}; + if (perms_num == 4) { + if (input->format_ == Format_NCHW && CheckPermTransFormat(perm, nchw2nhwc, perms_num)) { + output->format_ = Format_NHWC; + } else if (input->format_ == Format_NHWC && CheckPermTransFormat(perm, nhwc2nchw, perms_num)) { + output->format_ = Format_NCHW; + } + } + output->shape_size_ = perm_size; + for (size_t i = 0; i < perm_size; ++i) { + out_shape[i] = input->shape_[perm[i]]; + } + if (perm_size == 0) { + size_t shape_size = input->shape_size_; + output->shape_size_ = shape_size; + for (size_t i = 0; i < shape_size; ++i) { + out_shape[shape_size - i - 1] = input->shape_[i]; + } + } + SetShapeArray(output, out_shape, output->shape_size_); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/transpose_infer.h b/mindspore/lite/nnacl/infer/transpose_infer.h new file mode 100644 index 0000000000..4a8cb4aec8 --- /dev/null +++ b/mindspore/lite/nnacl/infer/transpose_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
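The core rule above is out_shape[i] = in_shape[perm[i]]; the two hard-coded perms are used only to flip the recorded NCHW/NHWC format tag. A sketch applying the nchw2nhwc perm to a hypothetical [1, 3, 8, 8] tensor:

    #include <assert.h>

    int main(void) {
      int in_shape[4] = {1, 3, 8, 8}; /* hypothetical NCHW input */
      int perm[4] = {0, 2, 3, 1};     /* the nchw2nhwc pattern checked above */
      int out_shape[4];
      for (int i = 0; i < 4; i++) {
        out_shape[i] = in_shape[perm[i]];
      }
      /* NHWC result: {1, 8, 8, 3} */
      assert(out_shape[1] == 8 && out_shape[2] == 8 && out_shape[3] == 3);
      return 0;
    }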
+ */ +#ifndef MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H +#define MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/transpose.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int TransposeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_TRANSPOSE_INFER_H diff --git a/mindspore/lite/nnacl/infer/uniform_real_infer.c b/mindspore/lite/nnacl/infer/uniform_real_infer.c new file mode 100644 index 0000000000..b9a65caed4 --- /dev/null +++ b/mindspore/lite/nnacl/infer/uniform_real_infer.c @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/uniform_real_infer.h" + +int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int32_t *input_data = (int32_t *)(inputs[0]->data_); + if (input_data == NULL) { + return NNACL_INFER_INVALID; + } + int input_num = GetElementNum(inputs[0]); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = input_num; + for (int i = 0; i < input_num; i++) { + output_shape[i] = input_data[i]; + } + SetShapeArray(outputs[0], output_shape, output_shape_size); + outputs[0]->data_type_ = kNumberTypeFloat32; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/uniform_real_infer.h b/mindspore/lite/nnacl/infer/uniform_real_infer.h new file mode 100644 index 0000000000..ceef8dec71 --- /dev/null +++ b/mindspore/lite/nnacl/infer/uniform_real_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H +#define MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UniformRealInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_UNIFORM_REAL_INFER_H diff --git a/mindspore/lite/nnacl/infer/unique_infer.c b/mindspore/lite/nnacl/infer/unique_infer.c new file mode 100644 index 0000000000..46721dc046 --- /dev/null +++ b/mindspore/lite/nnacl/infer/unique_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/unique_infer.h" + +int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 2); + if (check_ret != NNACL_OK) { + return check_ret; + } + + const TensorC *input = inputs[0]; + TensorC *output0 = outputs[0]; + TensorC *output1 = outputs[1]; + + SetDataTypeFormat(output0, input); + output1->data_type_ = kNumberTypeInt32; + output1->format_ = input->format_; + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + SetShapeTensor(output0, input); + SetShapeTensor(output1, input); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/unique_infer.h b/mindspore/lite/nnacl/infer/unique_infer.h new file mode 100644 index 0000000000..ec8b8d434d --- /dev/null +++ b/mindspore/lite/nnacl/infer/unique_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_NNACL_UNIQUE_INFER_H +#define MINDSPORE_LITE_NNACL_UNIQUE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UniqueInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_UNIQUE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c new file mode 100644 index 0000000000..ea382d139a --- /dev/null +++ b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.c @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/unsorted_segment_sum_infer.h" + +int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 3, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + + TensorC *out = outputs[0]; + const TensorC *x = inputs[0]; + const TensorC *segment_id = inputs[1]; + int num_segments = *(int *)(inputs[2]->data_); + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + ShapePush(output_shape, &output_shape_size, num_segments); + for (int index = segment_id->shape_size_; index < (int)(x->shape_size_); index++) { + ShapePush(output_shape, &output_shape_size, x->shape_[index]); + } + SetShapeArray(out, output_shape, output_shape_size); + SetDataTypeFormat(out, x); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h new file mode 100644 index 0000000000..3945e2907b --- /dev/null +++ b/mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
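UnsortedSegmentSumInferShape above builds the output as [num_segments] followed by the data dims that segment_id does not cover. A worked sketch: x of shape [6, 4], segment_id of shape [6], num_segments = 3 gives [3, 4]:

    #include <assert.h>

    int main(void) {
      int x_shape[2] = {6, 4};
      int segment_id_rank = 1; /* segment_id covers x's first axis */
      int num_segments = 3;
      int out_shape[2];
      size_t out_rank = 0;
      out_shape[out_rank++] = num_segments; /* leading segment axis */
      for (int i = segment_id_rank; i < 2; i++) {
        out_shape[out_rank++] = x_shape[i];
      }
      assert(out_rank == 2 && out_shape[0] == 3 && out_shape[1] == 4);
      return 0;
    }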
+ */ +#ifndef MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H +#define MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct UnsortedSegmentSumParameter { + OpParameter op_parameter_; + int segments_num_; +} UnsortedSegmentSumParameter; + +int UnsortedSegmentSumInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, + size_t outputs_size, OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_UNSORTED_SEGMENT_SUM_INFER_H diff --git a/mindspore/lite/nnacl/infer/unsqueeze_infer.c b/mindspore/lite/nnacl/infer/unsqueeze_infer.c new file mode 100644 index 0000000000..9e8f6b4f35 --- /dev/null +++ b/mindspore/lite/nnacl/infer/unsqueeze_infer.c @@ -0,0 +1,63 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/unsqueeze_infer.h" + +int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNullSize(inputs, inputs_size, outputs, outputs_size, parameter, 1, 1); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + TensorC *output = outputs[0]; + + SetDataTypeFormat(output, input); + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + + UnSqueezeParameter *param = (UnSqueezeParameter *)parameter; + int in_rank = input->shape_size_; + int dim_rank = param->num_dim_; + int out_shape[MAX_SHAPE_SIZE]; + size_t out_shape_size = 0; + if (dim_rank == 0) { + for (size_t i = 0; i < input->shape_size_; i++) { + if (input->shape_[i] != 1) { + ShapePush(out_shape, &out_shape_size, input->shape_[i]); + } + } + } else { + int sz = in_rank + dim_rank; + size_t in_itr = 0; + size_t ax_itr = 0; + for (size_t i = 0; i < sz; i++) { + if (ax_itr < dim_rank && param->dims_[ax_itr] == (int)(i)) { + ShapePush(out_shape, &out_shape_size, 1); + ax_itr++; + } else if (ax_itr < dim_rank && param->dims_[ax_itr] + sz == i) { + ShapePush(out_shape, &out_shape_size, 1); + ax_itr++; + } else { + ShapePush(out_shape, &out_shape_size, input->shape_[in_itr]); + in_itr++; + } + } + } + SetShapeArray(output, out_shape, out_shape_size); + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/unsqueeze_infer.h b/mindspore/lite/nnacl/infer/unsqueeze_infer.h new file mode 100644 index 0000000000..72db2bcc19 --- /dev/null +++ b/mindspore/lite/nnacl/infer/unsqueeze_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
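In the dims branch above, negative axes wrap by adding sz = in_rank + dim_rank, and a 1-dim is inserted wherever an axis matches the output position. A worked sketch of unsqueezing a [3, 4] tensor at axis -1:

    #include <assert.h>

    int main(void) {
      /* sz = 2 + 1 = 3, and -1 + sz == 2, so the new 1-dim lands at
         position 2 -> output shape [3, 4, 1]. */
      int in_shape[2] = {3, 4};
      int axis = -1;
      int sz = 3;
      int out_shape[3];
      int o = 0, in_i = 0;
      for (int i = 0; i < sz; i++) {
        if (i == axis || i == axis + sz) {
          out_shape[o++] = 1;
        } else {
          out_shape[o++] = in_shape[in_i++];
        }
      }
      assert(out_shape[0] == 3 && out_shape[1] == 4 && out_shape[2] == 1);
      return 0;
    }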
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H +#define MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H + +#include "nnacl/infer/common_infer.h" +#include "nnacl/unsqueeze_parameter.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int UnsqueezeInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_UNSQUEEZE_INFER_H diff --git a/mindspore/lite/nnacl/infer/unstack_infer.c b/mindspore/lite/nnacl/infer/unstack_infer.c new file mode 100644 index 0000000000..4e8971a740 --- /dev/null +++ b/mindspore/lite/nnacl/infer/unstack_infer.c @@ -0,0 +1,52 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "nnacl/infer/unstack_infer.h" + +int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter); + if (check_ret != NNACL_OK) { + return check_ret; + } + const TensorC *input = inputs[0]; + UnstackParameter *param = (UnstackParameter *)parameter; + int axis = param->axis_ < 0 ? param->axis_ + input->shape_size_ : param->axis_; + if (axis < 0 || axis >= input->shape_size_) { + return NNACL_PARAM_INVALID; + } + for (size_t i = 0; i < outputs_size; i++) { + SetDataTypeFormat(outputs[i], input); + } + + if (!parameter->infer_flag_) { + return NNACL_INFER_INVALID; + } + int output_shape[MAX_SHAPE_SIZE]; + size_t output_shape_size = 0; + for (size_t i = 0; i < input->shape_size_; ++i) { + if (i != axis) { + ShapePush(output_shape, &output_shape_size, input->shape_[i]); + } + } + for (size_t i = 0; i < outputs_size; i++) { + if (outputs[i] == NULL) { + return NNACL_NULL_PTR; + } + SetShapeArray(outputs[i], output_shape, output_shape_size); + } + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/unstack_infer.h b/mindspore/lite/nnacl/infer/unstack_infer.h new file mode 100644 index 0000000000..787e369f01 --- /dev/null +++ b/mindspore/lite/nnacl/infer/unstack_infer.h @@ -0,0 +1,32 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
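UnstackInferShape above drops the (normalized) axis from the input shape and assigns the result to every output; the runtime is expected to supply shape[axis] outputs. Sketch: a [2, 5, 3] input unstacked along axis 1 yields five outputs of shape [2, 3]:

    #include <assert.h>

    int main(void) {
      int in_shape[3] = {2, 5, 3};
      int axis = 1;
      int out_shape[2];
      size_t o = 0;
      for (int i = 0; i < 3; i++) {
        if (i != axis) {
          out_shape[o++] = in_shape[i];
        }
      }
      /* expect in_shape[axis] == 5 outputs, each shaped [2, 3] */
      assert(o == 2 && out_shape[0] == 2 && out_shape[1] == 3);
      return 0;
    }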
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_NNACL_UNSTACK_INFER_H
+#define MINDSPORE_LITE_NNACL_UNSTACK_INFER_H
+
+#include "nnacl/infer/common_infer.h"
+#include "nnacl/unstack_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int UnstackInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                      OpParameter *parameter);
+
+#ifdef __cplusplus
+}
+#endif
+#endif  // MINDSPORE_LITE_NNACL_UNSTACK_INFER_H
diff --git a/mindspore/lite/nnacl/infer/where_infer.c b/mindspore/lite/nnacl/infer/where_infer.c
new file mode 100644
index 0000000000..88e84a1c84
--- /dev/null
+++ b/mindspore/lite/nnacl/infer/where_infer.c
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/infer/where_infer.h"
+
+int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size,
+                    OpParameter *parameter) {
+  int check_ret = CheckAugmentNull(inputs, inputs_size, outputs, outputs_size, parameter);
+  if (check_ret != NNACL_OK) {
+    return check_ret;
+  }
+
+  const TensorC *input = inputs[0];
+  TensorC *output = outputs[0];
+
+  // With only the condition input, the output shape depends on runtime data.
+  if (inputs_size == 1) {
+    return NNACL_INFER_INVALID;
+  }
+
+  if (inputs_size < 3 || outputs_size != 1) {
+    return NNACL_INPUT_TENSOR_ERROR;
+  }
+
+  SetDataTypeFormat(output, input);
+  if (!parameter->infer_flag_) {
+    return NNACL_INFER_INVALID;
+  }
+
+  const TensorC *input0 = inputs[0];
+  const TensorC *input1 = inputs[1];
+  const TensorC *input2 = inputs[2];
+  int num = GetElementNum(input0);
+  int num1 = GetElementNum(input1);
+  int num2 = GetElementNum(input2);
+  int nummax = num > num1 && num > num2 ? num : (num1 > num2 ?
num1 : num2); + int axisout = 0; + size_t temp = 0; + for (size_t j = 0; j < input0->shape_size_; j++) { + if (input0->shape_[j] == input1->shape_[j] && input0->shape_[j] != input2->shape_[j]) { + axisout = j; + break; + } + if (input0->shape_[j] == input2->shape_[j] && input0->shape_[j] != input1->shape_[j]) { + axisout = j; + break; + } + if (input1->shape_[j] == input2->shape_[j] && input0->shape_[j] != input1->shape_[j]) { + axisout = j; + break; + } + temp += 1; + if (temp == input0->shape_size_) { + SetShapeTensor(output, input); + output->data_type_ = input->data_type_; + return NNACL_OK; + } + } + ShapeSet(output->shape_, &output->shape_size_, input0->shape_, input0->shape_size_); + output->shape_[axisout] = nummax; + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/where_infer.h b/mindspore/lite/nnacl/infer/where_infer.h new file mode 100644 index 0000000000..182a8b45ce --- /dev/null +++ b/mindspore/lite/nnacl/infer/where_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_WHERE_INFER_H +#define MINDSPORE_LITE_NNACL_WHERE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int WhereInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_WHERE_INFER_H diff --git a/mindspore/lite/nnacl/infer/while_infer.c b/mindspore/lite/nnacl/infer/while_infer.c new file mode 100644 index 0000000000..1e0de40e13 --- /dev/null +++ b/mindspore/lite/nnacl/infer/while_infer.c @@ -0,0 +1,30 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "nnacl/infer/while_infer.h" + +int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter) { + if (inputs_size != outputs_size) { + return NNACL_ERR; + } + for (size_t i = 0; i < inputs_size; i++) { + SetDataTypeFormat(outputs[i], inputs[i]); + SetShapeTensor(outputs[i], inputs[i]); + } + + return NNACL_OK; +} diff --git a/mindspore/lite/nnacl/infer/while_infer.h b/mindspore/lite/nnacl/infer/while_infer.h new file mode 100644 index 0000000000..10616d5b19 --- /dev/null +++ b/mindspore/lite/nnacl/infer/while_infer.h @@ -0,0 +1,31 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_NNACL_WHILE_INFER_H +#define MINDSPORE_LITE_NNACL_WHILE_INFER_H + +#include "nnacl/infer/common_infer.h" + +#ifdef __cplusplus +extern "C" { +#endif + +int WhileInferShape(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs, size_t outputs_size, + OpParameter *parameter); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_WHILE_INFER_H diff --git a/mindspore/lite/nnacl/int8/layer_norm_int8.c b/mindspore/lite/nnacl/int8/layer_norm_int8.c index 4502ee2981..79448af47f 100644 --- a/mindspore/lite/nnacl/int8/layer_norm_int8.c +++ b/mindspore/lite/nnacl/int8/layer_norm_int8.c @@ -62,12 +62,10 @@ int LayerNormInt8(const int8_t *src_data, const float *gamma_data, const float * deno); } } else { - int x = i / param->norm_outer_size_; - const int8_t *src_param = src_norm + x * param->params_inner_size_; - int8_t *dst_param = dst_norm + x * param->params_inner_size_; - const float *gamma = gamma_data + x * param->params_inner_size_; - const float *beta = beta_data + x * param->params_inner_size_; - LayerNormGammaAndBetaInt8(dst_param, src_param, gamma, beta, quant, param->norm_inner_size_, mean, deno); + int x = i / param->params_outer_size_; + const float *gamma = gamma_data + x * param->norm_inner_size_; + const float *beta = beta_data + x * param->norm_inner_size_; + LayerNormGammaAndBetaInt8(dst_norm, src_norm, gamma, beta, quant, param->norm_inner_size_, mean, deno); } } return NNACL_OK; diff --git a/mindspore/lite/nnacl/int8/reduce_int8.c b/mindspore/lite/nnacl/int8/reduce_int8.c index a2fa720448..e40e45be40 100644 --- a/mindspore/lite/nnacl/int8/reduce_int8.c +++ b/mindspore/lite/nnacl/int8/reduce_int8.c @@ -137,10 +137,7 @@ int ReduceMeanHW(int n, int plane, int count, int c, int8_t *in_data, int8_t *ou quant_arg.multiplier_), quant_arg.right_shift_); mean += bias; - mean = MSMIN(mean, INT8_MAX); - mean = MSMAX(mean, INT8_MIN); - out_ptr[0] = mean; - out_ptr++; + *out_ptr++ = MSMAX(MSMIN(mean, INT8_MAX), INT8_MIN); } } return NNACL_OK; @@ -243,13 +240,7 @@ int ReduceMeanLastAxis(const int outer_size, const int inner_size, const int axi } mean = mean_scaled + quant->out_zp_; - if (mean > INT8_MAX) { - *inner_dst = INT8_MAX; - } else if (mean < INT8_MIN) { - *inner_dst = INT8_MIN; - } else { - *inner_dst = 
(int8_t)mean;
-    }
+      *inner_dst = MSMAX(MSMIN(mean, INT8_MAX), INT8_MIN);
     }
   }
   return NNACL_OK;
diff --git a/mindspore/lite/nnacl/int8/splice_int8.c b/mindspore/lite/nnacl/int8/splice_int8.c
new file mode 100644
index 0000000000..3ee891a25b
--- /dev/null
+++ b/mindspore/lite/nnacl/int8/splice_int8.c
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "nnacl/int8/splice_int8.h"
+void SpliceInt8(const int8_t *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter,
+                int8_t *dst_data, int dst_row, int dst_col) {
+  for (int r = 0; r < dst_row; ++r) {
+    for (int off = 0; off < splice_parameter->context_dim_; ++off) {
+      int r_off = r + splice_parameter->context_[off];
+      r_off = MSMAX(r_off, 0);
+      r_off = MSMIN(r_off, src_row - 1);
+      const int8_t *tmp_src_data = src_data + r_off * src_col;
+      int8_t *tmp_dst_data = dst_data + r * dst_col;
+      memcpy(tmp_dst_data + off * src_col, tmp_src_data, src_col * sizeof(int8_t));
+    }
+  }
+}
diff --git a/mindspore/lite/nnacl/int8/splice_int8.h b/mindspore/lite/nnacl/int8/splice_int8.h
new file mode 100644
index 0000000000..326aea4a0d
--- /dev/null
+++ b/mindspore/lite/nnacl/int8/splice_int8.h
@@ -0,0 +1,34 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
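+ *
+ * SpliceInt8 copies, for each of the dst_row output rows, the src_col-wide
+ * source rows selected by the context offsets (clamped to [0, src_row - 1])
+ * side by side, so dst_col is expected to equal context_dim_ * src_col.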
+ */ +#ifndef MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ +#define MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ +#include <string.h> +#include "nnacl/splice_parameter.h" +#ifdef __cplusplus +extern "C" { +#endif + +void SpliceInt8(const int8_t *src_data, int src_row, int src_col, const SpliceParameter *splice_parameter, + int8_t *dst_data, int dst_row, int dst_col); + +#ifdef __cplusplus +} +#endif +#endif // MINDSPORE_LITE_NNACL_INT8_SPLICE_INT8_H_ diff --git a/mindspore/lite/nnacl/l2_norm_parameter.h b/mindspore/lite/nnacl/l2_norm_parameter.h index 615f8273d9..4343ef3f5e 100644 --- a/mindspore/lite/nnacl/l2_norm_parameter.h +++ b/mindspore/lite/nnacl/l2_norm_parameter.h @@ -17,7 +17,7 @@ #define MINDSPORE_LITE_NNACL_L2NORM_PARAMETER_H_ #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" typedef struct L2NormParameter { // Primitive parameter diff --git a/mindspore/lite/nnacl/layer_norm_parameter.h b/mindspore/lite/nnacl/layer_norm_parameter.h index 4b8b077e9d..928662d9d7 100644 --- a/mindspore/lite/nnacl/layer_norm_parameter.h +++ b/mindspore/lite/nnacl/layer_norm_parameter.h @@ -17,13 +17,15 @@ #define MINDSPORE_LITE_NNACL_LAYER_NORM_PARAMETER_H_ #include "nnacl/op_base.h" -#include "mindspore/lite/nnacl/int8/quantize.h" +#include "nnacl/int8/quantize.h" enum ElementwiseMode { ELEMENTWISE_NOT = 0, ELEMENTWISE_PER_CHANNEL = 1, ELEMENTWISE_PER_NUM = 2 }; typedef struct LayerNormParameter { // Primitive parameter OpParameter op_parameter_; float epsilon_; + enum ElementwiseMode elementwise_mode_; + bool elementwise_affine_; int begin_norm_axis_; int begin_params_axis_; // shape correlative diff --git a/mindspore/lite/nnacl/lstm_parameter.h b/mindspore/lite/nnacl/lstm_parameter.h index 8a04e3a81a..98d78727db 100644 --- a/mindspore/lite/nnacl/lstm_parameter.h +++ b/mindspore/lite/nnacl/lstm_parameter.h @@ -30,10 +30,8 @@ typedef struct LstmParameter { int input_step_; int output_step_; bool bidirectional_; - // smooth factor for hidden/cell state calculation: - // output_hidden = old_hidden * smooth + new_hidden * (1 - smooth) - // output_cell = old_cell * smooth + new_cell * (1 - smooth) - float smooth_; + float zoneout_cell_; + float zoneout_hidden_; int col_align_; int row_align_; } LstmParameter; diff --git a/mindspore/lite/nnacl/matmul_parameter.h b/mindspore/lite/nnacl/matmul_parameter.h index 4e6ff89aa1..8c1376043b 100644 --- a/mindspore/lite/nnacl/matmul_parameter.h +++ b/mindspore/lite/nnacl/matmul_parameter.h @@ -59,6 +59,8 @@ typedef struct MatMulParameter { bool a_const_; bool b_const_; ActType act_type_; + bool use_axis_; + int axis_; } MatMulParameter; typedef struct MatmulQuantParameter { diff --git a/mindspore/lite/nnacl/nnacl_common.h b/mindspore/lite/nnacl/nnacl_common.h index 365257c958..3e02fe8991 100644 --- a/mindspore/lite/nnacl/nnacl_common.h +++ b/mindspore/lite/nnacl/nnacl_common.h @@ -23,7 +23,7 @@ extern "C" { #endif -inline void ComputeStrides(const int *shape, int *strides, const int ndim) { +static inline void ComputeStrides(const int *shape, int *strides, const int ndim) { int stride = 1; for (int i = ndim - 1; i >= 0; i--) { strides[i] = stride; diff --git a/mindspore/lite/nnacl/op_base.h b/mindspore/lite/nnacl/op_base.h index 8ce19188b0..a6dadef202 100644 --- a/mindspore/lite/nnacl/op_base.h +++ b/mindspore/lite/nnacl/op_base.h @@ -59,6 +59,7 @@ #define kNHWC_C 3 #define kInputSize1 2 #define kInputSize2 3 +#define MAX_AXIS_SIZE 6 #define MAX_LEN 256 typedef enum LiteDataType { @@ -76,6 +77,7 @@ typedef enum 
DataOrder { typedef struct OpParameter { char name_[100]; + bool infer_flag_; int type_; int thread_num_; } OpParameter; @@ -92,7 +94,7 @@ typedef struct QuantMulArg { } QuantMulArg; typedef enum ActType { ActType_No, ActType_Relu, ActType_Sigmod, ActType_Relu6, ActType_Prelu } ActType; -typedef enum PadMode { Pad_No, Pad_Same, Pad_Valid } PadMode; +typedef enum PadMode { Pad_pad, Pad_same, Pad_valid } PadMode; typedef enum RoundingMode { Rounding_No, Rounding_Away_from_zero, Rounding_Up } RoundingMode; typedef enum CalFixedMultiplierMode { Method_No, diff --git a/mindspore/lite/nnacl/slice_parameter.h b/mindspore/lite/nnacl/slice_parameter.h index e535ed6371..d3627965e5 100644 --- a/mindspore/lite/nnacl/slice_parameter.h +++ b/mindspore/lite/nnacl/slice_parameter.h @@ -35,6 +35,7 @@ typedef struct SliceParameter { int32_t begin_[COMM_SHAPE_SIZE]; int32_t end_[COMM_SHAPE_SIZE]; int32_t size_[COMM_SHAPE_SIZE]; + int32_t axis_[COMM_SHAPE_SIZE]; // other parameter SliceQuantArg quant_arg_; diff --git a/mindspore/lite/nnacl/splice_parameter.h b/mindspore/lite/nnacl/splice_parameter.h new file mode 100644 index 0000000000..d9cc2a45e2 --- /dev/null +++ b/mindspore/lite/nnacl/splice_parameter.h @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
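+ *
+ * SpliceParameter carries the Splice primitive's attributes: context_ and
+ * forward_indexes_ point to arrays of context_dim_ and forward_indexes_dim_
+ * entries, and output_dim_ is the width of each spliced output row.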
+ */ + +#ifndef MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ +#define MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ +#include "nnacl/op_base.h" +typedef struct SpliceParameter { + OpParameter op_parameter_; + int context_dim_; + int forward_indexes_dim_; + int *context_; + int *forward_indexes_; + int output_dim_; +} SpliceParameter; +#endif // MINDSPORE_LITE_NNACL_SPLICE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/squeeze_parameter.h b/mindspore/lite/nnacl/squeeze_parameter.h index 44cadaae2c..77a419aa12 100644 --- a/mindspore/lite/nnacl/squeeze_parameter.h +++ b/mindspore/lite/nnacl/squeeze_parameter.h @@ -16,12 +16,31 @@ #ifndef MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ #define MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ - #include "nnacl/op_base.h" +#include "nnacl/int8/quantize.h" + +#define SQUEEZE_OFFSET_MAX_SIZE 4 typedef struct SqueezeQuantArg { QuantArg *in_quant_args_; QuantArg *out_quant_args_; } SqueezeQuantArg; +typedef struct SqueezeParameter { + // primitive parameter + OpParameter op_parameter_; + int axis_[8]; + size_t axis_size_; + + // shape correlative + const int *in_shape_; + const int *out_shape_; + int offset_size_; + int64_t offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int64_t in_offset_[SQUEEZE_OFFSET_MAX_SIZE]; + int input_dim_; + // other parameter + SqueezeQuantArg quant_arg; +} SqueezeParameter; + #endif // MINDSPORE_LITE_NNACL_SQUEEZE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/strided_slice_parameter.h b/mindspore/lite/nnacl/strided_slice_parameter.h index 64384cf582..91fa2d6571 100644 --- a/mindspore/lite/nnacl/strided_slice_parameter.h +++ b/mindspore/lite/nnacl/strided_slice_parameter.h @@ -33,6 +33,11 @@ typedef struct StridedSliceParameter { // other parameter int num_axes_; LiteDataType data_type; + int begins_mask_; + int ends_mask_; + int ellipsisMask_; + int newAxisMask_; + int shrinkAxisMask_; } StridedSliceParameter; #endif // MINDSPORE_LITE_NNACL_STRIDED_SLICE_PARAMETER_H_ diff --git a/mindspore/lite/nnacl/tensor_c.h b/mindspore/lite/nnacl/tensor_c.h new file mode 100644 index 0000000000..7a3d44b3da --- /dev/null +++ b/mindspore/lite/nnacl/tensor_c.h @@ -0,0 +1,28 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
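+ *
+ * TensorC is the plain-C tensor view used by the unified infer-shape
+ * functions: data type and format tags, an optional data pointer, and a
+ * fixed-capacity shape of at most MAX_SHAPE_SIZE dims tracked by shape_size_.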
+ */ +#ifndef MINDSPORE_LITE_NNACL_TENSOR_C_H_ +#define MINDSPORE_LITE_NNACL_TENSOR_C_H_ +#include "nnacl/op_base.h" + +typedef struct TensorC { + int data_type_; + int format_; + void *data_; + size_t shape_size_; + int shape_[MAX_SHAPE_SIZE]; +} TensorC; + +#endif // MINDSPORE_LITE_NNACL_TENSOR_C_H_ diff --git a/mindspore/lite/nnacl/tensorlist_parameter.h b/mindspore/lite/nnacl/tensorlist_parameter.h index 30a9a4e3b3..0cf8156913 100644 --- a/mindspore/lite/nnacl/tensorlist_parameter.h +++ b/mindspore/lite/nnacl/tensorlist_parameter.h @@ -18,13 +18,12 @@ #define MINDSPORE_LITE_NNACL_TENSORLIST_PARAMETER_H_ #include "nnacl/op_base.h" -#include "ir/dtype/type_id.h" typedef struct TensorListParameter { // primitive parameter OpParameter op_parameter_; - mindspore::TypeId shape_type_; - mindspore::TypeId element_dtype_; + int shape_type_; + int element_dtype_; // other parameter int num_element_; diff --git a/mindspore/lite/nnacl/transpose.h b/mindspore/lite/nnacl/transpose.h index 06087526b2..c22e6e70e1 100644 --- a/mindspore/lite/nnacl/transpose.h +++ b/mindspore/lite/nnacl/transpose.h @@ -25,6 +25,7 @@ typedef struct TransposeParameter { // primitive parameter OpParameter op_parameter_; int perm_[MAX_SHAPE_SIZE]; + size_t perm_size_; bool conjugate_; // shape correlative diff --git a/mindspore/lite/nnacl/unsqueeze_parameter.h b/mindspore/lite/nnacl/unsqueeze_parameter.h index 137dc2ac51..e543d27209 100644 --- a/mindspore/lite/nnacl/unsqueeze_parameter.h +++ b/mindspore/lite/nnacl/unsqueeze_parameter.h @@ -32,6 +32,7 @@ typedef struct UnSqueezeParameter { // primitive parameter OpParameter op_parameter_; int dims_[COMM_SHAPE_SIZE]; + int num_dim_; // shape correlative const int *in_shape_; diff --git a/mindspore/lite/schema/model.fbs b/mindspore/lite/schema/model.fbs index ef8906952a..7a4a14a0c9 100644 --- a/mindspore/lite/schema/model.fbs +++ b/mindspore/lite/schema/model.fbs @@ -19,7 +19,7 @@ include "ops.fbs"; namespace mindspore.schema; // This corresponds to the version. -file_identifier "MSL1"; +file_identifier "MSL2"; // File extension of any written files. 
file_extension "ms"; @@ -60,226 +60,6 @@ table Tensor { enableHuffmanCode: bool = false; } -union PrimitiveType { - Concat, - SoftMax, - Activation, - Conv2D, - FusedBatchNorm, - BatchNorm, - BiasAdd, - Pooling, - ROIPooling, - DepthwiseConv2D, - DeDepthwiseConv2D, - Resize, - DetectionPostProcess, - FullConnection, - Mean, // DEPRECATED - DeConv2D, - Scale, - Reshape, - Eltwise, - NetOutput, - Add, - Sub, - MatMul, - StridedSlice, - Power, - Slice, - Stack, - Mul, - RealDiv, - Pad, - Maximum, - Minimum, - PReLU, - LeakyReLU, - ArgMax, - ArgMin, - Exp, - Crop, - Range, - Rsqrt, - ExpandDims, - Tile, - Cast, - Shape, - Nchw2Nhwc, // DEPRECATED - Nhwc2Nchw, // DEPRECATED - QuantDTypeCast, - Split, - Permute, // DEPRECATED - FakeQuantWithMinMaxVars, - Equal, - Less, - Greater, - NotEqual, - LessEqual, - GreaterEqual, - Min, - Floor, - Abs, - Neg, - Cos, - Sin, - Sqrt, - Square, - Constant, - Log, - Tan, - Atan, - Asin, - Clip, - Transpose, - Squeeze, - Unsqueeze, - Upsample, - Dropout, - Broadcast, - BroadcastTo, - Lrn, - ZerosLike, - TopK, - SpaceToDepth, - SpaceToBatch, - SparseToDense, - ReverseSequence, - Rank, - Gather, - GatherNd, - Fill, - Elu, - DepthToSpace, - BatchToSpace, - AddN, - Ceil, - EmbeddingLookup, - EmbeddingLookupSparse, - FloorDiv, - FloorMod, - L2Norm, - LocalResponseNormalization, - MatrixDiag, - Reduce, - Reverse, - Round, - Select, - Scatter, - ScatterND, - ConstantOfShape, - Unique, - Unstack, - LogicalAnd, - LogicalOr, - LogicalXor, - LogicalNot, - OnnxInt8Quantize, - OnnxInt8Dequantize, - FakeQuantWithMinMax, - FakeQuantWithMinMaxPerChannel, - BatchNormFold, - MulFold, - AddFold, - SquaredDifference, - Flatten, - FlattenGrad, - TupleGetItem, - Div, - Where, - OneHot, - Lstm, - Conv2DGradFilter, - Conv2DGradInput, - PoolingGrad, - BNGrad, - Assign, - ApplyMomentum, - BiasGrad, - SoftmaxCrossEntropy, - AddGrad, - SubGrad, - MulGrad, - DivGrad, - PowerGrad, - ActivationGrad, - PriorBox, - SpaceToBatchND, - Depend, - Return, - MakeTuple, - ToFormat, - Proposal, - Custom, - BlackBox, - NegGrad, - LogGrad, - BatchToSpaceND, - LshProjection, - HashtableLookup, - SkipGram, - DeConv2DGradFilter, - CustomPredict, - CustomNormalize, - CustomExtractFeatures, - AudioSpectrogram, - Mfcc, - Rfft, - FftReal, - FftImag, - Sgd, - Adam, - GroupConv2DGradInput, - Loop, - NonMaxSuppression, - InstanceNorm, - Identity, - LayerNorm, - While, - ControlDepend, - UnsortedSegmentSum, - AssignAdd, - OnesLike, - BinaryCrossEntropyGrad, - BinaryCrossEntropy, - LpNormalization, - DropoutGrad, - MaximumGrad, - MinimumGrad, - Switch, - Partial, - TensorListFromTensor, - TensorListStack, - TensorListGetItem, - TensorListSetItem, - TensorListReserve, - All, - Assert, - Adder, - SparseSoftmaxCrossEntropy, - SmoothL1Loss, - SmoothL1LossGrad, - SigmoidCrossEntropyWithLogits, - SigmoidCrossEntropyWithLogitsGrad, - Reciprocal, - Merge, - Mod, - If, - GeLU, - Gru, - NonZero, - InvertPermutation, - Size, - RandomStandardNormal, - CropAndResize, - Erf, - StridedSliceGrad, - IsFinite, - LinSpace, - UniformReal, - AbsGrad -} - enum QuantType: int { QUANT_NONE, AwareTraining, diff --git a/mindspore/lite/schema/model_v0.fbs b/mindspore/lite/schema/model_v0.fbs index 9ac325cf35..2c7b0dda67 100644 --- a/mindspore/lite/schema/model_v0.fbs +++ b/mindspore/lite/schema/model_v0.fbs @@ -35,6 +35,8 @@ table QuantParam { varCorr: float = 1; meanCorr: float = 0; dstDtype: int = 32; + roundType: int = 1; + multiplier: int = 1; // calculate fixed point multiplier method } table Tensor { @@ -256,10 +258,6 @@ union 
PrimitiveType { Reciprocal, Merge, Mod, - If, - GeLU, - Gru, - NonZero, } enum QuantType: int { diff --git a/mindspore/lite/schema/ops.fbs b/mindspore/lite/schema/ops.fbs index ff3c43eab4..ad77ac2526 100644 --- a/mindspore/lite/schema/ops.fbs +++ b/mindspore/lite/schema/ops.fbs @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,1234 +13,1014 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +include "ops_types.fbs"; namespace mindspore.schema; -enum ResizeMethod: byte { - UNKNOWN = -1, - LINEAR = 0, - NEAREST = 1, - CUBIC = 2 -} - -enum CoordinateTransformMode: byte { - COMMON = 0, - HALF_PIXEL = 1, - PYTORCH_HALF_PIXEL = 2, - TF_HALF_PIXEL = 3, - TF_CROP_AND_RESIZE = 4, - ALIGN_CORNERS = 5, - ASYMMETRIC = 6, - ALIGN_CORNERS_WITH_HALF_PIEXL = 7 -} - -enum NearestMode : byte { - NORMAL = 0, - ROUND_HALF_DOWN = 1, - ROUND_HALF_UP = 2, - FLOOR = 3, - CEIL = 4 -} - -enum Format : int { - NCHW = 0, - NHWC, - NHWC4, - HWKC, - HWCK, - KCHW, - CKHW, - KHWC, - CHWK, - HW, - HW4, - NC, - NC4, - NC4HW4 = 100, - NUM_OF_FORMAT -} - -enum ActivationType : byte { - NO_ACTIVATION = 0, - RELU = 1, - SIGMOID = 2, - RELU6 = 3, - ELU = 4, - LEAKY_RELU = 5, - ABS = 6, - RELU1 = 7, - SOFTSIGN = 8, - SOFTPLUS = 9, - TANH = 10, - SELU = 11, - HSWISH = 12, - HSIGMOID = 13, - THRESHOLDRELU = 14, - LINEAR = 15, - HARD_TANH = 16, - SIGN = 17, - SWISH = 18, - UNKNOWN = 19 -} -enum ActivationGradType : byte { - NO_ACTIVATION = 0, - RELU = 1, - SIGMOID = 2, - RELU6 = 3, - ELU = 4, - LEAKY_RELU = 5, - ABS = 6, - RELU1 = 7, - SOFTSIGN = 8, - SOFTPLUS = 9, - TANH = 10, - SELU = 11, - HSWISH = 12, - HSIGMOID = 13, - THRESHOLDRELU = 14, - LINEAR = 15, - UNKNOWN = 16, - LOG = 17 -} -enum ReduceType : byte { - REDUCE_MAX = 0, - REDUCE_MEAN = 1, - REDUCE_ALL = 2, - REDUCE_ANY = 3, - REDUCE_LOG_SUM_EXP = 4, - REDUCE_PROD = 5, - REDUCE_SUM = 6, - UNKNOWN = 7 -} - -enum PoolMode : byte { - MAX_POOLING = 0, - MEAN_POOLING = 1, -} - -enum EltwiseMode : byte { - PROD = 0, - SUM = 1, - MAXIMUM = 2, - UNKNOWN = 3 -} - -enum PadMode : byte { - NOTSET = 0, - SAME_UPPER = 1, - VALID = 2, - CAFFE = 4, - SAME_LOWER = 5 -} - -enum RoundMode : byte { - FLOOR = 0, - CEIL = 1 -} - -enum PaddingMode : byte { - CONSTANT = 0, - REFLECT = 1, - SYMMETRIC = 2, - MODE_RESERVED = 3 -} - -enum LshProjectionType : byte { - UNKNOWN = 0, - SPARSE = 1, - DENSE = 2 -} - -table Pad { - paddings: [int]; - paddingMode: PaddingMode; - constantValue: float; +union PrimitiveType { + Abs, + Activation, + ActivationGrad, + Adam, + AddFusion, + AdderFusion, + AddGrad, + AddN, + All, + ApplyMomentum, + ArgMaxFusion, + ArgMinFusion, + Assert, + Assign, + AssignAdd, + AudioSpectrogram, + AvgPoolFusion, + AvgPoolGrad, + BatchNorm, + BatchNormGrad, + BatchToSpace, + BatchToSpaceND, + BiasAdd, + BinaryCrossEntropy, + BinaryCrossEntropyGrad, + BiasAddGrad, + BroadcastTo, + Cast, + Ceil, + Clip, + Concat, + ControlDepend, + Conv2DBackpropFilterFusion, + Conv2DBackpropInputFusion, + Conv2DFusion, + Conv2dTransposeFusion, + Cos, + ConstantOfShape, + Crop, + CustomExtractFeatures, + CustomNormalize, + CustomPredict, + DeConv2DGradFilter, + Depend, + DepthToSpace, + DetectionPostProcess, + DivFusion, + DivGrad, + Dropout, + DropoutGrad, + Elu, + Eltwise, + Equal, + EmbeddingLookupFusion, + ExpFusion, + 
ExpandDims, + FakeQuantWithMinMaxVars, + FakeQuantWithMinMaxVarsPerChannel, + FftReal, + FftImag, + Flatten, + FlattenGrad, + Floor, + FloorDiv, + FloorMod, + Fill, + FullConnection, + FusedBatchNorm, + Gather, + GatherNd, + Greater, + GreaterEqual, + HashtableLookup, + InstanceNorm, + LayerNormFusion, + LeakyRelu, + Less, + LessEqual, + Log, + LogGrad, + LogicalAnd, + LogicalNot, + LogicalOr, + LpNormalization, + LRN, + LshProjection, + LSTM, + L2NormalizeFusion, + MatMul, + Maximum, + MaximumGrad, + MaxPoolFusion, + MaxPoolGrad, + Merge, + Mfcc, + Minimum, + MinimumGrad, + Mod, + MulFusion, + MulGrad, + Neg, + NegGrad, + NotEqual, + NonMaxSuppression, + OneHot, + OnesLike, + PadFusion, + PartialFusion, + PowerGrad, + PowFusion, + PriorBox, + PReLUFusion, + QuantDTypeCast, + Rank, + Range, + Reciprocal, + RealDiv, + ReduceFusion, + Reshape, + Resize, + ReverseSequence, + ReverseV2, + Rfft, + ROIPooling, + Round, + Rsqrt, + ScaleFusion, + ScatterNd, + SGD, + Shape, + SigmoidCrossEntropyWithLogits, + SigmoidCrossEntropyWithLogitsGrad, + Sin, + SkipGram, + SliceFusion, + SmoothL1Loss, + SmoothL1LossGrad, + Softmax, + SoftmaxCrossEntropyWithLogits, + SpaceToBatch, + SpaceToBatchND, + SpaceToDepth, + SparseSoftmaxCrossEntropy, + SparseToDense, + Split, + Sqrt, + Squeeze, + Square, + SquaredDifference, + Stack, + StridedSlice, + SubFusion, + SubGrad, + Switch, + TensorListFromTensor, + TensorListGetItem, + TensorListReserve, + TensorListSetItem, + TensorListStack, + TileFusion, + TopKFusion, + Transpose, + Unique, + UnsortedSegmentSum, + Unsqueeze, + Unstack, + While, + Where, + ZerosLike, + Select, + If, + GRU, + NonZero, + InvertPermutation, + Size, + RandomStandardNormal, + CropAndResize, + Erf, + StridedSliceGrad, + IsFinite, + LinSpace, + UniformReal, + AbsGrad, } -table Maximum { -} - -table Minimum { -} - -table Flatten { -} -table FlattenGrad { -} -table Concat { - axis: int; - n: int; // DEPRECATED -} - -table SoftMax { - axis: int = -1; +table Abs { } table Activation { - type: ActivationType = 0; - alpha: float = 0.2; - min_val: float = -1.0; - max_val: float = 1.0; -} -table ActivationGrad { - type: ActivationType = 0; - alpha: float = 0.2; -} - - -table Conv2D { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - activationType: ActivationType = 0; -} - -table Adder { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; - activationType: ActivationType = 0; -} - -table Conv2DGradFilter { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - filter_shape: [int]; // DEPRECATED - activationType: ActivationType = 0; -} - -table Conv2DGradInput { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - 
hasBias: bool = false; // DEPRECATED - input_shape: [int]; // DEPRECATED - activationType: ActivationType = 0; -} - -table GroupConv2DGradInput { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - input_shape: [int]; - activationType: ActivationType = 0; -} - -table FusedBatchNorm { - epsilon: float = 0.00001; // eg. epsilon=0.001 - momentum: float = 0.9; - spatial: int = 1; -} - -table BatchNorm { - epsilon: float = 0.00001; // eg. epsilon=0.001 -} - -table BiasGrad { + activation_type: ActivationType = 0; + alpha: float; + min_val: float; + max_val: float; } - -table SoftmaxCrossEntropy { +table ActivationGrad { + activation_type: ActivationType; + alpha: float; } -table SparseSoftmaxCrossEntropy { - isGrad: bool; +table Adam { + use_locking: bool; + use_nesterov: bool; } -table make_tuple { +table AddFusion { + activation_type: ActivationType = 0; } - -table PoolingGrad { +table AdderFusion { format: Format = 0; - poolingMode: PoolMode; - global: bool = false; - windowW: int; - windowH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - roundMode: RoundMode; -} -table Shape { -} - -table ConstantOfShape{ - dataType: int; - value: [float]; -} - -table Nchw2Nhwc { // DEPRECATED - -} - -table Nhwc2Nchw { // DEPRECATED - + kernel_size: [long]; + stride: [long]; + dilation: [long]; + pad_mode: PadMode; + pad_list: [long]; + group: long; + in_channel: long; + out_channel: long; + activation_type: ActivationType = 0; } -table FakeQuantWithMinMaxVars { - narrowRange: bool; - numBits: int; -} - -table BiasAdd { - axis: [int]; // DEPRECATED +table AddGrad { } -table ROIPooling { - pooledH: int; - pooledW: int; - scale: float; +table AddN { } -table Pooling { - format: Format = 0; - poolingMode: PoolMode; - global: bool = false; - windowW: int; - windowH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - roundMode: RoundMode; - activationType: ActivationType = 0; - avgMode: int = 0; -} - -table DepthwiseConv2D { - format: Format = 0; - channelIn: int; - channelMultiplier: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - activationType: ActivationType = 0; -} - -table DeDepthwiseConv2D { - format: Format = 0; - channelIn: int; - channelMultiplier: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - activationType: ActivationType = 0; +table All { + keep_dims: long; } - -table Resize { - format: Format = 0; - method: ResizeMethod; - newHeight: long; - newWidth: long; - alignCorners: bool = false; // DEPRECATED IN FUTURE: use 'coordinateTransformMode' instead. 
- preserveAspectRatio: bool = false; - coordinateTransformMode : CoordinateTransformMode; - cubicCoeff : float; - excludeOutside : int; - extrapolationValue : float = 0; - nearestMode : NearestMode; +table ApplyMomentum { + use_nesterov: bool; + use_locking: bool; + gradient_scale: float; } -table DetectionPostProcess { - format: Format = 0; - inputSize: int; - hScale: float; - wScale: float; - xScale: float; - yScale: float; - NmsIouThreshold: float; - NmsScoreThreshold: float; - MaxDetections: long; - DetectionsPerClass: long; - MaxClassesPerDetection: long; - NumClasses: long; - UseRegularNms: bool; - OutQuantized: bool; +table ArgMaxFusion { + axis: long; + top_k: long = 1; + keep_dims: bool; + out_max_value: bool; } -table FullConnection { - hasBias: bool; - axis: int; - useAxis: bool; - activationType: ActivationType = 0; +table ArgMinFusion { + axis: long; + top_k: long; + keep_dims: bool; + out_max_value: bool; } -// Mean(input_tensor, axis, keep_dims) -table Mean { // DEPRECATED - axis: [int]; - keepDims: bool = false; +table Assert { + summarize: long; } -table DeConv2D { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - activationType: ActivationType = 0; - outputPaddingW: int; - outputPaddingH: int; +table Assign { } -table DeConv2DGradFilter { - format: Format = 0; - group: int; - channelIn: int; - channelOut: int; - kernelW: int; - kernelH: int; - strideW: int; - strideH: int; - padMode: PadMode; - padUp: int; - padDown: int; - padLeft: int; - padRight: int; - dilateW: int; - dilateH: int; - hasBias: bool = false; // DEPRECATED - activationType: ActivationType = 0; -} - -table BNGrad { - eps: float; - momentum: float; -} - -table Scale { - axis: int; - activationType: ActivationType = 0; +table AssignAdd { } -table Eltwise { - mode: EltwiseMode; +table AudioSpectrogram { + window_size: long; + stride: long; + mag_square: bool; } -table Add { - activationType: ActivationType = 0; +table AvgPoolFusion { + kernel_size: [long]; + strides: [long]; + pad: [long]; + pad_mode: PadMode; + round_mode: RoundMode; + format: Format; + global: bool; + activation_type: ActivationType = 0; } -table Sub { - activationType: ActivationType = 0; +table AvgPoolGrad { + kernel_size: [long]; + strides: [long]; + pad_mode: PadMode; + format: Format; } -table Mul { - activationType: ActivationType = 0; +table BatchNorm { + epsilon: float; + format: Format; + is_training: bool; } -table Div { - activationType: ActivationType = 0; +table BatchNormGrad { + epsilon: float; } -table AddGrad { +table BatchToSpace { + block_size: [long]; + crops: Vec2D; } -table SubGrad { +table BatchToSpaceND { + block_shape: [long]; + crops: Vec2D; } -table MulGrad { +table BiasAdd { + format: Format; } -table DivGrad { -} -table RealDiv { +table BinaryCrossEntropy { + reduction: Reduction; } -table Rsqrt { +table BinaryCrossEntropyGrad { + reduction: Reduction = 1; } -table Equal { +table BiasAddGrad { } -table Less { +table BroadcastTo { + shape: [long]; } -table Greater { +table Cast { } -table NotEqual { +table Ceil { } -table LessEqual { +table Clip { + max: float; + min: float; } -table GreaterEqual { +table Concat { + axis: long; } -table Min { +table ControlDepend { + depend_mode: long; } -table Slice { +table Conv2DBackpropFilterFusion { format: Format = 0; - axes: [int]; - 
begin: [int]; - size: [int]; -} - -table Floor { -} - -table Abs { -} - -table Neg { -} - -table NegGrad { -} - -table Exp { - base : float = -1.0; - scale : float = 1.0; - shift : float = 0.0; + kernel_size: [long]; + stride: [long]; + dilation: [long]; + pad_mode: PadMode; + pad_list: [long]; + mode: long; + group: long; + in_channel: long; + out_channel: long; + activation_type: ActivationType = 0; +} + +table Conv2DBackpropInputFusion { + format: Format = 0; + kernel_size: [long]; + stride: [long]; + dilation: [long]; + pad_mode: PadMode; + pad: [long]; + pad_list: [long]; + mode: long; + group: long; + in_channel: long; + out_channel: long; + activation_type: ActivationType = 0; +} + +table Conv2DFusion { + format: Format = 0; + kernel_size: [long]; + stride: [long]; + dilation: [long]; + pad_mode: PadMode; + pad_list: [long]; + mode: long; + group: long; + in_channel: long; + out_channel: long; + activation_type: ActivationType = 0; +} + +table Conv2dTransposeFusion { + format: Format = 0; + kernel_size: [long]; + stride: [long]; + dilation: [long]; + pad_mode: PadMode; + pad: [long]; + pad_list: [long]; + mode: long; + group: long; + in_channel: long; + out_channel: long; + activation_type: ActivationType = 0; } table Cos { } -table Sin { -} - -table Sqrt { +table ConstantOfShape { + data_type: long; + value: [float]; } -table Square { +table Crop { + axis: long; + offsets: [long]; } -table Ceil { +table CustomExtractFeatures { } -table Log { +table CustomNormalize { } -table LogGrad { +table CustomPredict { + output_num: long; + weight_threshold: float; } -table Tan { +table DeConv2DGradFilter { + in_channel: long; + out_channel: long; + kernel_size: [long]; + pad_mode: PadMode; + pad_list: [long]; + stride: [long]; + dilation: [long]; + group: long; + format: Format; + activation_type: ActivationType; } -table Atan { +table Depend { } -table Asin { +table DepthToSpace { + block_size: long; + format: Format = 0; } -table Reshape { +table DetectionPostProcess { format: Format = 0; - shape: [long]; + input_size: long; + scale: [float]; + nms_iou_threshold: float; + nms_score_threshold: float; + max_detections: long; + detections_per_class: long; + max_classes_per_detection: long; + num_classes: long; + use_regular_nms: bool; + out_quantized: bool; } -table Power { - power: float; - scale: float; - shift: float; -} -table PowerGrad { - power: float; - scale: float; - shift: float; -} -table ArgMax { - axis: int; - outMaxValue: bool; - topK: int = 1; - keepDims: bool; - axisType: int; +table DivFusion { + activation_type: ActivationType = 0; } -table ArgMin { - axis: int; - outMaxValue: bool; - topK: int = 1; - keepDims: bool; - axisType: int; +table DivGrad { } -table NetOutput { +table Dropout { + keep_prob: float = 0.5; } -table MatMul { - broadcast : bool = false; // DEPRECATED - transposeA : bool = false; - transposeB : bool = false; +table DropoutGrad { + keep_prob: float; } -table PReLU { - channelShared : bool = false; - slope: [float]; +table Elu { + alpha: float; } -table LeakyReLU { - negativeSlope: float; +table Eltwise { + mode: EltwiseMode; } -table StridedSlice { - beginMask: int; - endMask: int; - ellipsisMask: int; - newAxisMask: int; - shrinkAxisMask: int; - begin: [int]; - end: [int]; - stride: [int]; - isScale: [int]; +table Equal { } -table Stack { - axis: int; - n: int; - isScale: [int]; +table EmbeddingLookupFusion { + max_norm: float; } -table Range { - dType: int; - start: int; - limit: int; - delta: int = 1; +table ExpFusion { + base: float = -1; + scale: 
float = 1.0; + shift: float = 0.0; } table ExpandDims { - dim: int; -} - -table Tile { - multiples: [int]; - dims: [int]; } -table Cast { - srcT: int; - dstT: int; -} - -table QuantDTypeCast { - srcT: int; - dstT: int; -} - -table Split { - numberSplit: int; - sizeSplits: [int]; - splitDim: int; -} - -table Crop { - axis : long; - offsets : [long]; -} - -table Permute { // DEPRECATED - order: [long]; -} - -table Clip { - max: float; - min: float; +table FakeQuantWithMinMaxVars { + num_bits: long; + narrow_range: bool; } -table Constant { +table FakeQuantWithMinMaxVarsPerChannel { + num_bits: long; + narrow_range: bool; } - -table Elu { - alpha: float = 1.0; +table FftReal { } -table Broadcast { +table FftImag { } -table BroadcastTo { - dst_shape: [int]; +table Flatten { } -table Lrn { - alpha: float = 0.0001; - beta: float = 0.75; - bias: float = 1.0; - size: int; +table FlattenGrad { } -enum ReduceMode : byte { - ReduceMean = 0, - ReduceMax = 1, - ReduceMin = 2, - ReduceProd = 3, - ReduceSum = 4, - ReduceSumSquare = 5, - ReduceASum = 6, - ReduceAll = 7 +table Floor { } -table Reduce { - axes: [int]; - keepDims: int; - mode: ReduceMode; - reduceToEnd: bool = false; - coeff: float = 1.0; +table FloorDiv { } -table Transpose { - perm: [int]; - conjugate: bool = false; // DEPRECATED +table FloorMod { } -table Squeeze { - axis: [int]; +table Fill { } -table Unsqueeze { - axis: [int]; +table FullConnection { + has_bias: bool; + use_axis: bool; + axis: long; + activation_type: ActivationType = 0; } -table Upsample { - mode: string; - scales: [float]; +table FusedBatchNorm { + epsilon: float = 0.0001; + momentum: float = 0.9; + mode: long; } -table Dropout { - ratio : float = 0.5; +table Gather { } -table LocalResponseNormalization { - depth_radius: int; - bias: float; - alpha: float; - beta: float; +table GatherNd { } -table ZerosLike { +table Greater { } -table TopK { - k : int; - sorted : bool = true; +table GreaterEqual { } -table SpaceToDepth { - blockSize : int; - format: Format = 0; +table HashtableLookup { } -table SpaceToBatch { - blockShape : [int]; - paddings : [int]; +table InstanceNorm { + epsilon: float; } -table SparseToDense { - validateIndices: bool; +table LayerNormFusion { + begin_norm_axis: long; + epsilon: float = 0.00001; + elementwise_affine: bool; + begin_params_axis: long; } -table ReverseSequence { - seqAxis: int; - batchAxis: int; +table LeakyRelu { + negative_slope: float; } -table Rank { +table Less { } - -table Gather { - axis: int; - batchDims: int; +table LessEqual { } -table GatherNd { - batchDims: int; // DEPRECATED +table Log { } -table Fill { - dims: [int]; +table LogGrad { } -table DepthToSpace { - blockSize: int; - format: Format = 0; +table LogicalAnd { } - -table BatchToSpace { - blockShape: [int]; - crops: [int]; +table LogicalNot { } -table BatchToSpaceND { - blockShape: [int]; - crops: [int]; +table LogicalOr { } -table AddN { - N: int; // DEPRECATED +table LpNormalization { + axis: long; + p: long; } - -table EmbeddingLookup { - maxNorm: float = 0.0; +table LRN { + depth_radius: long; + bias: float; + alpha: float; + beta: float; + norm_region: string; } -table EmbeddingLookupSparse { - spIds: [int]; - spWeights: [float]; - //combiner: Combiner=0; - maxNortm: float; +table LshProjection { + type: LshProjectionType; } -table FloorDiv { +table LSTM { + bidirectional: bool; + has_bias: bool; + input_size: long; + hidden_size: long; + num_layers: long; + num_directions: long; + dropout: float; + zoneout_cell: float = 0; + zoneout_hidden: float = 0; } -table 
FloorMod { +table L2NormalizeFusion { + axis: [long]; + epsilon: float; + activation_type: ActivationType = 0; } -table Mod { +table MatMul { + transpose_a: bool = false; + transpose_b: bool = false; } -table L2Norm { - axis: [int]; - epsilon: float; - activationType: ActivationType = 0; +table Maximum { } -table LogicalAnd { +table MaximumGrad { + grad_x: bool; + grad_y: bool; } -table LogicalOr { +table MaxPoolFusion { + kernel_size: [long]; + strides: [long]; + pad: [long]; + pad_mode: PadMode; + round_mode: RoundMode; + format: Format; + global: bool; + activation_type: ActivationType = 0; } -table LogicalXor { +table MaxPoolGrad { + kernel_size: [long]; + strides: [long]; + pad_mode: PadMode; + format: Format; } -table LogicalNot { +table Merge { } -table MatrixDiag { - k: int; - numRows: int; - numCols: int; - paddingValue: float; +table Mfcc { + freq_upper_limit: float; + freq_lower_limit: float; + filter_bank_channel_num: long; + dct_coeff_num: long; } -table Select { +table Minimum { } -table TfReduce { - type: ReduceType = 7; +table MinimumGrad { + grad_x: bool; + grad_y: bool; } -table Reverse { - axis: [int]; +table Mod { } -table Round { +table MulFusion { + activation_type: ActivationType = 0; } -table Scatter { +table MulGrad { } -table ScatterND { +table Neg { } -table Unique { - outType: int; // DEPRECATED +table NegGrad { } -table Unstack { - num: int; // deprecated - axis: int; +table NotEqual { } -table OnnxInt8Quantize { +table NonMaxSuppression { + center_point_box: long; } -table OnnxInt8Dequantize { +table OneHot { + axis: long; } -table FakeQuantWithMinMax { +table OnesLike { } -table FakeQuantWithMinMaxPerChannel { +table PadFusion { + paddings: Vec2D; + padding_mode: PaddingMode; + constant_value: float; } -table BatchNormFold { +table PartialFusion { + sub_graph_index: long; } -table MulFold { +table PowerGrad { + power: float; + scale: float; + shift: float; } -table AddFold { +table PowFusion { + scale: float = 1; + shift: float = 0; } -table SquaredDifference { +table PriorBox { + min_sizes: [long]; + max_sizes: [long]; + aspect_ratios: [float]; + variances: [float]; + image_size_w: long; + image_size_h: long; + step_w: float; + step_h: float; + clip: bool; + flip: bool; + offset: float; } -table TupleGetItem { +table PReLUFusion { + channel_shared: bool; } -table ApplyMomentum { - gradientScale: float; - useNesterov: bool; +table Rank { } -table Sgd { - weightDecay: float; - dampening: float; - useNesterov: bool; +table Range { + d_type: long; + start: long; + limit: long; + delta: long = 1; } -table Adam { - useNesterov: bool; +table Reciprocal { } -table Assign { +table RealDiv { } -table AssignAdd { +table ReduceFusion { + keep_dims: bool; + mode: ReduceMode; + reduce_to_end: bool; + coeff: float; } -table Where{ - condition: [bool]; +table Reshape { } -table OneHot { - axis: int; +table Resize { + format: Format = 0; + method: ResizeMethod; + new_height: long; + new_width: long; + preserve_aspect_ratio: bool = false; + coordinate_transform_mode: CoordinateTransformMode; + cubic_coeff: float; + exclude_outside: long; + extrapolation_value: float; + nearest_mode: NearestMode; } -table Lstm{ - bidirection: bool = false; - smooth: float = 0.0; +table ReverseSequence { + seq_dim: long; + batch_dim: long; } -table Gru{ - bidirection: bool = false; +table ReverseV2 { + axis: [long]; } -table PriorBox { - min_sizes: [int]; - max_sizes: [int]; - aspect_ratios: [float]; - variances: [float]; - image_size_w: int; - image_size_h: int; - step_w: float; - step_h: 
float; - clip: bool = true; - flip: bool = true; - offset: float; +table Rfft { + fft_length: long; } -table SpaceToBatchND { - blockShape : [int]; - paddings : [int]; +table ROIPooling { + pooled_h: long; + pooled_w: long; + scale: float; } -table MakeTuple { +table Round { } -table ToFormat { - srcT: int; - dstT: int; +table Rsqrt { } - -table Depend { +table QuantDTypeCast { + src_t: long; + dst_t: long; } -table ControlDepend { +table ScaleFusion { + axis: long; + activation_type: ActivationType = 0; } -table Return { +table ScatterNd { } -table Proposal { - feat_stride : float; - base_size : float; - min_size : float; - ratio : [float]; - scale : [float]; - pre_nms_topn : int; - post_nms_topn : int; - nms_thresh : float; +table SGD { + nesterov: bool; + dampening: float; + weight_decay: float; } -table Custom { - custom : [ubyte]; +table Shape { } - -table BlackBox { - id : string; - size : int; - address : [ubyte]; +table SigmoidCrossEntropyWithLogits { } -table LshProjection { - type : LshProjectionType; +table SigmoidCrossEntropyWithLogitsGrad { } -table HashtableLookup { +table Sin { } table SkipGram { - includeAllGrams : bool; - maxSkipSize : int; - ngramSize : int; + include_all_grams: bool; + max_skip_size: long; + ngram_size: long; } -table CustomPredict { - outputNum : int; - weightThreshold : float; +table SliceFusion { + axes: [long]; } -table CustomNormalize { -} - -table CustomExtractFeatures { +table SmoothL1Loss { + beta: float; } -table AudioSpectrogram { - windowSize : int; - stride : int; - magSquare : bool; +table SmoothL1LossGrad { + beta: float; } -table Mfcc { - freqUpperLimit : float; - freqLowerLimit : float; - filterBankChannelNum : int; - dctCoeffNum : int; +table Softmax { + axis: [long]; } -table Rfft { - fftLength : int; +table SoftmaxCrossEntropyWithLogits { } -table FftReal { +table SpaceToBatch { + block_size: [long]; + paddings: Vec2D; } -table FftImag { +table SpaceToBatchND { + block_shape: [long]; + paddings: Vec2D; } -table DropoutGrad { - ratio : float = 0.5; +table SpaceToDepth { + block_size: long; + format: Format; } -table MaximumGrad { +table SparseSoftmaxCrossEntropy { + grad: bool; } -table MinimumGrad { +table SparseToDense { } -table NonMaxSuppression { - centerPointBox : int = 0; +table Split { + output_num: long; + size_splits: [long]; + axis: long; } -table InstanceNorm { - epsilon : float = 0.00001; +table Sqrt { } -table Loop { - subGraphIndex : int; +table Squeeze { + axis: [long]; } -table Identity { +table Square { } -table LayerNorm { - begin_norm_axis : int; - begin_params_axis : int; - epsilon : float = 0.00001; +table SquaredDifference { } -table While { - condSubgraphIndex : int; - bodySubgraphIndex : int; +table Stack { + axis: long; } -table If { +table StridedSlice { + begin_mask: long; + end_mask: long; + ellipsis_mask: long; + new_axis_mask: long; + shrink_axis_mask: long; } -table UnsortedSegmentSum { - numSegments : int; +table SubFusion { + activation_type: ActivationType = 0; } -table OnesLike { - +table SubGrad { } -table BinaryCrossEntropy { - reduction : int = 1; +table Switch { } -table BinaryCrossEntropyGrad { - reduction : int = 1; +table TensorListFromTensor { + element_dtype: long; + shape_type: long; } -table LpNormalization { - axis : int; - p : int; +table TensorListGetItem { + element_dtype: long; } -table Switch { +table TensorListReserve { + element_dtype: long; + shape_type: long; } -table Partial { - subGraphIndex : int; +table TensorListSetItem { + element_dtype: long; } -table TensorListFromTensor { 
- elementDType : int; - shapeType : int; +table TensorListStack { + num_elements: long; + element_dtype: long; } -table TensorListStack { - numElements : int; - elementDType : int; +table TileFusion { + dims: [long]; } -table TensorListGetItem { - elementDType : int; +table TopKFusion { + sorted: bool = true; + axis: long; + largest: long; } -table TensorListSetItem { - elementDType : int; +table Transpose { } -table TensorListReserve { - elementDType : int; - shapeType : int; +table Unique { } -table All { - keepDims : int; +table UnsortedSegmentSum { } -table Assert { - summarize : int; +table Unsqueeze { + axis: [long]; } -table SmoothL1Loss { - beta : float; +table Unstack { + axis: long = 0; } -table SmoothL1LossGrad { - beta : float; +table While { + cond_subgraph_index: long; + body_subgraph_index: long; } -table SigmoidCrossEntropyWithLogits { +table Where { } -table SigmoidCrossEntropyWithLogitsGrad { +table ZerosLike { } -table Reciprocal { +table Select { } -table Merge { +table If { } -table GeLU { - approximate : bool = false; +table GRU { + bidirectional: bool = false; } table NonZero { @@ -1260,14 +1040,14 @@ table RandomStandardNormal { table CropAndResize { method : ResizeMethod; extrapolation_value : float; -} +} table StridedSliceGrad { - beginMask: int; - endMask: int; - ellipsisMask: int; - newAxisMask: int; - shrinkAxisMask: int; + begin_mask: int; + end_mask: int; + ellipsis_mask: int; + new_axis_mask: int; + shrink_axis_mask: int; begin: [int]; end: [int]; stride: [int]; @@ -1288,5 +1068,4 @@ table UniformReal { seed2 : int; } table AbsGrad { - transpose_a :bool; -} \ No newline at end of file +} diff --git a/mindspore/lite/schema/ops_types.fbs b/mindspore/lite/schema/ops_types.fbs new file mode 100644 index 0000000000..8d642dbb28 --- /dev/null +++ b/mindspore/lite/schema/ops_types.fbs @@ -0,0 +1,141 @@ +/** + * Copyright 2019 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
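+ *
+ * ops_types.fbs gathers the enums (Format, ActivationType, PadMode, ReduceMode,
+ * and so on) and the Vec/Vec2D helper tables that the unified operator
+ * definitions in ops.fbs share.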
+ */ + +namespace mindspore.schema; + +enum ResizeMethod: byte { + UNKNOWN = -1, + LINEAR = 0, + NEAREST = 1, + CUBIC = 2 +} + +enum CoordinateTransformMode: byte { + ASYMMETRIC = 0, + ALIGN_CORNERS = 1, + HALF_PIXEL = 2 +} + +enum NearestMode : byte { + NORMAL = 0, + ROUND_HALF_DOWN = 1, + ROUND_HALF_UP = 2, + FLOOR = 3, + CEIL = 4 +} + +enum Format : int { + NCHW = 0, + NHWC, + NHWC4, + HWKC, + HWCK, + KCHW, + CKHW, + KHWC, + CHWK, + HW, + HW4, + NC, + NC4, + NC4HW4, + NUM_OF_FORMAT +} + +enum ActivationType : byte { + NO_ACTIVATION = 0, + RELU = 1, + SIGMOID = 2, + RELU6 = 3, + ELU = 4, + LEAKY_RELU = 5, + ABS = 6, + RELU1 = 7, + SOFTSIGN = 8, + SOFTPLUS = 9, + TANH = 10, + SELU = 11, + HSWISH = 12, + HSIGMOID = 13, + THRESHOLDRELU = 14, + LINEAR = 15, + HARD_TANH = 16, + SIGN = 17, + SWISH = 18, + GELU = 19, + UNKNOWN = 20 +} + +enum ReduceMode : byte { + ReduceMean = 0, + ReduceMax = 1, + ReduceMin = 2, + ReduceProd = 3, + ReduceSum = 4, + ReduceSumSquare = 5, + ReduceASum = 6, + ReduceAll = 7 +} + +enum PoolMode : byte { + MAX_POOLING = 0, + MEAN_POOLING = 1, +} + +enum EltwiseMode : byte { + PROD = 0, + SUM = 1, + MAXIMUM = 2, + UNKNOWN = 3 +} + +enum PadMode : byte { + PAD = 0, + SAME = 1, + VALID = 2, +} + +enum RoundMode : byte { + FLOOR = 0, + CEIL = 1 +} + +enum PaddingMode : byte { + CONSTANT = 0, + REFLECT = 1, + SYMMETRIC = 2, + MODE_RESERVED = 3 +} + +enum LshProjectionType : byte { + UNKNOWN = 0, + SPARSE = 1, + DENSE = 2 +} + +enum Reduction : byte { + REDUCTION_SUM = 0, + MEAN = 1, + NONE = 2 +} + +table Vec { + data: [long]; +} + +table Vec2D { + data: [Vec]; +} diff --git a/mindspore/lite/schema/ops_v0.fbs b/mindspore/lite/schema/ops_v0.fbs index c013aa5345..5df2576b0b 100644 --- a/mindspore/lite/schema/ops_v0.fbs +++ b/mindspore/lite/schema/ops_v0.fbs @@ -17,7 +17,7 @@ namespace mindspore.schema.v0; enum ResizeMethod: byte { - UNKNOW = -1, + UNKNOWN = -1, LINEAR = 0, NEAREST = 1, CUBIC = 2 @@ -80,27 +80,9 @@ enum ActivationType : byte { HARD_TANH = 16, SIGN = 17, SWISH = 18, - UNKNOW = 19 -} -enum ActivationGradType : byte { - NO_ACTIVATION = 0, - RELU = 1, - SIGMOID = 2, - RELU6 = 3, - ELU = 4, - LEAKY_RELU = 5, - ABS = 6, - RELU1 = 7, - SOFTSIGN = 8, - SOFTPLUS = 9, - TANH = 10, - SELU = 11, - HSWISH = 12, - HSIGMOID = 13, - THRESHOLDRELU = 14, - LINEAR = 15, - UNKNOW = 16 + UNKNOWN = 19 } + enum ReduceType : byte { REDUCE_MAX = 0, REDUCE_MEAN = 1, @@ -109,7 +91,7 @@ enum ReduceType : byte { REDUCE_LOG_SUM_EXP = 4, REDUCE_PROD = 5, REDUCE_SUM = 6, - UNKNOW = 7 + UNKNOWN = 7 } enum PoolMode : byte { @@ -121,7 +103,7 @@ enum EltwiseMode : byte { PROD = 0, SUM = 1, MAXIMUM = 2, - UNKNOW = 3 + UNKNOWN = 3 } enum PadMode : byte { @@ -1007,11 +989,6 @@ table OneHot { table Lstm{ bidirection: bool = false; - smooth: float = 0.0; -} - -table Gru{ - bidirection: bool = false; } table PriorBox { @@ -1156,9 +1133,6 @@ table While { bodySubgraphIndex : int; } -table If { -} - table UnsortedSegmentSum { numSegments : int; } @@ -1239,10 +1213,3 @@ table Reciprocal { table Merge { } - -table GeLU { - approximate : bool = false; -} - -table NonZero { -} diff --git a/mindspore/lite/src/CMakeLists.txt b/mindspore/lite/src/CMakeLists.txt index 23c63879e8..09f8139f67 100644 --- a/mindspore/lite/src/CMakeLists.txt +++ b/mindspore/lite/src/CMakeLists.txt @@ -35,9 +35,12 @@ set(LITE_SRC ${CMAKE_CURRENT_SOURCE_DIR}/common/graph_util.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/log_adapter.cc ${CMAKE_CURRENT_SOURCE_DIR}/common/string_util.cc + ${CMAKE_CURRENT_SOURCE_DIR}/common/prim_util.cc + 
${CMAKE_CURRENT_SOURCE_DIR}/common/tensor_util.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/allocator.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/runtime_api.cc ${CMAKE_CURRENT_SOURCE_DIR}/runtime/thread_pool.c + ${CMAKE_CURRENT_SOURCE_DIR}/runtime/infer_manager.cc ${CMAKE_CURRENT_SOURCE_DIR}/tensor.cc ${CMAKE_CURRENT_SOURCE_DIR}/tensorlist.cc ${CMAKE_CURRENT_SOURCE_DIR}/executor.cc @@ -104,6 +107,12 @@ if(SUPPORT_TRAIN) ${CMAKE_CURRENT_SOURCE_DIR}/train/accuracy_monitor.cc ${CMAKE_CURRENT_SOURCE_DIR}/train/classification_train_accuracy_monitor.cc ) + if(ENABLE_V0) + set(LITE_SRC + ${LITE_SRC} + ${CMAKE_CURRENT_SOURCE_DIR}/train/train_populate_parameter_v0.cc + ) + endif() endif() if(ENABLE_MINDRT) diff --git a/mindspore/lite/src/common/common.h b/mindspore/lite/src/common/common.h index 58ecbfc3da..dd8540f03b 100644 --- a/mindspore/lite/src/common/common.h +++ b/mindspore/lite/src/common/common.h @@ -32,7 +32,7 @@ enum CHWK_SHAPE { CHWK_C = 0, CHWK_H = 1, CHWK_W = 2, CHWK_K = 3 }; enum KHWC_SHAPE { KHWC_K = 0, KHWC_H = 1, KHWC_W = 2, KHWC_C = 3 }; enum CHW_SHAPE { CHW_C = 0, CHW_H = 1, CHW_W = 2 }; enum HWC_SHAPE { HWC_H = 0, HWC_W = 1, HWC_C = 2 }; -enum SCHEMA_VERSION { SCHEMA_INVALID = -1, SCHEMA_CUR = 0, SCHEMA_V0 = 1 }; +enum SCHEMA_VERSION : int { SCHEMA_INVALID = -1, SCHEMA_CUR = 0, SCHEMA_V0 = 1 }; static constexpr int kNCHWDimNumber = 4; static constexpr int kNHWCDimNumber = 4; diff --git a/mindspore/lite/src/common/graph_util.cc b/mindspore/lite/src/common/graph_util.cc index 59c8015e2b..d9c1e526dd 100644 --- a/mindspore/lite/src/common/graph_util.cc +++ b/mindspore/lite/src/common/graph_util.cc @@ -20,7 +20,11 @@ #include "src/common/graph_util.h" #include "src/common/utils.h" #include "src/common/log_adapter.h" +#include "src/common/version_manager.h" #include "include/errorcode.h" +#ifdef ENABLE_V0 +#include "schema/model_v0_generated.h" +#endif namespace mindspore { namespace lite { @@ -82,10 +86,17 @@ std::vector<size_t> GetLinkedPostNodeIdx(const lite::Model *model, const size_t return post_node_idxes; } -bool IsPackedOp(schema::PrimitiveType op_type) { - static std::vector<schema::PrimitiveType> packed_ops = { - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_MatMul}; +bool IsPackedOp(int op_type) { +#ifdef ENABLE_V0 + static std::vector<int> v0_packed_ops = { + schema::v0::PrimitiveType_Conv2D, schema::v0::PrimitiveType_DeConv2D, schema::v0::PrimitiveType_DepthwiseConv2D, + schema::v0::PrimitiveType_DeDepthwiseConv2D, schema::v0::PrimitiveType_MatMul}; + if (VersionManager::GetInstance()->CheckV0Schema()) { + return IsContain(v0_packed_ops, op_type); + } +#endif + static std::vector<int> packed_ops = {schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_Conv2dTransposeFusion, + schema::PrimitiveType_MatMul}; return IsContain(packed_ops, op_type); } } // namespace lite diff --git a/mindspore/lite/src/common/graph_util.h b/mindspore/lite/src/common/graph_util.h index a158d751fa..76b6256c5d 100644 --- a/mindspore/lite/src/common/graph_util.h +++ b/mindspore/lite/src/common/graph_util.h @@ -37,7 +37,7 @@ std::vector<size_t> GetGraphOutputNodes(const lite::Model *model); std::vector<size_t> GetLinkedPostNodeIdx(const lite::Model *model, size_t tensor_idx); -bool IsPackedOp(schema::PrimitiveType op_type); +bool IsPackedOp(int op_type); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/common/log_util.h b/mindspore/lite/src/common/log_util.h 
new file mode 100644 index 0000000000..2a87d11e2a --- /dev/null +++ b/mindspore/lite/src/common/log_util.h @@ -0,0 +1,31 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_COMMON_LOG_UTIL_H_ +#define MINDSPORE_LITE_SRC_COMMON_LOG_UTIL_H_ + +#include "src/common/log_adapter.h" +#include "include/errorcode.h" + +#define MSLITE_CHECK_PTR(ptr) \ + do { \ + if ((ptr) == nullptr) { \ + MS_LOG(ERROR) << ": The pointer[" << #ptr << "] is null."; \ + return mindspore::lite::RET_ERROR; \ + } \ + } while (0) + +#endif // MINDSPORE_LITE_SRC_COMMON_LOG_UTIL_H_ diff --git a/mindspore/lite/src/common/prim_inner.h b/mindspore/lite/src/common/prim_inner.h new file mode 100644 index 0000000000..65120615e9 --- /dev/null +++ b/mindspore/lite/src/common/prim_inner.h @@ -0,0 +1,36 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_COMMON_PRIM_INNER_H_ +#define MINDSPORE_LITE_SRC_COMMON_PRIM_INNER_H_ +#include <string> + +namespace mindspore { +namespace lite { +enum PRIM_INNER_TYPE : int { + PRIM_TO_FORMAT = 10000, + PRIM_RETURN = 10001, + PRIM_MAKE_TUPLE = 10002, + PRIM_TUPLE_GET_ITEM = 10003, + PRIM_LOOP = 10004, + PRIM_CONSTANT = 10005, + PRIM_OPENCL_FUSION_ELTWISE = 10006, +}; + +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_SRC_COMMON_PRIM_INNER_H_ diff --git a/mindspore/lite/src/common/prim_util.cc b/mindspore/lite/src/common/prim_util.cc new file mode 100644 index 0000000000..e8cad919a6 --- /dev/null +++ b/mindspore/lite/src/common/prim_util.cc @@ -0,0 +1,122 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/common/prim_util.h" +#include "src/common/version_manager.h" +#include "schema/model_generated.h" +#ifdef ENABLE_V0 +#include "schema/model_v0_generated.h" +#endif + +namespace mindspore { +namespace lite { +int GetPrimitiveType(const void *primitive) { + if (primitive == nullptr) { + return -1; + } +#ifdef ENABLE_V0 + if (VersionManager::GetInstance()->GetSchemaVersion() == SCHEMA_V0) { + return static_cast<const schema::v0::Primitive *>(primitive)->value_type(); + } +#endif + return static_cast<const schema::Primitive *>(primitive)->value_type(); +} + +const char *PrimitiveTypeName(int type) { +#ifdef ENABLE_V0 + if (VersionManager::GetInstance()->GetSchemaVersion() == SCHEMA_V0) { + return schema::v0::EnumNamePrimitiveType(static_cast<schema::v0::PrimitiveType>(type)); + } +#endif + return schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(type)); +} + +const char *PrimitiveCurVersionTypeName(int type) { + return schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(type)); +} + +int GenPrimVersionKey(int primitive_type, int schema_version) { return primitive_type * 1000 + schema_version; } + +bool IsPartialNode(const void *primitive) { + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + return reinterpret_cast<const schema::Primitive *>(primitive)->value_type() == schema::PrimitiveType_PartialFusion; + } +#ifdef ENABLE_V0 + if (schema_version == SCHEMA_V0) { + return reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_type() == + schema::v0::PrimitiveType_Partial; + } +#endif + return false; +} + +int GetPartialGraphIndex(const void *primitive) { + int index = -1; + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + index = static_cast<const schema::Primitive *>(primitive)->value_as_PartialFusion()->sub_graph_index(); + } +#ifdef ENABLE_V0 + if (schema_version == SCHEMA_V0) { + index = static_cast<const schema::v0::Primitive *>(primitive)->value_as_Partial()->subGraphIndex(); + } +#endif + return index; +} + +bool IsWhileNode(const void *primitive) { + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + return reinterpret_cast<const schema::Primitive *>(primitive)->value_type() == schema::PrimitiveType_While; + } +#ifdef ENABLE_V0 + if (schema_version == SCHEMA_V0) { + return reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_type() == schema::v0::PrimitiveType_While; + } +#endif + return false; +} + +int GetWhileBodySubgraphIndex(const void *primitive) { + int index = -1; + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + index = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While()->body_subgraph_index(); + } +#ifdef ENABLE_V0 + if (schema_version == SCHEMA_V0) { + index = reinterpret_cast<const schema::v0::Primitive *>(primitive)->value_as_While()->bodySubgraphIndex(); + } +#endif + return index; +} + +int GetWhileCondSubgraphIndex(const void *primitive) { + int index = -1; + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + if (schema_version == SCHEMA_CUR) { + index = reinterpret_cast<const schema::Primitive *>(primitive)->value_as_While()->cond_subgraph_index(); + } +#ifdef ENABLE_V0 + if (schema_version == SCHEMA_V0) { + index = reinterpret_cast<const schema::v0::Primitive 
*>(primitive)->value_as_While()->condSubgraphIndex(); + } +#endif + return index; +} +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/common/prim_util.h b/mindspore/lite/src/common/prim_util.h new file mode 100644 index 0000000000..f414a2d644 --- /dev/null +++ b/mindspore/lite/src/common/prim_util.h @@ -0,0 +1,34 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_COMMON_PRIM_UTIL_H_ +#define MINDSPORE_LITE_SRC_COMMON_PRIM_UTIL_H_ + +namespace mindspore { +namespace lite { +int GetPrimitiveType(const void *prim); +const char *PrimitiveTypeName(int type); +const char *PrimitiveCurVersionTypeName(int type); +int GenPrimVersionKey(int primitive_type, int schema_version); +bool IsPartialNode(const void *primitive); +int GetPartialGraphIndex(const void *primitive); +bool IsWhileNode(const void *primitive); +int GetWhileBodySubgraphIndex(const void *primitive); +int GetWhileCondSubgraphIndex(const void *primitive); +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_SRC_COMMON_PRIM_UTIL_H_ diff --git a/mindspore/lite/src/common/tensor_util.cc b/mindspore/lite/src/common/tensor_util.cc new file mode 100644 index 0000000000..7c00a156a0 --- /dev/null +++ b/mindspore/lite/src/common/tensor_util.cc @@ -0,0 +1,375 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/common/tensor_util.h" +#include "schema/model_generated.h" +#include "include/errorcode.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +namespace lite { +int InputTensor2TensorC(const std::vector<lite::Tensor *> &tensors_in, std::vector<TensorC *> *tensors_out) { + for (size_t i = 0; i < tensors_in.size(); ++i) { + size_t shape_size = tensors_in[i]->shape().size(); + if (shape_size >= MAX_SHAPE_SIZE) { + MS_LOG(ERROR) << "shape size " << shape_size << " unsupported!"; + return RET_ERROR; + } + auto *tensor_c = static_cast<TensorC *>(malloc(sizeof(TensorC))); + if (tensor_c == nullptr) { + MS_LOG(ERROR) << "malloc tensor fail!"; + return RET_ERROR; + } + tensor_c->format_ = tensors_in[i]->format(); + tensor_c->data_type_ = tensors_in[i]->data_type(); + tensor_c->shape_size_ = shape_size; + tensor_c->data_ = tensors_in[i]->data_c(); + for (size_t j = 0; j < shape_size; ++j) { + tensor_c->shape_[j] = tensors_in[i]->shape()[j]; + } + tensors_out->push_back(tensor_c); + } + return RET_OK; +} + +int OutputTensor2TensorC(const std::vector<lite::Tensor *> &tensors, std::vector<TensorC *> *tensors_c) { + for (size_t i = 0; i < tensors.size(); ++i) { + auto *tensor_c = static_cast<TensorC *>(malloc(sizeof(TensorC))); + if (tensor_c == nullptr) { + MS_LOG(ERROR) << "malloc tensor fail!"; + return RET_ERROR; + } + tensor_c->data_type_ = kNumberTypeFloat32; + tensor_c->format_ = schema::Format::Format_NCHW; + tensor_c->data_ = nullptr; + tensor_c->shape_size_ = 0; + tensors_c->push_back(tensor_c); + } + return RET_OK; +} + +void TensorC2LiteTensor(const std::vector<TensorC *> &tensors_in, std::vector<lite::Tensor *> *tensors_out) { + for (size_t i = 0; i < tensors_in.size(); ++i) { + tensors_out->at(i)->set_format(static_cast<schema::Format>(tensors_in[i]->format_)); + tensors_out->at(i)->set_data_type(static_cast<TypeId>(tensors_in[i]->data_type_)); + tensors_out->at(i)->set_shape({tensors_in[i]->shape_, tensors_in[i]->shape_ + tensors_in[i]->shape_size_}); + } +} + +void FreeAllTensorC(std::vector<TensorC *> *tensors_in) { + for (auto &i : *tensors_in) { + if (i == nullptr) { + continue; + } + if (i->data_type_ == kObjectTypeTensorType) { + TensorListC *tensorListC = reinterpret_cast<TensorListC *>(i); + FreeTensorListC(tensorListC); + tensorListC = nullptr; + } else { + free(i); + i = nullptr; + } + } + tensors_in->clear(); +} + +void FreeTensorListC(TensorListC *tensorlist_c) { + for (size_t i = 0; i < tensorlist_c->element_num_; i++) { + free(tensorlist_c->tensors_[i]); + tensorlist_c->tensors_[i] = nullptr; + } + if (tensorlist_c->tensors_ != nullptr) { + free(tensorlist_c->tensors_); + tensorlist_c->tensors_ = nullptr; + } + free(tensorlist_c); +} + +TensorC *NewTensorC() { + auto *tensor_c = static_cast<TensorC *>(malloc(sizeof(TensorC))); + if (tensor_c == nullptr) { + MS_LOG(ERROR) << "malloc tensor fail!"; + return nullptr; + } + tensor_c->data_type_ = kNumberTypeFloat32; + tensor_c->format_ = schema::Format::Format_NCHW; + tensor_c->data_ = nullptr; + tensor_c->shape_size_ = 0; + return tensor_c; +} + +void Tensor2TensorC(Tensor *src, TensorC *dst) { + dst->format_ = src->format(); + dst->data_ = src->data_c(); + dst->data_type_ = src->data_type(); + dst->shape_size_ = src->shape().size(); + for (size_t i = 0; i < dst->shape_size_; i++) { + dst->shape_[i] = src->shape().at(i); + } +} + +void TensorC2Tensor(TensorC *src, Tensor *dst) { + dst->set_format(static_cast<schema::Format>(src->format_)); + 
dst->set_data_type(static_cast<TypeId>(src->data_type_)); // get data during the runtime period + dst->set_shape(std::vector<int>(src->shape_, src->shape_ + src->shape_size_)); +} + +int TensorList2TensorListC(TensorList *src, TensorListC *dst) { + dst->data_type_ = static_cast<TypeIdC>(src->data_type()); + dst->format_ = src->format(); + dst->element_num_ = src->shape().empty() ? 0 : src->tensors().size(); + + dst->tensors_ = reinterpret_cast<TensorC **>(malloc(dst->element_num_ * sizeof(TensorC *))); + if (dst->tensors_ == nullptr) { + return RET_ERROR; + } + memset(dst->tensors_, 0, dst->element_num_ * sizeof(TensorC *)); + for (size_t i = 0; i < dst->element_num_; i++) { + dst->tensors_[i] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC))); + if (dst->tensors_[i] == nullptr) { + return NNACL_ERR; + } + memset(dst->tensors_[i], 0, sizeof(TensorC)); + Tensor2TensorC(src->tensors().at(i), dst->tensors_[i]); + } + + dst->tensors_data_type_ = src->tensors_data_type(); + dst->element_shape_size_ = src->element_shape().size(); + for (size_t i = 0; i < dst->element_shape_size_; i++) { + dst->element_shape_[i] = src->element_shape().at(i); + } + dst->max_elements_num_ = src->max_elements_num(); + return NNACL_OK; +} + +void TensorListC2TensorList(TensorListC *src, TensorList *dst) { + dst->set_data_type(static_cast<TypeId>(src->data_type_)); + dst->set_format(static_cast<schema::Format>(src->format_)); + dst->set_shape(std::vector<int>(1, src->element_num_)); + dst->set_tensors_data_type(static_cast<TypeId>(src->tensors_data_type_)); + + // Set Tensors + for (size_t i = 0; i < src->element_num_; i++) { + TensorC2Tensor(src->tensors_[i], dst->GetTensor(i)); + } + + dst->set_element_shape(std::vector<int>(src->element_shape_, src->element_shape_ + src->element_shape_size_)); + dst->set_max_elements_num(src->max_elements_num_); +} + +int GenerateMergeOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vector<lite::Tensor *> *outputs, + std::vector<TensorC *> *out_tensor_c) { + int ret = RET_OK; + for (size_t i = 0; i < outputs->size(); i++) { + if (inputs.at(i)->data_type() == kObjectTypeTensorType) { + auto *output_tensorlist = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC))); + if (output_tensorlist == nullptr) { + return RET_ERROR; + } + memset(output_tensorlist, 0, sizeof(TensorListC)); + output_tensorlist->element_num_ = inputs[i]->shape().empty() ? 
0 : inputs[i]->shape().at(0); + if (output_tensorlist->element_num_ != 0) { + output_tensorlist->tensors_ = + reinterpret_cast<TensorC **>(malloc(output_tensorlist->element_num_ * sizeof(TensorC *))); + if (output_tensorlist->tensors_ == nullptr) { + free(output_tensorlist); + output_tensorlist = nullptr; + return RET_ERROR; + } + memset(output_tensorlist->tensors_, 0, output_tensorlist->element_num_ * sizeof(TensorC *)); + for (size_t j = 0; j < output_tensorlist->element_num_; j++) { + output_tensorlist->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC))); + if (output_tensorlist->tensors_[j] == nullptr) { + for (size_t k = 0; k < j; k++) { + free(output_tensorlist->tensors_[k]); + output_tensorlist->tensors_[k] = nullptr; + } + free(output_tensorlist->tensors_); + output_tensorlist->tensors_ = nullptr; + free(output_tensorlist); + output_tensorlist = nullptr; + return RET_ERROR; + } + memset(output_tensorlist->tensors_[j], 0, sizeof(TensorC)); + } + } + + out_tensor_c->push_back(reinterpret_cast<TensorC *const>(output_tensorlist)); + } else { + auto *output_tensor = NewTensorC(); + if (output_tensor == nullptr) { + MS_LOG(ERROR) << "malloc tensor_c failed"; + ret = RET_ERROR; + break; + } + out_tensor_c->push_back(reinterpret_cast<TensorC *const>(output_tensor)); + } + } + return ret; +} + +int GenerateSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vector<lite::Tensor *> *outputs, + std::vector<TensorC *> *out_tensor_c) { + int ret = RET_OK; + MS_ASSERT(inputs.size() == outputs->size() / 2 + 1); + out_tensor_c->resize(outputs->size()); + for (size_t i = 0; i < outputs->size() / 2; i++) { + if (inputs.at(i + 1)->data_type() == kObjectTypeTensorType) { + auto *output_tensorlist1 = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC))); + if (output_tensorlist1 == nullptr) { + MS_LOG(ERROR) << "malloc tensorlist_c failed"; + ret = RET_ERROR; + break; + } + + memset(output_tensorlist1, 0, sizeof(TensorListC)); + output_tensorlist1->element_num_ = inputs[i + 1]->shape().empty() ? 0 : inputs[i + 1]->shape().at(0); + if (output_tensorlist1->element_num_ != 0) { + output_tensorlist1->tensors_ = + reinterpret_cast<TensorC **>(malloc(output_tensorlist1->element_num_ * sizeof(TensorC *))); + if (output_tensorlist1->tensors_ == nullptr) { + free(output_tensorlist1); + output_tensorlist1 = nullptr; + return RET_ERROR; + } + memset(output_tensorlist1->tensors_, 0, output_tensorlist1->element_num_ * sizeof(TensorC *)); + for (size_t j = 0; j < output_tensorlist1->element_num_; j++) { + output_tensorlist1->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC))); + if (output_tensorlist1->tensors_[j] == nullptr) { + for (size_t k = 0; k < j; k++) { + free(output_tensorlist1->tensors_[k]); + output_tensorlist1->tensors_[k] = nullptr; + } + free(output_tensorlist1->tensors_); + output_tensorlist1->tensors_ = nullptr; + return RET_ERROR; + } + memset(output_tensorlist1->tensors_[j], 0, sizeof(TensorC)); + } + } + + out_tensor_c->at(i) = reinterpret_cast<TensorC *const>(output_tensorlist1); + + auto *output_tensorlist2 = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC))); + if (output_tensorlist2 == nullptr) { + return RET_ERROR; + } + memset(output_tensorlist2, 0, sizeof(TensorListC)); + output_tensorlist2->element_num_ = inputs[i + 1]->shape().empty() ? 
0 : inputs[i + 1]->shape().at(0); + if (output_tensorlist2->element_num_ != 0) { + output_tensorlist2->tensors_ = + reinterpret_cast<TensorC **>(malloc(output_tensorlist2->element_num_ * sizeof(TensorC *))); + if (output_tensorlist2->tensors_ == nullptr) { + free(output_tensorlist2); + output_tensorlist2 = nullptr; + return RET_ERROR; + } + memset(output_tensorlist2->tensors_, 0, output_tensorlist2->element_num_ * sizeof(TensorC *)); + for (size_t j = 0; j < output_tensorlist2->element_num_; j++) { + output_tensorlist2->tensors_[j] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorC))); + if (output_tensorlist2->tensors_[j] == nullptr) { + for (size_t k = 0; k < j; k++) { + free(output_tensorlist2->tensors_[k]); + output_tensorlist2->tensors_[k] = nullptr; + } + free(output_tensorlist2->tensors_); + output_tensorlist2->tensors_ = nullptr; + free(output_tensorlist2); + output_tensorlist2 = nullptr; + return RET_ERROR; + } + memset(output_tensorlist2->tensors_[j], 0, sizeof(TensorC)); + } + } + + out_tensor_c->at(i + outputs->size() / 2) = reinterpret_cast<TensorC *const>(output_tensorlist2); + } else { + auto *output_tensor1 = NewTensorC(); + if (output_tensor1 == nullptr) { + MS_LOG(ERROR) << "malloc tensor_c failed"; + ret = RET_ERROR; + break; + } + out_tensor_c->at(i) = reinterpret_cast<TensorC *const>(output_tensor1); + auto *output_tensor2 = NewTensorC(); + if (output_tensor2 == nullptr) { + MS_LOG(ERROR) << "malloc tensor_c failed"; + ret = RET_ERROR; + break; + } + out_tensor_c->at(i + outputs->size() / 2) = reinterpret_cast<TensorC *const>(output_tensor2); + } + } + return ret; +} + +int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs, + std::vector<lite::Tensor *> *outputs, std::vector<TensorC *> *out_tensor_c) { + int ret = RET_OK; + if (parameter->type_ == mindspore::schema::PrimitiveType_TensorListFromTensor || + parameter->type_ == mindspore::schema::PrimitiveType_TensorListReserve || + parameter->type_ == mindspore::schema::PrimitiveType_TensorListSetItem) { + // TensorListC ->TensorC + auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC))); + if (tensor_list_c == nullptr) { + return RET_ERROR; + } + memset(tensor_list_c, 0, sizeof(TensorListC)); + out_tensor_c->push_back(reinterpret_cast<TensorC *const>(tensor_list_c)); + } else if (parameter->type_ == mindspore::schema::PrimitiveType_Merge) { + ret = GenerateMergeOutTensorC(inputs, outputs, out_tensor_c); + } else if (parameter->type_ == mindspore::schema::PrimitiveType_Switch) { + ret = GenerateSwitchOutTensorC(inputs, outputs, out_tensor_c); + } else { + ret = OutputTensor2TensorC(*outputs, out_tensor_c); + } + return ret; +} + +int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs, + std::vector<lite::Tensor *> *outputs, std::vector<TensorC *> *in_tensor_c) { + int ret = RET_OK; + for (auto input : inputs) { + if (input->data_type() == kObjectTypeTensorType) { + // Tensor ->TensorList -> TensorListC -> TensorC + auto *tensor_list = reinterpret_cast<TensorList *>(input); + auto *tensor_list_c = reinterpret_cast<TensorListC *>(malloc(sizeof(TensorListC))); + if (tensor_list_c == nullptr) { + ret = RET_NULL_PTR; + break; + } + memset(tensor_list_c, 0, sizeof(TensorListC)); + ret = TensorList2TensorListC(tensor_list, tensor_list_c); + if (ret != RET_OK) { + return NNACL_ERR; + } + in_tensor_c->push_back(reinterpret_cast<TensorC *>(tensor_list_c)); + } else { + // Tensor -> TensorC + auto *tensor_c = 
reinterpret_cast<TensorC *>(malloc(sizeof(TensorC))); + if (tensor_c == nullptr) { + ret = RET_NULL_PTR; + break; + } + Tensor2TensorC(input, tensor_c); + in_tensor_c->emplace_back(tensor_c); + } + } + return ret; +} + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/common/tensor_util.h b/mindspore/lite/src/common/tensor_util.h new file mode 100644 index 0000000000..6fdce47445 --- /dev/null +++ b/mindspore/lite/src/common/tensor_util.h @@ -0,0 +1,48 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_SRC_COMMON_TENSOR_UTIL_H_ +#define MINDSPORE_LITE_SRC_COMMON_TENSOR_UTIL_H_ +#include <vector> +#include "src/tensor.h" +#include "src/tensorlist.h" +#include "nnacl/tensor_c.h" +#include "nnacl/infer/common_infer.h" + +namespace mindspore { +namespace lite { +int InputTensor2TensorC(const std::vector<lite::Tensor *> &tensors_in, std::vector<TensorC *> *tensors_out); +int OutputTensor2TensorC(const std::vector<lite::Tensor *> &tensors_in, std::vector<TensorC *> *tensors_out); +void TensorC2LiteTensor(const std::vector<TensorC *> &tensors_in, std::vector<lite::Tensor *> *tensors_out); +void FreeAllTensorC(std::vector<TensorC *> *tensors_in); +void FreeTensorListC(TensorListC *tensorListC); +TensorC *NewTensorC(); +void Tensor2TensorC(Tensor *src, TensorC *dst); +void TensorC2Tensor(TensorC *src, Tensor *dst); +int TensorList2TensorListC(TensorList *src, TensorListC *dst); +void TensorListC2TensorList(TensorListC *src, TensorList *dst); +int GenerateMergeOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vector<lite::Tensor *> *outputs, + std::vector<TensorC *> *out_tensor_c); +int GenerateSwitchOutTensorC(const std::vector<lite::Tensor *> &inputs, std::vector<lite::Tensor *> *outputs, + std::vector<TensorC *> *out_tensor_c); +int GenerateInTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs, + std::vector<lite::Tensor *> *outputs, std::vector<TensorC *> *in_tensor_c); +int GenerateOutTensorC(const OpParameter *const parameter, const std::vector<lite::Tensor *> &inputs, + std::vector<lite::Tensor *> *outputs, std::vector<TensorC *> *out_tensor_c); +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_SRC_COMMON_TENSOR_UTIL_H_ diff --git a/mindspore/lite/src/common/version_manager.h b/mindspore/lite/src/common/version_manager.h index 5b336c000d..4998963ed7 100644 --- a/mindspore/lite/src/common/version_manager.h +++ b/mindspore/lite/src/common/version_manager.h @@ -18,8 +18,7 @@ #define MINDSPORE_LITE_SRC_COMMON_VERSION_MANAGER_H_ #include <string> -#include "src/lite_model.h" - +#include "src/common/common.h" namespace mindspore { namespace lite { class VersionManager { @@ -32,6 +31,7 @@ class VersionManager { void SetSchemaVersion(const int schema_version) { schema_version_ = schema_version; } int GetSchemaVersion() const { return schema_version_; } + bool CheckV0Schema() const { return schema_version_ == 
SCHEMA_VERSION::SCHEMA_V0; } private: VersionManager() = default; diff --git a/mindspore/lite/src/dequant.cc b/mindspore/lite/src/dequant.cc index 1591281a05..6987c9a45c 100644 --- a/mindspore/lite/src/dequant.cc +++ b/mindspore/lite/src/dequant.cc @@ -18,7 +18,7 @@ #include <memory> #include "src/dequant.h" #include "src/huffman_decode.h" -#include "src/ops/matmul.h" +#include "nnacl/matmul_parameter.h" namespace mindspore::lite { float *DequantUtil::DequantWeight(lite::Tensor *input_tensor, bool channel_first) { @@ -66,7 +66,7 @@ int DequantUtil::UnPackToInt(const schema::Tensor *input_tensor, void *unpack_in return RET_OK; } -std::map<Tensor *, std::pair<TypeId, void *>> DequantUtil::DequantTensor(const mindspore::lite::PrimitiveC *primitive, +std::map<Tensor *, std::pair<TypeId, void *>> DequantUtil::DequantTensor(OpParameter *op_param, const std::vector<Tensor *> &in_tensors, TypeId data_type, bool need_restore) { std::map<Tensor *, std::pair<TypeId, void *>> tensor_origin_data; @@ -76,13 +76,12 @@ std::map<Tensor *, std::pair<TypeId, void *>> DequantUtil::DequantTensor(const m MS_ASSERT(weight_tensor != nullptr); input_i++; auto channel_first = true; - if ((schema::PrimitiveType)primitive->Type() == schema::PrimitiveType_MatMul && - weight_tensor->shape().size() == 2) { - auto param = reinterpret_cast<mindspore::lite::MatMul *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); + if (op_param->type_ == schema::PrimitiveType_MatMul && weight_tensor->shape().size() == 2) { + auto param = reinterpret_cast<MatMulParameter *>(op_param); if (input_i == 1) { - channel_first = !param->GetTransposeA(); + channel_first = !param->a_transpose_; } else if (input_i == 2) { - channel_first = param->GetTransposeB(); + channel_first = param->b_transpose_; } else { MS_LOG(WARNING) << "unexpected input_i"; } diff --git a/mindspore/lite/src/dequant.h b/mindspore/lite/src/dequant.h index a45aa03620..f2c7a76655 100644 --- a/mindspore/lite/src/dequant.h +++ b/mindspore/lite/src/dequant.h @@ -33,7 +33,7 @@ class DequantUtil { static int UnPackToInt(const schema::Tensor *input_tensor, void *weight_unpack_data); - static std::map<Tensor *, std::pair<TypeId, void *>> DequantTensor(const mindspore::lite::PrimitiveC *primitive, + static std::map<Tensor *, std::pair<TypeId, void *>> DequantTensor(OpParameter *op_param, const std::vector<Tensor *> &in_tensors, TypeId data_type, bool need_restore = true); diff --git a/mindspore/lite/src/kernel_registry.cc b/mindspore/lite/src/kernel_registry.cc index 3a964e45c4..063daa9bd0 100644 --- a/mindspore/lite/src/kernel_registry.cc +++ b/mindspore/lite/src/kernel_registry.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
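The dequant change above shows the pattern this patch applies throughout: decisions that used to call PrimitiveC accessors (GetTransposeA, GetTransposeB) now read fields of the plain C OpParameter. A sketch of the channel-order decision for a 2-D MatMul weight under that scheme (the helper name is illustrative; input_i numbers the weight inputs as in DequantTensor above, and the a_transpose_/b_transpose_ fields come from nnacl/matmul_parameter.h):

#include "nnacl/matmul_parameter.h"  // MatMulParameter, embeds OpParameter

// Sketch only: input_i == 1 is the first weight input, input_i == 2 the second.
bool MatMulChannelFirst(OpParameter *op_param, int input_i) {
  // valid only when op_param->type_ == schema::PrimitiveType_MatMul
  auto *param = reinterpret_cast<MatMulParameter *>(op_param);
  return (input_i == 1) ? !param->a_transpose_ : param->b_transpose_;
}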
@@ -16,6 +16,9 @@ #include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/ops/populate/populate_register.h" +#include "src/common/version_manager.h" +#include "src/common/prim_util.h" +#include "nnacl/pooling_parameter.h" #ifdef ENABLE_ARM64 #include <asm/hwcap.h> #include "common/utils.h" @@ -38,6 +41,7 @@ KernelRegistry *KernelRegistry::GetInstance() { if (instance.creator_arrays_ == nullptr) { return nullptr; } + memset(instance.creator_arrays_, 0, array_size_ * sizeof(kernel::KernelCreator)); } return &instance; } @@ -76,7 +80,7 @@ int KernelRegistry::GetCreatorFuncIndex(const kernel::KernelKey desc) { int index; int device_index = static_cast<int>(desc.arch) - kKernelArch_MIN; int dType_index = static_cast<int>(desc.data_type) - kNumberTypeBegin; - int op_index = static_cast<int>(desc.type) - PrimitiveType_MIN; + int op_index = static_cast<int>(desc.type); index = device_index * data_type_length_ * op_type_length_ + dType_index * op_type_length_ + op_index; return index; } @@ -91,8 +95,7 @@ void KernelRegistry::RegKernel(const KernelKey desc, const kernel::KernelCreator creator_arrays_[index] = creator; } -void KernelRegistry::RegKernel(const KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType op_type, - kernel::KernelCreator creator) { +void KernelRegistry::RegKernel(KERNEL_ARCH arch, TypeId data_type, int op_type, kernel::KernelCreator creator) { KernelKey desc = {arch, data_type, op_type}; int index = GetCreatorFuncIndex(desc); if (index >= array_size_) { @@ -105,36 +108,6 @@ void KernelRegistry::RegKernel(const KERNEL_ARCH arch, const TypeId data_type, c bool KernelRegistry::Merge(const std::unordered_map<KernelKey, KernelCreator> &new_creators) { return false; } -kernel::LiteKernel *KernelRegistry::GetKernel(const std::vector<Tensor *> &in_tensors, - const std::vector<Tensor *> &out_tensors, const PrimitiveC *primitive, - const InnerContext *ctx, const kernel::KernelKey &key) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != ctx); - auto func_pointer = PopulateRegistry::GetInstance()->GetParameterCreator(schema::PrimitiveType(primitive->Type())); - if (func_pointer == nullptr) { - MS_LOG(ERROR) << "ParameterCreator function pointer is nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)primitive->Type()); - return nullptr; - } - auto parameter = func_pointer(primitive); - if (parameter == nullptr) { - MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)primitive->Type()); - return nullptr; - } - auto creator = GetCreator(key); - if (creator != nullptr) { - auto kernel = creator(in_tensors, out_tensors, parameter, ctx, key, primitive); - if (kernel != nullptr) { - kernel->set_desc(key); - } - return kernel; - } else { - free(parameter); - } - return nullptr; -} - KernelRegistry::~KernelRegistry() { KernelRegistry *instance = GetInstance(); std::unique_lock<std::mutex> malloc_creator_array(instance->lock_); @@ -143,4 +116,21 @@ KernelRegistry::~KernelRegistry() { instance->creator_arrays_ = nullptr; } } + +int KernelRegistry::GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, + const InnerContext *ctx, const kernel::KernelKey &key, OpParameter *parameter, + kernel::LiteKernel **kernel) { + MS_ASSERT(ctx != nullptr); + MS_ASSERT(kernel != nullptr); + auto creator = GetCreator(key); + if (creator != nullptr) { + *kernel = creator(in_tensors, out_tensors, parameter, ctx, key); + if (*kernel != nullptr) { +
(*kernel)->set_desc(key); + return RET_OK; + } + return RET_ERROR; + } + return RET_NOT_SUPPORT; +} } // namespace mindspore::lite diff --git a/mindspore/lite/src/kernel_registry.h b/mindspore/lite/src/kernel_registry.h index 77922c4c35..31f3b875fb 100644 --- a/mindspore/lite/src/kernel_registry.h +++ b/mindspore/lite/src/kernel_registry.h @@ -40,10 +40,11 @@ class KernelRegistry { const kernel::KernelCreator *GetCreatorArrays(); int GetCreatorFuncIndex(kernel::KernelKey desc); void RegKernel(kernel::KernelKey desc, kernel::KernelCreator creator); - void RegKernel(kernel::KERNEL_ARCH arch, TypeId data_type, schema::PrimitiveType type, kernel::KernelCreator creator); + void RegKernel(kernel::KERNEL_ARCH arch, TypeId data_type, int type, kernel::KernelCreator creator); bool Merge(const std::unordered_map<kernel::KernelKey, kernel::KernelCreator> &newCreators); - kernel::LiteKernel *GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, - const PrimitiveC *primitive, const InnerContext *ctx, const kernel::KernelKey &key); + int GetKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, + const InnerContext *ctx, const kernel::KernelKey &key, OpParameter *op_parameter, + kernel::LiteKernel **kernel); protected: static const int device_type_length_{kKernelArch_MAX - kKernelArch_MIN + 1}; @@ -63,7 +64,7 @@ class KernelRegistrar { } ~KernelRegistrar() = default; - KernelRegistrar(const kernel::KERNEL_ARCH arch, const TypeId data_type, const schema::PrimitiveType op_type, + KernelRegistrar(const kernel::KERNEL_ARCH arch, const TypeId data_type, const int op_type, kernel::KernelCreator creator) { KernelRegistry::GetInstance()->RegKernel(arch, data_type, op_type, creator); } diff --git a/mindspore/lite/src/lite_kernel.cc b/mindspore/lite/src/lite_kernel.cc index 4fb2c18cd5..485c7bd58b 100644 --- a/mindspore/lite/src/lite_kernel.cc +++ b/mindspore/lite/src/lite_kernel.cc @@ -20,6 +20,8 @@ #include <set> #include "src/tensor.h" #include "src/common/utils.h" +#include "src/runtime/infer_manager.h" +#include "src/common/version_manager.h" namespace mindspore::kernel { using mindspore::lite::RET_ERROR; @@ -87,10 +89,10 @@ int LiteKernel::FreeInWorkTensor() const { int LiteKernel::PreProcess() { if (!InferShapeDone()) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(true); - auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_); + op_parameter_->infer_flag_ = true; + auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_); if (ret != 0) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(false); + op_parameter_->infer_flag_ = false; MS_LOG(ERROR) << "InferShape fail!"; return ret; } diff --git a/mindspore/lite/src/lite_kernel.h b/mindspore/lite/src/lite_kernel.h index 365a40e5e9..3d7788fbe6 100644 --- a/mindspore/lite/src/lite_kernel.h +++ b/mindspore/lite/src/lite_kernel.h @@ -20,7 +20,6 @@ #include <vector> #include <memory> #include <utility> -#include "src/ops/primitive_c.h" #include "src/common/utils.h" #ifdef ENABLE_ARM #include <arm_neon.h> @@ -29,6 +28,7 @@ #include "src/inner_context.h" #include "src/tensor.h" #include "include/errorcode.h" +#include "schema/model_generated.h" static constexpr int kPerTensor = 1; static constexpr size_t kPerBatch = 3; @@ -38,7 +38,7 @@ enum KERNEL_ARCH { kCPU, kGPU, kAPU, kNPU, kKernelArch_MIN = kCPU, kKernelArch_M struct KernelKey { KERNEL_ARCH arch; TypeId data_type; - 
schema::PrimitiveType type; + int type; bool operator<(const KernelKey &dst) const { if (arch != dst.arch) { @@ -57,11 +57,10 @@ class LiteKernel { public: LiteKernel() = default; LiteKernel(OpParameter *parameter, std::vector<lite::Tensor *> in_tensors, std::vector<lite::Tensor *> out_tensors, - const lite::InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive) + const lite::InnerContext *ctx) : op_parameter_(parameter), in_tensors_(std::move(in_tensors)), out_tensors_(std::move(out_tensors)), - primitive_(primitive), context_(ctx) { if (op_parameter_ != nullptr && ctx != nullptr) { op_parameter_->thread_num_ = ctx->thread_num_; @@ -169,8 +168,6 @@ class LiteKernel { void set_desc(const KernelKey kernel_key) { desc_ = kernel_key; } - const mindspore::lite::PrimitiveC *GetPrimitive() const { return primitive_; } - SubGraphType subgraph_type() const { return this->subgraph_type_; } virtual std::string ToString() const; @@ -184,7 +181,12 @@ class LiteKernel { #endif protected: - bool InferShapeDone() { return !(primitive_ != nullptr && !primitive_->infer_flag()); } + bool InferShapeDone() { + if (op_parameter_ != nullptr) { + return op_parameter_->infer_flag_; + } + return false; + } KernelKey desc_{}; std::string name_; @@ -192,7 +194,6 @@ class LiteKernel { // tensor will free in ~lite_session() std::vector<lite::Tensor *> in_tensors_; std::vector<lite::Tensor *> out_tensors_; - const mindspore::lite::PrimitiveC *primitive_ = nullptr; const lite::InnerContext *context_ = nullptr; std::vector<LiteKernel *> in_kernels_; std::vector<LiteKernel *> out_kernels_; @@ -208,8 +209,7 @@ class LiteKernel { typedef LiteKernel *(*KernelCreator)(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive); + const lite::InnerContext *ctx, const KernelKey &desc); class LiteKernelUtil { public: @@ -231,9 +231,8 @@ class LiteKernelUtil { template <class T> kernel::LiteKernel *LiteKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "kernel: " << parameter->name_ << "is nullptr."; free(parameter); diff --git a/mindspore/lite/src/lite_model.cc b/mindspore/lite/src/lite_model.cc index 17c58cf4aa..5863459720 100644 --- a/mindspore/lite/src/lite_model.cc +++ b/mindspore/lite/src/lite_model.cc @@ -18,26 +18,28 @@ #include <vector> #include <set> #include <unordered_map> -#include "src/ops/while.h" +#include "src/common/prim_util.h" #ifdef ENABLE_V0 #include "src/ops/compat/compat_register.h" #endif namespace mindspore::lite { #ifdef ENABLE_V0 -int LiteModel::ConvertAttrs(Model::Node *node, const schema::v0::Primitive *prim, - std::vector<schema::Tensor *> *dst_tensor) { +int LiteModel::ConvertAttrs(Model::Node *node, std::vector<schema::Tensor *> *dst_tensor) { if (node == nullptr || dst_tensor == nullptr) { MS_LOG(ERROR) << "node or tensor_vec is nullptr."; return RET_ERROR; } + auto primitive = node->primitive_; + MS_ASSERT(primitive != nullptr); + auto prim = reinterpret_cast<const schema::v0::Primitive 
*>(primitive); int primitive_type = prim->value_type(); auto creator = CompatRegistry::GetInstance()->GetTransferAttrFunc(SCHEMA_VERSION::SCHEMA_V0, primitive_type); if (creator == nullptr) { MS_LOG(DEBUG) << "the node don't need to convert attr to tensor."; return RET_OK; } - int status = creator(reinterpret_cast<const void *>(prim), node, dst_tensor, &this->attr_tensor_bufs_); + int status = creator(node, dst_tensor, &this->attr_tensor_bufs_); if (status != RET_OK && status != RET_NO_CHANGE) { MS_LOG(ERROR) << "translate attr to tensor failed."; return status; @@ -45,14 +47,12 @@ int LiteModel::ConvertAttrs(Model::Node *node, const schema::v0::Primitive *prim return RET_OK; } -int LiteModel::ConvertAttrToTensors(const void *meta_graph) { - MS_ASSERT(meta_graph != nullptr); +int LiteModel::ConvertAttrToTensors() { int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); if (schema_version != SCHEMA_VERSION::SCHEMA_V0) { MS_LOG(DEBUG) << "no need to convert attr to tensor."; return RET_OK; } - auto meta_graph_v0 = reinterpret_cast<const schema::v0::MetaGraph *>(meta_graph); std::unordered_map<int, std::set<int>> subgraph_node_indexes; for (size_t subgraph_index = 0; subgraph_index < this->sub_graphs_.size(); ++subgraph_index) { for (size_t node_index = 0; node_index < this->sub_graphs_[subgraph_index]->node_indices_.size(); ++node_index) { @@ -62,9 +62,7 @@ int LiteModel::ConvertAttrToTensors(const void *meta_graph) { int cur_all_tensors_size = this->all_tensors_.size(); for (size_t index = 0; index < this->all_nodes_.size(); ++index) { std::vector<schema::Tensor *> dst_tensors; - auto prim = meta_graph_v0->nodes()->GetAs<schema::v0::CNode>(index)->primitive(); - MS_ASSERT(prim != nullptr); - int status = ConvertAttrs(this->all_nodes_[index], prim, &dst_tensors); + int status = ConvertAttrs(this->all_nodes_[index], &dst_tensors); if (status != RET_OK) { MS_LOG(ERROR) << "fail to convert attr to tensor."; return RET_ERROR; @@ -96,6 +94,11 @@ void LiteModel::Free() { free(this->buf); this->buf = nullptr; } + auto nodes_size = this->all_nodes_.size(); + for (size_t i = 0; i < nodes_size; ++i) { + auto node = this->all_nodes_[i]; + node->primitive_ = nullptr; + } for (auto &tensor_buf : attr_tensor_bufs_) { free(tensor_buf); tensor_buf = nullptr; @@ -109,9 +112,6 @@ void LiteModel::Destroy() { for (size_t i = 0; i < nodes_size; ++i) { auto node = this->all_nodes_[i]; MS_ASSERT(node != nullptr); - MS_ASSERT(node->primitive_ != nullptr); - delete node->primitive_; - node->primitive_ = nullptr; delete node; } this->all_nodes_.clear(); @@ -193,15 +193,10 @@ int LiteModel::NodeVerify() const { return RET_ERROR; } - auto prim = node->primitive_; - if (prim->Type() == schema::PrimitiveType_While) { - auto whileOp = reinterpret_cast<mindspore::lite::While *>(const_cast<mindspore::lite::PrimitiveC *>(prim)); - if (whileOp == nullptr) { - MS_LOG(ERROR) << "whileOp is null."; - return RET_ERROR; - } - if (static_cast<uint32_t>(whileOp->GetBodySubgraphIndex()) >= subGraph_size || - static_cast<uint32_t>(whileOp->GetCondSubgraphIndex()) >= subGraph_size) { + if (IsWhileNode(node->primitive_)) { + auto body_index = GetWhileBodySubgraphIndex(node->primitive_); + auto cond_index = GetWhileCondSubgraphIndex(node->primitive_); + if (static_cast<uint32_t>(body_index) >= subGraph_size || static_cast<uint32_t>(cond_index) >= subGraph_size) { MS_LOG(ERROR) << "index of subGraph is beyond subGraph_size."; return RET_ERROR; } diff --git a/mindspore/lite/src/lite_model.h 
b/mindspore/lite/src/lite_model.h index 02af51f19c..7885e27da7 100644 --- a/mindspore/lite/src/lite_model.h +++ b/mindspore/lite/src/lite_model.h @@ -19,15 +19,12 @@ #include <string> #include <vector> +#include "include/errorcode.h" #include "include/model.h" -#include "src/ops/primitive_c.h" #include "include/version.h" #include "schema/model_generated.h" #include "src/common/common.h" #include "src/common/version_manager.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif #ifdef ENABLE_V0 #include "schema/model_v0_generated.h" #endif @@ -48,9 +45,9 @@ class LiteModel : public Model { private: #ifdef ENABLE_V0 - int ConvertAttrs(Model::Node *node, const schema::v0::Primitive *prim, std::vector<schema::Tensor *> *dst_tensor); + int ConvertAttrs(Model::Node *node, std::vector<schema::Tensor *> *dst_tensor); - int ConvertAttrToTensors(const void *meta_graph); + int ConvertAttrToTensors(); #endif template <typename T = schema::MetaGraph, typename U = schema::CNode> @@ -66,31 +63,10 @@ class LiteModel : public Model { return false; } auto c_node = meta_graph.nodes()->template GetAs<U>(i); - MS_ASSERT(c_node != nullptr); - auto src_prim = reinterpret_cast<const schema::Primitive *>(c_node->primitive()); -#ifdef PRIMITIVE_WRITEABLE - node->primitive_ = PrimitiveC::Create(const_cast<schema::Primitive *>(src_prim)); -#else - auto primitive = const_cast<schema::Primitive *>(src_prim); - auto func_pointer = OpsRegistry::GetInstance()->GetPrimitiveCreator(primitive->value_type()); - if (func_pointer == nullptr) { - MS_LOG(ERROR) << "PrimitiveCreator function pointer is nullptr, type: " - << schema::EnumNamePrimitiveType(primitive->value_type()); - delete node; - return false; - } - node->primitive_ = func_pointer(primitive); -#endif - if (node->primitive_ == nullptr) { - MS_LOG(ERROR) << "unpack primitive == nullptr!"; - delete node; - return false; - } - node->primitive_->set_quant_type(static_cast<schema::QuantType>(c_node->quantType())); - MS_ASSERT(c_node->name() != nullptr); + node->primitive_ = c_node->primitive(); + node->quant_type_ = c_node->quantType(); node->name_ = c_node->name()->c_str(); node->node_type_ = static_cast<NodeType>(c_node->nodeType()); - MS_ASSERT(c_node->inputIndex() != nullptr); auto count = c_node->inputIndex()->size(); for (uint32_t j = 0; j < count; ++j) { node->input_indices_.push_back(size_t(c_node->inputIndex()->template GetAs<uint32_t>(j))); @@ -195,7 +171,7 @@ class LiteModel : public Model { } } #ifdef ENABLE_V0 - if (ConvertAttrToTensors(&meta_graph) != RET_OK) { + if (ConvertAttrToTensors() != RET_OK) { MS_LOG(ERROR) << "fail to convert attr to tensor."; return RET_ERROR; } diff --git a/mindspore/lite/src/lite_session.cc b/mindspore/lite/src/lite_session.cc index 6bbdee5f63..4875231935 100644 --- a/mindspore/lite/src/lite_session.cc +++ b/mindspore/lite/src/lite_session.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
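With nodes now holding the raw flatbuffer primitive (node->primitive_ = c_node->primitive() in the unpacking template above) instead of a heap-allocated PrimitiveC, operator types are queried through the prim_util helpers added earlier in this patch. A short sketch of the pattern against a loaded model (the loop itself is illustrative, not from the patch):

// Sketch: schema-agnostic inspection of the operator type of every node.
for (auto *node : model->all_nodes_) {
  int type = lite::GetPrimitiveType(node->primitive_);  // dispatches on the registered schema version
  MS_LOG(DEBUG) << "node " << node->name_ << " has type " << lite::PrimitiveTypeName(type);
}

This is exactly the shape of the lite_session.cc call site below, where WeightTensorNeedCopy feeds GetPrimitiveType into the widened IsPackedOp(int).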
@@ -24,6 +24,7 @@ #include "src/runtime/allocator.h" #include "src/executor.h" #include "src/common/utils.h" +#include "src/common/prim_util.h" #include "src/common/graph_util.h" #include "src/kernel_registry.h" #include "src/lite_model.h" @@ -52,7 +53,7 @@ static bool WeightTensorNeedCopy(const lite::Model *model, const uint32_t tensor return std::none_of(post_node_idxes.begin(), post_node_idxes.end(), [&](const size_t &post_node_idx) { auto node = model->all_nodes_[post_node_idx]; MS_ASSERT(node != nullptr); - return IsPackedOp(static_cast<schema::PrimitiveType>(node->primitive_->Type())); + return IsPackedOp(GetPrimitiveType(node->primitive_)); }); } diff --git a/mindspore/lite/src/ops/CMakeLists.txt b/mindspore/lite/src/ops/CMakeLists.txt index 27bb7067ec..465d5296fc 100644 --- a/mindspore/lite/src/ops/CMakeLists.txt +++ b/mindspore/lite/src/ops/CMakeLists.txt @@ -6,7 +6,8 @@ file(GLOB OPS_SRC ) if(ENABLE_V0) file(GLOB_RECURSE COMPAT_SRC ${CMAKE_CURRENT_SOURCE_DIR}/compat/*.cc) - set(OPS_SRC ${OPS_SRC} ${COMPAT_SRC}) + file(GLOB OPS_SRC_V0 ${CMAKE_CURRENT_SOURCE_DIR}/populate/v0/*.cc) + set(OPS_SRC ${OPS_SRC} ${COMPAT_SRC} ${OPS_SRC_V0}) endif() add_library(cpu_ops_mid OBJECT ${OPS_SRC}) diff --git a/mindspore/lite/src/ops/abs.cc b/mindspore/lite/src/ops/abs.cc deleted file mode 100644 index 8b4ccdae2c..0000000000 --- a/mindspore/lite/src/ops/abs.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/abs.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Abs::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Abs; - } - if (this->primitive_->value.type != schema::PrimitiveType_Abs) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AbsT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Abs::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateAbs(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Abs, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *AbsCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Abs>(primitive); } -Registry AbsRegistry(schema::PrimitiveType_Abs, AbsCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/abs.h b/mindspore/lite/src/ops/abs.h deleted file mode 100644 index f985351177..0000000000 --- a/mindspore/lite/src/ops/abs.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -#ifndef MINDSPORE_LITE_SRC_OPS_ABS_H_ -#define MINDSPORE_LITE_SRC_OPS_ABS_H_ - -namespace mindspore { -namespace lite { -class Abs : public ArithmeticSelf { - public: - Abs() = default; - ~Abs() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Abs, ArithmeticSelf); - explicit Abs(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_ABS_H_ diff --git a/mindspore/lite/src/ops/abs_grad.cc b/mindspore/lite/src/ops/abs_grad.cc deleted file mode 100644 index 3ca69a667c..0000000000 --- a/mindspore/lite/src/ops/abs_grad.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/abs_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int AbsGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_AbsGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_AbsGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AbsGradT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int AbsGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(primitive != nullptr); - MS_ASSERT(fbb != nullptr); - auto attr = primitive->value_as_AbsGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_AbsGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAbsGrad(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_AbsGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AbsGradCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<AbsGrad>(primitive); } -Registry AbsGradRegistry(schema::PrimitiveType_AbsGrad, AbsGradCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/abs_grad.h b/mindspore/lite/src/ops/abs_grad.h deleted file mode 100644 index 763c31c800..0000000000 --- a/mindspore/lite/src/ops/abs_grad.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class AbsGrad : public PrimitiveC { - public: - AbsGrad() = default; - ~AbsGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(AbsGrad, PrimitiveC); - explicit AbsGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_ABS_GRAD_H_ diff --git a/mindspore/lite/src/ops/activation.cc b/mindspore/lite/src/ops/activation.cc deleted file mode 100644 index e959d8cb8e..0000000000 --- a/mindspore/lite/src/ops/activation.cc +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/activation.h" -#include <memory> -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Activation::GetType() const { return this->primitive_->value.AsActivation()->type; } -float Activation::GetAlpha() const { return this->primitive_->value.AsActivation()->alpha; } -float Activation::GetMinVal() const { return this->primitive_->value.AsActivation()->min_val; } -float Activation::GetMaxVal() const { return this->primitive_->value.AsActivation()->max_val; } - -void Activation::SetType(int type) { this->primitive_->value.AsActivation()->type = (schema::ActivationType)type; } -void Activation::SetAlpha(float alpha) { this->primitive_->value.AsActivation()->alpha = alpha; } -void Activation::SetMinVal(float min_val) { this->primitive_->value.AsActivation()->min_val = min_val; } -void Activation::SetMaxVal(float max_val) { this->primitive_->value.AsActivation()->max_val = max_val; } - -int Activation::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Activation; - } - if (this->primitive_->value.type != schema::PrimitiveType_Activation) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - auto attr = std::make_unique<schema::ActivationT>(); - if (prim.name() == "ReLU") { - attr->type = schema::ActivationType_RELU; - } else if (prim.name() == "Sigmoid") { - attr->type = schema::ActivationType_SIGMOID; - } else if (prim.name() == "ReLU6") { - attr->type = schema::ActivationType_RELU6; - } else if (prim.name() == "Swish") { - attr->type = 
schema::ActivationType_SWISH; - } else if (prim.name() == "HSwish") { - attr->type = schema::ActivationType_HSWISH; - } else if (prim.name() == "HSigmoid") { - attr->type = schema::ActivationType_HSIGMOID; - } else if (prim.name() == "Tanh") { - attr->type = schema::ActivationType_TANH; - } - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - return RET_OK; -} -#else -int Activation::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Activation(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Activation return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateActivation(*fbb, attr->type(), attr->alpha(), attr->min_val(), attr->max_val()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Activation, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Activation::GetType() const { return this->primitive_->value_as_Activation()->type(); } -float Activation::GetAlpha() const { return this->primitive_->value_as_Activation()->alpha(); } -float Activation::GetMinVal() const { return this->primitive_->value_as_Activation()->min_val(); } -float Activation::GetMaxVal() const { return this->primitive_->value_as_Activation()->max_val(); } - -PrimitiveC *ActivationCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Activation>(primitive); -} -Registry ActivationRegistry(schema::PrimitiveType_Activation, ActivationCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/activation.h b/mindspore/lite/src/ops/activation.h deleted file mode 100644 index 7157248bf6..0000000000 --- a/mindspore/lite/src/ops/activation.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Activation : public PrimitiveC { - public: - Activation() = default; - ~Activation() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Activation, PrimitiveC); - explicit Activation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetType(int type); - void SetAlpha(float alpha); - void SetMinVal(float minVal); - void SetMaxVal(float maxVal); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetType() const; - float GetAlpha() const; - float GetMinVal() const; - float GetMaxVal() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ACTIVATION_H_ diff --git a/mindspore/lite/src/ops/activation_grad.cc b/mindspore/lite/src/ops/activation_grad.cc deleted file mode 100644 index ac4f093fc8..0000000000 --- a/mindspore/lite/src/ops/activation_grad.cc +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/activation_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ActivationGrad::GetType() const { return this->primitive_->value.AsActivationGrad()->type; } -float ActivationGrad::GetAlpha() const { return this->primitive_->value.AsActivationGrad()->alpha; } -void ActivationGrad::SetType(int type) { - this->primitive_->value.AsActivationGrad()->type = (schema::ActivationType)type; -} -void ActivationGrad::SetAlpha(float alpha) { this->primitive_->value.AsActivationGrad()->alpha = alpha; } -int ActivationGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ActivationGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_ActivationGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - auto attr = std::make_unique<schema::ActivationGradT>(); - if (prim.name() == "ReluGrad") { - attr->type = schema::ActivationType_RELU; - } else if (prim.name() == "SigmoidGrad") { - attr->type = schema::ActivationType_SIGMOID; - } else if (prim.name() == "ReLU6Grad") { - attr->type = schema::ActivationType_RELU6; - } else if (prim.name() == "HSigmoidGrad") { - attr->type = schema::ActivationType_HSIGMOID; - } else if (prim.name() == "HSwishGrad") { - attr->type = schema::ActivationType_HSWISH; - } - attr->alpha = 0; // alpha; - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - return RET_OK; -} -#else -int ActivationGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ActivationGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ActivationGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateActivationGrad(*fbb, attr->type(), attr->alpha()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ActivationGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int ActivationGrad::GetType() const { return this->primitive_->value_as_ActivationGrad()->type(); } -float ActivationGrad::GetAlpha() const { return this->primitive_->value_as_ActivationGrad()->alpha(); } - -PrimitiveC *ActivationGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ActivationGrad>(primitive); -} -Registry ActivationGradRegistry(schema::PrimitiveType_ActivationGrad, ActivationGradCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/activation_grad.h b/mindspore/lite/src/ops/activation_grad.h deleted file mode 100644 index c6c6181efc..0000000000 --- a/mindspore/lite/src/ops/activation_grad.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ACTIVATION_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_ACTIVATION_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ActivationGrad : public PrimitiveC { - public: - ActivationGrad() = default; - ~ActivationGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ActivationGrad, PrimitiveC); - explicit ActivationGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetType(int type); - void SetAlpha(float alpha); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetType() const; - float GetAlpha() const; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_ACTIVATION_GRAD_H_ diff --git a/mindspore/lite/src/ops/adam.cc b/mindspore/lite/src/ops/adam.cc deleted file mode 100644 index ed2cc49c9f..0000000000 --- a/mindspore/lite/src/ops/adam.cc +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/adam.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool Adam::GetUseNesterov() const { return this->primitive_->value.AsAdam()->useNesterov; } -int Adam::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Adam; - } - if (this->primitive_->value.type != schema::PrimitiveType_Adam) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::AdamT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->useNesterov = GetValue<bool>(prim.GetAttr("use_nesterov")); - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -bool Adam::GetUseNesterov() const { return this->primitive_->value_as_Adam()->useNesterov(); } -int Adam::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Adam(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Adam return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAdam(*fbb, attr->useNesterov()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Adam, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AdamCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Adam>(primitive); } -Registry AdamRegistry(schema::PrimitiveType_Adam, AdamCreator); -#endif - -int Adam::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (10 != inputs.size()) { - MS_LOG(ERROR) << "Adam should have 10 input tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[2]->ElementsNum() || - inputs[0]->ElementsNum() != inputs[9]->ElementsNum() || inputs[3]->ElementsNum() != 1 || - inputs[4]->ElementsNum() != 1 || inputs[5]->ElementsNum() != 1 || inputs[6]->ElementsNum() != 1 || - inputs[7]->ElementsNum() != 1 || inputs[8]->ElementsNum() != 1) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - if (!outputs.empty()) { - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape({1}); - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/adam.h b/mindspore/lite/src/ops/adam.h deleted file mode 100644 index 6258da7d40..0000000000 --- a/mindspore/lite/src/ops/adam.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ADAM_H_ -#define MINDSPORE_LITE_SRC_OPS_ADAM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Adam : public PrimitiveC { - public: - Adam() = default; - ~Adam() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Adam, PrimitiveC); - explicit Adam(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool GetUseNesterov() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_ADAM_H_ diff --git a/mindspore/lite/src/ops/add.cc b/mindspore/lite/src/ops/add.cc deleted file mode 100644 index 8661180456..0000000000 --- a/mindspore/lite/src/ops/add.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/add.h" -#include <memory> -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Add::GetActivationType() const { return this->primitive_->value.AsAdd()->activationType; } - -void Add::SetActivationType(int activation_type) { - this->primitive_->value.AsAdd()->activationType = (schema::ActivationType)activation_type; -} - -int Add::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Add; - } - if (this->primitive_->value.type != schema::PrimitiveType_Add) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AddT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else -int Add::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Add(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Add return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAdd(*fbb, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Add, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Add::GetActivationType() const { return this->primitive_->value_as_Add()->activationType(); } - -PrimitiveC *AddCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Add>(primitive); } -Registry AddRegistry(schema::PrimitiveType_Add, AddCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/add.h b/mindspore/lite/src/ops/add.h deleted file mode 100644 index 4bb4cddf77..0000000000 --- a/mindspore/lite/src/ops/add.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ADD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ADD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Add : public Arithmetic { - public: - Add() = default; - ~Add() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Add, Arithmetic); - explicit Add(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetActivationType(int activation_type); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ADD_H_ diff --git a/mindspore/lite/src/ops/adder.cc b/mindspore/lite/src/ops/adder.cc deleted file mode 100644 index 6320c48cb9..0000000000 --- a/mindspore/lite/src/ops/adder.cc +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/adder.h" -#include <memory> -#include <string> - -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#ifdef PRIMITIVE_WRITEABLE -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Adder::GetFormat() const { return this->primitive_->value.AsAdder()->format; } -int Adder::GetGroup() const { return this->primitive_->value.AsAdder()->group; } -int Adder::GetChannelIn() const { return this->primitive_->value.AsAdder()->channelIn; } -int Adder::GetChannelOut() const { return this->primitive_->value.AsAdder()->channelOut; } -int Adder::GetKernelW() const { return this->primitive_->value.AsAdder()->kernelW; } -int Adder::GetKernelH() const { return this->primitive_->value.AsAdder()->kernelH; } -int Adder::GetStrideW() const { return this->primitive_->value.AsAdder()->strideW; } -int Adder::GetStrideH() const { return this->primitive_->value.AsAdder()->strideH; } -int Adder::GetPadMode() const { return this->primitive_->value.AsAdder()->padMode; } -int Adder::GetPadUp() const { return this->primitive_->value.AsAdder()->padUp; } -int Adder::GetPadDown() const { return this->primitive_->value.AsAdder()->padDown; } -int Adder::GetPadLeft() const { return this->primitive_->value.AsAdder()->padLeft; } -int Adder::GetPadRight() const { return this->primitive_->value.AsAdder()->padRight; } -int Adder::GetDilateW() const { return this->primitive_->value.AsAdder()->dilateW; } -int Adder::GetDilateH() const { return this->primitive_->value.AsAdder()->dilateH; } -int Adder::GetActivationType() const { return this->primitive_->value.AsAdder()->activationType; } - -void Adder::SetFormat(int format) { this->primitive_->value.AsAdder()->format = (schema::Format)format; } -void Adder::SetGroup(int group) { 
this->primitive_->value.AsAdder()->group = group; } -void Adder::SetChannelIn(int channel_in) { this->primitive_->value.AsAdder()->channelIn = channel_in; } -void Adder::SetChannelOut(int channel_out) { this->primitive_->value.AsAdder()->channelOut = channel_out; } -void Adder::SetKernelW(int kernel_w) { this->primitive_->value.AsAdder()->kernelW = kernel_w; } -void Adder::SetKernelH(int kernel_h) { this->primitive_->value.AsAdder()->kernelH = kernel_h; } -void Adder::SetStrideW(int stride_w) { this->primitive_->value.AsAdder()->strideW = stride_w; } -void Adder::SetStrideH(int stride_h) { this->primitive_->value.AsAdder()->strideH = stride_h; } -void Adder::SetPadMode(int pad_mode) { this->primitive_->value.AsAdder()->padMode = (schema::PadMode)pad_mode; } -void Adder::SetPadUp(int pad_up) { this->primitive_->value.AsAdder()->padUp = pad_up; } -void Adder::SetPadDown(int pad_down) { this->primitive_->value.AsAdder()->padDown = pad_down; } -void Adder::SetPadLeft(int pad_left) { this->primitive_->value.AsAdder()->padLeft = pad_left; } -void Adder::SetPadRight(int pad_right) { this->primitive_->value.AsAdder()->padRight = pad_right; } -void Adder::SetDilateW(int dilate_w) { this->primitive_->value.AsAdder()->dilateW = dilate_w; } -void Adder::SetDilateH(int dilate_h) { this->primitive_->value.AsAdder()->dilateH = dilate_h; } -void Adder::SetActivationType(int activation_type) { - this->primitive_->value.AsAdder()->activationType = (schema::ActivationType)activation_type; -} - -void Adder::PopulaterAdderSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) { - auto attr = std::make_unique<schema::AdderT>(); - attr->group = group; - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - attr->format = schema::Format::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list[0]; - attr->padDown = pad_list[1]; - attr->padLeft = pad_list[2]; - attr->padRight = pad_list[3]; - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation[2]; - attr->dilateW = dilation[3]; - - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size[0]; - attr->kernelW = kernel_size[1]; - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride[2]; - attr->strideW = stride[3]; - - attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front(); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "same") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - if (prim.GetAttr("activation_name") != nullptr) { - auto activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - - primitive->value.type = schema::PrimitiveType_Adder; - primitive->value.value = attr.release(); -} - -int Adder::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - 
this->primitive_->value.type = schema::PrimitiveType_Adder; - } - if (this->primitive_->value.type != schema::PrimitiveType_Adder) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - auto groupAttr = prim.GetAttr("group"); - if (groupAttr == nullptr) { - MS_LOG(ERROR) << "conv2d op has no group attr,please check pb model"; - return RET_NULL_PTR; - } - int group = CastToInt(groupAttr).front(); - PopulaterAdderSingleGroup(prim, this->primitive_, group); - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else -int Adder::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Adder(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Adder return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateAdder(*fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), - attr->kernelW(), attr->kernelH(), attr->strideW(), attr->strideH(), - attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(), - attr->padRight(), attr->dilateW(), attr->dilateH(), attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Adder, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int Adder::GetFormat() const { return this->primitive_->value_as_Adder()->format(); } -int Adder::GetGroup() const { return this->primitive_->value_as_Adder()->group(); } -int Adder::GetChannelIn() const { return this->primitive_->value_as_Adder()->channelIn(); } -int Adder::GetChannelOut() const { return this->primitive_->value_as_Adder()->channelOut(); } -int Adder::GetKernelW() const { return this->primitive_->value_as_Adder()->kernelW(); } -int Adder::GetKernelH() const { return this->primitive_->value_as_Adder()->kernelH(); } -int Adder::GetStrideW() const { return this->primitive_->value_as_Adder()->strideW(); } -int Adder::GetStrideH() const { return this->primitive_->value_as_Adder()->strideH(); } -int Adder::GetPadMode() const { return this->primitive_->value_as_Adder()->padMode(); } -int Adder::GetPadUp() const { return this->primitive_->value_as_Adder()->padUp(); } -int Adder::GetPadDown() const { return this->primitive_->value_as_Adder()->padDown(); } -int Adder::GetPadLeft() const { return this->primitive_->value_as_Adder()->padLeft(); } -int Adder::GetPadRight() const { return this->primitive_->value_as_Adder()->padRight(); } -int Adder::GetDilateW() const { return this->primitive_->value_as_Adder()->dilateW(); } -int Adder::GetDilateH() const { return this->primitive_->value_as_Adder()->dilateH(); } -int Adder::GetActivationType() const { return this->primitive_->value_as_Adder()->activationType(); } - -PrimitiveC *AdderCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Adder>(primitive); } -Registry AdderRegistry(schema::PrimitiveType_Adder, AdderCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/adder.h b/mindspore/lite/src/ops/adder.h deleted file mode 100644 index 1e1d33830b..0000000000 --- a/mindspore/lite/src/ops/adder.h +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ADDER_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ADDER_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/conv2d.h" - -namespace mindspore { -namespace lite { -class Adder : public Conv2D { - public: - Adder() = default; - ~Adder() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Adder, Conv2D); - explicit Adder(schema::PrimitiveT *primitive) : Conv2D(primitive) {} - - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetFormat(int format); - void SetGroup(int group); - void SetChannelIn(int channel_in); - void SetChannelOut(int channel_out); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); - - private: - void PopulaterAdderSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb); -#endif - - public: - int GetFormat() const; - int GetGroup() const; - int GetChannelIn() const; - int GetChannelOut() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ADDER_H_ diff --git a/mindspore/lite/src/ops/addn.cc b/mindspore/lite/src/ops/addn.cc deleted file mode 100644 index 26e244cb28..0000000000 --- a/mindspore/lite/src/ops/addn.cc +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/addn.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int AddN::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_AddN; - } - if (this->primitive_->value.type != schema::PrimitiveType_AddN) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AddNT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int AddN::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateAddN(*fbb, 0); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_AddN, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AddNCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<AddN>(primitive); } -Registry AddNRegistry(schema::PrimitiveType_AddN, AddNCreator); -#endif - -namespace { -constexpr int kLeastInputNum = 2; -} -int AddN::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs.front(); - MS_ASSERT(input != nullptr); - auto output = outputs.front(); - MS_ASSERT(output != nullptr); - if (inputs.size() < kLeastInputNum) { - MS_LOG(ERROR) << "input size" << inputs.size() << " is error!"; - return RET_INPUT_TENSOR_ERROR; - } - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - size_t max_dims = inputs.at(0)->shape().size(); - size_t max_dims_idx = 0; - - // determine max_dims - for (size_t i = 1; i < inputs.size(); ++i) { - if (inputs.at(i)->shape().size() > max_dims) { - max_dims = inputs.at(i)->shape().size(); - max_dims_idx = i; - } - } - - output->set_shape(inputs.at(max_dims_idx)->shape()); - - // make sure all elements have the same size or 1 (broadcasting) in all dimensions - for (size_t i = 1; i < inputs.size(); ++i) { - if ((inputs.at(i)->shape().size() != max_dims) && - (inputs.at(i)->ElementsNum() != inputs.at(max_dims_idx)->ElementsNum())) { - MS_LOG(ERROR) << "AddN inputs shape is not equal!"; - return RET_INPUT_TENSOR_ERROR; - } - if (inputs.at(i)->data_type() != inputs.at(0)->data_type()) { - MS_LOG(ERROR) << "AddN all input data type should be the same!"; - return RET_INPUT_TENSOR_ERROR; - } - } - - for (size_t d = 0; d < input->shape().size(); ++d) { - size_t max_dim = 0; - for (size_t i = 0; i < inputs.size(); ++i) { - size_t shift = max_dims - inputs.at(i)->shape().size(); - size_t dim = (i < shift) ? 
1 : inputs.at(i)->shape().at(d); - if (dim > max_dim) { - max_dim = dim; - } - } - output->shape()[d] = max_dim; // set the biggest dimension in the output tensor - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/addn.h b/mindspore/lite/src/ops/addn.h deleted file mode 100644 index 6d25bb8a9b..0000000000 --- a/mindspore/lite/src/ops/addn.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ADDN_H_ -#define MINDSPORE_LITE_SRC_OPS_ADDN_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class AddN : public PrimitiveC { - public: - AddN() = default; - ~AddN() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(AddN, PrimitiveC); - explicit AddN(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_ADDN_H_ diff --git a/mindspore/lite/src/ops/apply_momentum.cc b/mindspore/lite/src/ops/apply_momentum.cc deleted file mode 100644 index e38e032efc..0000000000 --- a/mindspore/lite/src/ops/apply_momentum.cc +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/apply_momentum.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float ApplyMomentum::GetGradientScale() const { return this->primitive_->value.AsApplyMomentum()->gradientScale; } -bool ApplyMomentum::GetUseNesterov() const { return this->primitive_->value.AsApplyMomentum()->useNesterov; } - -int ApplyMomentum::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ApplyMomentum; - } - if (this->primitive_->value.type != schema::PrimitiveType_ApplyMomentum) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::ApplyMomentumT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->gradientScale = GetValue<float>(prim.GetAttr("gradient_scale")); - attr->useNesterov = GetValue<bool>(prim.GetAttr("use_nesterov")); - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -float ApplyMomentum::GetGradientScale() const { return this->primitive_->value_as_ApplyMomentum()->gradientScale(); } -bool ApplyMomentum::GetUseNesterov() const { return this->primitive_->value_as_ApplyMomentum()->useNesterov(); } - -int ApplyMomentum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ApplyMomentum(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ApplyMomentum return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateApplyMomentum(*fbb, attr->gradientScale(), attr->useNesterov()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ApplyMomentum, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ApplyMomentumCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ApplyMomentum>(primitive); -} -Registry ApplyMomentumRegistry(schema::PrimitiveType_ApplyMomentum, ApplyMomentumCreator); -#endif - -int ApplyMomentum::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (inputs.size() != 5) { - MS_LOG(ERROR) << "ApplyMomentum should have at least 5 input tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[3]->ElementsNum() || - inputs[2]->ElementsNum() != 1 || inputs[4]->ElementsNum() != 1) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - if (!outputs.empty()) { - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape({1}); - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/apply_momentum.h b/mindspore/lite/src/ops/apply_momentum.h deleted file mode 100644 index 0d9454018a..0000000000 --- a/mindspore/lite/src/ops/apply_momentum.h +++ /dev/null @@ 
-1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_APPLY_MOMENTUM_H_ -#define MINDSPORE_LITE_SRC_OPS_APPLY_MOMENTUM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ApplyMomentum : public PrimitiveC { - public: - ApplyMomentum() = default; - ~ApplyMomentum() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ApplyMomentum, PrimitiveC); - explicit ApplyMomentum(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetGradientScale() const; - bool GetUseNesterov() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_APPLY_MOMENTUM_H_ diff --git a/mindspore/lite/src/ops/argmax.cc b/mindspore/lite/src/ops/argmax.cc deleted file mode 100644 index b189b3d1d6..0000000000 --- a/mindspore/lite/src/ops/argmax.cc +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/argmax.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ArgMax::GetAxis() const { return this->primitive_->value.AsArgMax()->axis; } -bool ArgMax::GetOutMaxValue() const { return this->primitive_->value.AsArgMax()->outMaxValue; } -int ArgMax::GetTopK() const { return this->primitive_->value.AsArgMax()->topK; } -bool ArgMax::GetKeepDims() const { return this->primitive_->value.AsArgMax()->keepDims; } -int ArgMax::GetAxisType() const { return this->primitive_->value.AsArgMax()->axisType; } - -void ArgMax::SetAxis(int axis) { this->primitive_->value.AsArgMax()->axis = axis; } -void ArgMax::SetOutMaxValue(bool out_max_value) { this->primitive_->value.AsArgMax()->outMaxValue = out_max_value; } -void ArgMax::SetTopK(int top_k) { this->primitive_->value.AsArgMax()->topK = top_k; } -void ArgMax::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMax()->keepDims = keep_dims; } -void ArgMax::SetAxisType(int axis_type) { this->primitive_->value.AsArgMax()->axisType = axis_type; } -int ArgMax::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ArgMax; - } - if (this->primitive_->value.type != schema::PrimitiveType_ArgMax) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto argmax_attr = new (std::nothrow) schema::ArgMaxT(); - if (argmax_attr == nullptr) { - MS_LOG(ERROR) << "new primitive value.value error"; - return RET_ERROR; - } - if (prim.GetAttr("axis") != nullptr) { - argmax_attr->axis = static_cast<int32_t>(GetValue<int64_t>(prim.GetAttr("axis"))); - } - if (prim.GetAttr("keep_dims") != nullptr) { - argmax_attr->keepDims = static_cast<bool>(GetValue<bool>(prim.GetAttr("keep_dims"))); - } - argmax_attr->outMaxValue = false; - this->primitive_->value.value = argmax_attr; - } - return RET_OK; -} -#else -int ArgMax::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ArgMax(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ArgMax return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreateArgMax(*fbb, attr->axis(), attr->outMaxValue(), attr->topK(), attr->keepDims(), attr->axisType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ArgMax, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int ArgMax::GetAxis() const { return this->primitive_->value_as_ArgMax()->axis(); } -bool ArgMax::GetOutMaxValue() const { return this->primitive_->value_as_ArgMax()->outMaxValue(); } -int ArgMax::GetTopK() const { return this->primitive_->value_as_ArgMax()->topK(); } -bool ArgMax::GetKeepDims() const { return this->primitive_->value_as_ArgMax()->keepDims(); } -int ArgMax::GetAxisType() const { return this->primitive_->value_as_ArgMax()->axisType(); } - -PrimitiveC *ArgMaxCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<ArgMax>(primitive); } -Registry ArgMaxRegistry(schema::PrimitiveType_ArgMax, ArgMaxCreator); -#endif - -int ArgMax::InferShape(std::vector<Tensor *> 
inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() > kDoubleNum) { - MS_LOG(ERROR) << "tensor number is error."; - return RET_ERROR; - } - - output->set_format(input->format()); - if (GetOutMaxValue() && outputs_.size() == kSingleNum) { - output->set_data_type(input->data_type()); - } else { - output->set_data_type(kNumberTypeInt32); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> output_shape(input->shape()); - auto input_shape_size = input->shape().size(); - auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis(); - if (axis >= input_shape_size || axis < 0) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size; - return RET_PARAM_INVALID; - } - if (GetTopK() == 1 && !GetKeepDims()) { - output_shape.erase(output_shape.begin() + axis); - } else { - output_shape[axis] = GetTopK(); - } - - output->set_shape(output_shape); - if (outputs_.size() == kDoubleNum) { - outputs_.at(1)->set_format(input->format()); - outputs_.at(1)->set_data_type(input->data_type()); - outputs_.at(1)->set_shape(output_shape); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/argmax.h b/mindspore/lite/src/ops/argmax.h deleted file mode 100644 index d208c2b60a..0000000000 --- a/mindspore/lite/src/ops/argmax.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ArgMax : public PrimitiveC { - public: - ArgMax() = default; - ~ArgMax() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ArgMax, PrimitiveC); - explicit ArgMax(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - void SetOutMaxValue(bool out_max_value); - void SetTopK(int top_k); - void SetKeepDims(bool keep_dims); - void SetAxisType(int axis_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; - bool GetOutMaxValue() const; - int GetTopK() const; - bool GetKeepDims() const; - int GetAxisType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MAX_H_ diff --git a/mindspore/lite/src/ops/argmin.cc b/mindspore/lite/src/ops/argmin.cc deleted file mode 100644 index cf9e7d5d4e..0000000000 --- a/mindspore/lite/src/ops/argmin.cc +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/argmin.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ArgMin::GetAxis() const { return this->primitive_->value.AsArgMin()->axis; } -bool ArgMin::GetOutMaxValue() const { return this->primitive_->value.AsArgMin()->outMaxValue; } -int ArgMin::GetTopK() const { return this->primitive_->value.AsArgMin()->topK; } -bool ArgMin::GetKeepDims() const { return this->primitive_->value.AsArgMin()->keepDims; } -int ArgMin::GetAxisType() const { return this->primitive_->value.AsArgMin()->axisType; } - -void ArgMin::SetAxis(int axis) { this->primitive_->value.AsArgMin()->axis = axis; } -void ArgMin::SetOutMaxValue(bool out_max_value) { this->primitive_->value.AsArgMin()->outMaxValue = out_max_value; } -void ArgMin::SetTopK(int top_k) { this->primitive_->value.AsArgMin()->topK = top_k; } -void ArgMin::SetKeepDims(bool keep_dims) { this->primitive_->value.AsArgMin()->keepDims = keep_dims; } -void ArgMin::SetAxisType(int axis_type) { this->primitive_->value.AsArgMin()->axisType = axis_type; } - -int ArgMin::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ArgMin; - } - if (this->primitive_->value.type != schema::PrimitiveType_ArgMin) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ArgMinT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (prim.GetAttr("axis") != nullptr) { - attr->axis = static_cast<int32_t>(GetValue<int64_t>(prim.GetAttr("axis"))); - } - if (prim.GetAttr("keep_dims") != nullptr) { - attr->keepDims = static_cast<bool>(GetValue<bool>(prim.GetAttr("keep_dims"))); - } - attr->outMaxValue = false; - } - return RET_OK; -} - -#else -int ArgMin::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ArgMin(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ArgMin return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreateArgMin(*fbb, attr->axis(), attr->outMaxValue(), attr->topK(), attr->keepDims(), attr->axisType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ArgMin, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int ArgMin::GetAxis() const { return this->primitive_->value_as_ArgMin()->axis(); } -bool ArgMin::GetOutMaxValue() const { return this->primitive_->value_as_ArgMin()->outMaxValue(); } -int ArgMin::GetTopK() const { return this->primitive_->value_as_ArgMin()->topK(); } -bool ArgMin::GetKeepDims() const { return this->primitive_->value_as_ArgMin()->keepDims(); } -int ArgMin::GetAxisType() const { return this->primitive_->value_as_ArgMin()->axisType(); } - -PrimitiveC *ArgMinCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<ArgMin>(primitive); } -Registry ArgMinRegistry(schema::PrimitiveType_ArgMin, ArgMinCreator); -#endif - -int ArgMin::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor 
*> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() > kDoubleNum) { - MS_LOG(ERROR) << "tensor number is error."; - } - output->set_format(input->format()); - if (GetOutMaxValue() && outputs_.size() == kSingleNum) { - output->set_data_type(input->data_type()); - } else { - output->set_data_type(kNumberTypeInt32); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape_size = input->shape().size(); - auto axis = GetAxis() < 0 ? GetAxis() + input_shape_size : GetAxis(); - if (axis >= input_shape_size || axis < 0) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis() << ", input shape size: " << input_shape_size; - return RET_PARAM_INVALID; - } - std::vector<int> output_shape(input->shape()); - if (GetTopK() == 1 && !GetKeepDims()) { - output_shape.erase(output_shape.begin() + axis); - } else { - output_shape[axis] = GetTopK(); - } - - output->set_shape(output_shape); - if (outputs_.size() == kDoubleNum) { - outputs_.at(1)->set_format(input->format()); - outputs_.at(1)->set_data_type(input->data_type()); - outputs_.at(1)->set_shape(output_shape); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/argmin.h b/mindspore/lite/src/ops/argmin.h deleted file mode 100644 index 4a1ab9af12..0000000000 --- a/mindspore/lite/src/ops/argmin.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ArgMin : public PrimitiveC { - public: - ArgMin() = default; - ~ArgMin() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ArgMin, PrimitiveC); - explicit ArgMin(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - void SetOutMaxValue(bool out_max_value); - void SetTopK(int top_k); - void SetKeepDims(bool keep_dims); - void SetAxisType(int axis_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; - bool GetOutMaxValue() const; - int GetTopK() const; - bool GetKeepDims() const; - int GetAxisType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ARG_MIN_H_ diff --git a/mindspore/lite/src/ops/arithmetic.cc b/mindspore/lite/src/ops/arithmetic.cc deleted file mode 100644 index 0c03bd6917..0000000000 --- a/mindspore/lite/src/ops/arithmetic.cc +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/arithmetic.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -namespace mindspore { -namespace lite { - -int Arithmetic::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "The number of input must be " << kDoubleNum; - return RET_INPUT_TENSOR_ERROR; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "The number of output must be " << kSingleNum; - return RET_INPUT_TENSOR_ERROR; - } - auto input0 = inputs_[0]; - MS_ASSERT(input0 != nullptr); - auto input1 = inputs_[1]; - MS_ASSERT(input1 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - auto input_shape0 = input0->shape(); - auto input_shape1 = input1->shape(); - auto format = input0->format(); - output->set_format(format); - output->set_data_type(input0->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (input_shape0.size() > 10 || input_shape1.size() > 10) { - int wrong_dim = input_shape0.size() > input_shape1.size() ? 
input_shape0.size() : input_shape1.size(); - MS_LOG(ERROR) << "Unsupported input dim: " << wrong_dim << ", input dim must not exceed 10"; - return RET_ERROR; - } - in_shape0_.resize(10); - in_shape1_.resize(10); - out_shape_.resize(10); - - ndim_ = input_shape0.size(); - if (input_shape0.size() < input_shape1.size()) { - ndim_ = input_shape1.size(); - auto fill_dim_num = input_shape1.size() - input_shape0.size(); - int j = 0; - for (size_t i = 0; i < input_shape1.size(); i++) { - if (i < fill_dim_num) { - in_shape0_[i] = 1; - } else { - in_shape0_[i] = input_shape0[j++]; - } - in_shape1_[i] = input_shape1[i]; - } - format = input0->format(); - } else if (input_shape0.size() > input_shape1.size()) { - ndim_ = input_shape0.size(); - auto fill_dim_num = input_shape0.size() - input_shape1.size(); - int j = 0; - for (size_t i = 0; i < input_shape0.size(); i++) { - if (i < fill_dim_num) { - in_shape1_[i] = 1; - } else { - in_shape1_[i] = input_shape1[j++]; - } - in_shape0_[i] = input_shape0[i]; - } - } else { - for (size_t i = 0; i < input_shape0.size(); i++) { - in_shape1_[i] = input_shape1[i]; - in_shape0_[i] = input_shape0[i]; - } - } - - std::vector<int> output_shape; - for (int i = 0; i < ndim_; i++) { - if (in_shape0_[i] != in_shape1_[i]) { - if (in_shape0_[i] == 1) { - out_shape_[i] = in_shape1_[i]; - } else if (in_shape1_[i] == 1) { - out_shape_[i] = in_shape0_[i]; - } else { - MS_LOG(ERROR) << "shapes of input tensors cannot be broadcast"; - return RET_ERROR; - } - broadcasting_ = true; - } else { - out_shape_[i] = in_shape0_[i]; - } - output_shape.push_back(out_shape_[i]); - } - - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/arithmetic.h b/mindspore/lite/src/ops/arithmetic.h deleted file mode 100644 index 6c5c6f807a..0000000000 --- a/mindspore/lite/src/ops/arithmetic.h +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
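The broadcast handling deleted above right-aligns the shorter input shape by padding it with leading 1s, then merges the two shapes axis by axis. A self-contained sketch of that merge step, as a hypothetical free function rather than the Arithmetic class itself:

#include <vector>

// Broadcasting rule as implemented by the deleted Arithmetic::InferShape:
// pad the shorter shape with leading 1s, then each axis pair must either
// match or contain a 1. Returns {} when the shapes cannot be broadcast.
std::vector<int> BroadcastShape(std::vector<int> a, std::vector<int> b) {
  if (a.size() < b.size()) a.insert(a.begin(), b.size() - a.size(), 1);
  if (b.size() < a.size()) b.insert(b.begin(), a.size() - b.size(), 1);
  std::vector<int> out(a.size());
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] == b[i]) {
      out[i] = a[i];
    } else if (a[i] == 1 || b[i] == 1) {
      out[i] = a[i] * b[i];  // one side is 1, so this picks the other side
    } else {
      return {};  // mismatch on a non-1 axis: not broadcastable
    }
  }
  return out;
}

// Example: BroadcastShape({8, 1, 3}, {4, 3}) yields {8, 4, 3}.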
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" -#include "nnacl/arithmetic.h" - -namespace mindspore { -namespace lite { -class Arithmetic : public PrimitiveC { - public: - Arithmetic() = default; - ~Arithmetic() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Arithmetic, PrimitiveC); - explicit Arithmetic(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - // explicit Arithmetic(schema::Primitive *primitive) : PrimitiveC(primitive) {} - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override { - return RET_ERROR; - } -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool Broadcasting() const { return this->broadcasting_; } - int NDims() const { return this->ndim_; } - std::vector<int> InShape0() const { return this->in_shape0_; } - std::vector<int> InShape1() const { return this->in_shape1_; } - std::vector<int> OutputShape() const { return this->out_shape_; } - - protected: - bool broadcasting_ = false; - int ndim_ = 0; - std::vector<int> in_shape0_; - std::vector<int> in_shape1_; - std::vector<int> out_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_H_ diff --git a/mindspore/lite/src/ops/arithmetic_compare.cc b/mindspore/lite/src/ops/arithmetic_compare.cc deleted file mode 100644 index 57c3db7f74..0000000000 --- a/mindspore/lite/src/ops/arithmetic_compare.cc +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { - -int ArithmeticCompare::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto res = Arithmetic::InferShape(inputs_, outputs_); - auto output = outputs_.front(); - output->set_data_type(TypeId::kNumberTypeBool); - return res; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/arithmetic_compare.h b/mindspore/lite/src/ops/arithmetic_compare.h deleted file mode 100644 index 4917a61792..0000000000 --- a/mindspore/lite/src/ops/arithmetic_compare.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_COMPARE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_COMPARE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class ArithmeticCompare : public Arithmetic { - public: - ArithmeticCompare() = default; - ~ArithmeticCompare() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ArithmeticCompare, Arithmetic); - explicit ArithmeticCompare(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_COMPARE_H_ diff --git a/mindspore/lite/src/ops/arithmetic_grad.cc b/mindspore/lite/src/ops/arithmetic_grad.cc deleted file mode 100644 index 58be418faa..0000000000 --- a/mindspore/lite/src/ops/arithmetic_grad.cc +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/arithmetic_grad.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -namespace mindspore { -namespace lite { -int ArithmeticGrad::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (inputs_.size() != 3) { - MS_LOG(ERROR) << "The number of input must be 3"; - return RET_ERROR; - } - if (outputs_.size() != 2) { - MS_LOG(ERROR) << "The number of output must be 2"; - return RET_ERROR; - } - auto dy = inputs_[0]; - auto x1 = inputs_[1]; - auto x2 = inputs_[2]; - auto dx1 = outputs_[0]; - auto dx2 = outputs_[1]; - - MS_ASSERT(dy != nullptr); - MS_ASSERT(x1 != nullptr); - MS_ASSERT(x2 != nullptr); - MS_ASSERT(dx1 != nullptr); - MS_ASSERT(dx2 != nullptr); - - if ((Type() == schema::PrimitiveType_MaximumGrad) || (Type() == schema::PrimitiveType_MinimumGrad)) { - x1 = inputs_[0]; - x2 = inputs_[1]; - dy = inputs_[2]; - } - - auto inShape0 = x1->shape(); - auto inShape1 = x2->shape(); - auto outShape = dy->shape(); - - if ((Type() == schema::PrimitiveType_AddGrad) || (Type() == schema::PrimitiveType_SubGrad) || - (Type() == schema::PrimitiveType_MaximumGrad) || (Type() == schema::PrimitiveType_MinimumGrad)) { - ndim_ = outShape.size(); - x1_shape_.resize(ndim_); - x2_shape_.resize(ndim_); - dy_shape_.resize(ndim_); - auto fillDimNum0 = outShape.size() - inShape0.size(); - auto fillDimNum1 = outShape.size() - inShape1.size(); - int j0 = 0; - int j1 = 0; - for (unsigned int i = 0; i < outShape.size(); i++) { - x1_shape_[i] = (i < fillDimNum0) ? 1 : inShape0[j0++]; - x2_shape_[i] = (i < fillDimNum1) ? 
1 : inShape1[j1++]; - dy_shape_[i] = outShape[i]; - } - } else { - if (dx1->ElementsNum() < dx2->ElementsNum()) { - ndim_ = inShape1.size(); - x1_shape_.resize(ndim_); - x2_shape_.resize(ndim_); - dy_shape_.resize(ndim_); - auto fillDimNum = inShape1.size() - inShape0.size(); // This will not work for batch! - int j = 0; - for (unsigned int i = 0; i < inShape1.size(); i++) { - if (i < fillDimNum) { - x2_shape_[i] = 1; - } else { - x2_shape_[i] = inShape0[j++]; - } - x1_shape_[i] = inShape1[i]; - dy_shape_[i] = outShape[i]; - } - } else if (dx2->ElementsNum() < dx1->ElementsNum()) { // if (inShape0.size() > inShape1.size()) - ndim_ = inShape0.size(); - x1_shape_.resize(ndim_); - x2_shape_.resize(ndim_); - dy_shape_.resize(ndim_); - broadcasting_ = true; - int j = 0; - auto fillDimNum = inShape0.size() - inShape1.size(); - for (unsigned int i = 0; i < inShape0.size(); i++) { - if (i < fillDimNum) { - x2_shape_[i] = 1; - } else { - x2_shape_[i] = inShape1[j++]; - } - x1_shape_[i] = inShape0[i]; - dy_shape_[i] = outShape[i]; - } - } else { - broadcasting_ = false; - ndim_ = inShape0.size(); - x1_shape_.resize(ndim_); - x2_shape_.resize(ndim_); - dy_shape_.resize(ndim_); - for (unsigned int i = 0; i < inShape0.size(); i++) { - x2_shape_[i] = inShape1[i]; - x1_shape_[i] = inShape0[i]; - dy_shape_[i] = outShape[i]; - } - } - } - - dx1->set_shape(x1->shape()); - dx2->set_shape(x2->shape()); - dx1->set_data_type(dy->data_type()); - dx2->set_data_type(dy->data_type()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/arithmetic_grad.h b/mindspore/lite/src/ops/arithmetic_grad.h deleted file mode 100644 index d4a1cf666d..0000000000 --- a/mindspore/lite/src/ops/arithmetic_grad.h +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ARITHMETIC_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_ARITHMETIC_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" -#include "nnacl/arithmetic_self_parameter.h" - -namespace mindspore { -namespace lite { -class ArithmeticGrad : public PrimitiveC { - public: - ArithmeticGrad() = default; - ~ArithmeticGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ArithmeticGrad, PrimitiveC); - explicit ArithmeticGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - // explicit ArithmeticGrad(const schema::Primitive &primitive) : PrimitiveC(primitive) {} - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override { - return RET_ERROR; - } -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool Broadcasting() { return this->broadcasting_; } - int NDims() { return this->ndim_; } - std::vector<int> dyShape() { return this->dy_shape_; } - std::vector<int> x1Shape() { return this->x1_shape_; } - std::vector<int> x2Shape() { return this->x2_shape_; } - - protected: - bool broadcasting_ = false; - int ndim_; - std::vector<int> dy_shape_; - std::vector<int> x1_shape_; - std::vector<int> x2_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_ARITHMETIC_GRAD_H_ diff --git a/mindspore/lite/src/ops/arithmetic_self.cc b/mindspore/lite/src/ops/arithmetic_self.cc deleted file mode 100644 index bc8c2a5831..0000000000 --- a/mindspore/lite/src/ops/arithmetic_self.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/arithmetic_self.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -int ArithmeticSelf::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/arithmetic_self.h b/mindspore/lite/src/ops/arithmetic_self.h deleted file mode 100644 index dafba50a81..0000000000 --- a/mindspore/lite/src/ops/arithmetic_self.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_ - -#include <vector> -#include "src/ops/primitive_c.h" -#include "nnacl/arithmetic_self_parameter.h" - -namespace mindspore { -namespace lite { -class ArithmeticSelf : public PrimitiveC { - public: - ArithmeticSelf() = default; - ~ArithmeticSelf() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ArithmeticSelf, PrimitiveC); - explicit ArithmeticSelf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - // explicit ArithmeticSelf(schema::Primitive *primitive) : PrimitiveC(primitive) {} - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override { - return RET_ERROR; - } -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -OpParameter *PopulateArithmeticSelf(const mindspore::lite::PrimitiveC *primitive); -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ARITHMETIC_SELF_H_ diff --git a/mindspore/lite/src/ops/assert_op.cc b/mindspore/lite/src/ops/assert_op.cc deleted file mode 100644 index 83074c1b7f..0000000000 --- a/mindspore/lite/src/ops/assert_op.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/assert_op.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int AssertOP::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Assert; - } - if (this->primitive_->value.type != schema::PrimitiveType_Assert) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AssertT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else -int AssertOP::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Assert(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Assert return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAssert(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Assert, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AssertCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<AssertOP>(primitive); } -Registry AssertRegistry(schema::PrimitiveType_Assert, AssertCreator); -#endif - -int AssertOP::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { return RET_OK; } -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/assert_op.h b/mindspore/lite/src/ops/assert_op.h deleted file mode 100644 index ba0399e07d..0000000000 --- a/mindspore/lite/src/ops/assert_op.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_ASSERT_OP_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_ASSERT_OP_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class AssertOP : public PrimitiveC { - public: - AssertOP() = default; - ~AssertOP() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(AssertOP, PrimitiveC); - explicit AssertOP(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_ASSERT_OP_H_ diff --git a/mindspore/lite/src/ops/assign.cc b/mindspore/lite/src/ops/assign.cc deleted file mode 100644 index 9facccd921..0000000000 --- a/mindspore/lite/src/ops/assign.cc +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/assign.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Assign::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Assign; - } - if (this->primitive_->value.type != schema::PrimitiveType_Assign) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AssignT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Assign::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Assign(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Assign return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAssign(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Assign, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AssignCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Assign>(primitive); } -Registry AssignRegistry(schema::PrimitiveType_Assign, AssignCreator); -#endif - -int Assign::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { 
- if (2 != inputs.size()) { - MS_LOG(ERROR) << "Assign should have 2 input tensors"; - return RET_ERROR; - } - - if (inputs.at(0)->ElementsNum() != inputs.at(1)->ElementsNum()) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - - if (!outputs.empty()) { - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs.at(0)->data_type()); - out->set_format(inputs.at(0)->format()); - out->set_shape({1}); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/assign.h b/mindspore/lite/src/ops/assign.h deleted file mode 100644 index e53ac0a636..0000000000 --- a/mindspore/lite/src/ops/assign.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_ASSIGN_H_ -#define MINDSPORE_LITE_SRC_OPS_ASSIGN_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Assign : public PrimitiveC { - public: - Assign() = default; - ~Assign() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Assign, PrimitiveC); - explicit Assign(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_ASSIGN_H_ diff --git a/mindspore/lite/src/ops/assign_add.cc b/mindspore/lite/src/ops/assign_add.cc deleted file mode 100644 index 6d77708ad1..0000000000 --- a/mindspore/lite/src/ops/assign_add.cc +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#include "src/ops/assign_add.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int AssignAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_AssignAdd; - } - if (this->primitive_->value.type != schema::PrimitiveType_AssignAdd) { - MS_LOG(ERROR) << "PrimitiveType_AssignAdd primitive value type : " - << schema::EnumNamePrimitiveType(primitive_->value.type) << " is not equal to " - << schema::EnumNamePrimitiveType(schema::PrimitiveType_AssignAdd); - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::AssignAddT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int AssignAdd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_AssignAdd(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_AssignAdd return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAssignAdd(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_AssignAdd, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AssignAddCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<AssignAdd>(primitive); -} -Registry AssignAddRegistry(schema::PrimitiveType_AssignAdd, AssignAddCreator); -#endif - -int AssignAdd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - Tensor *x = inputs_.at(0); - Tensor *y = inputs_.at(1); - Tensor *out = outputs_.at(0); - std::vector<int> x_shape = x->shape(); - if (x->data_type() != y->data_type()) { - MS_LOG(ERROR) << "data types of x and y do not match"; - return RET_ERROR; - } - std::vector<int> output_shape(x_shape.size()); - for (size_t i = 0; i < x_shape.size(); i++) { - output_shape[i] = x_shape[i]; - } - out->set_shape(output_shape); - out->set_format(x->format()); - out->set_data_type(x->data_type()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/assign_add.h b/mindspore/lite/src/ops/assign_add.h deleted file mode 100644 index 6e0e94edab..0000000000 --- a/mindspore/lite/src/ops/assign_add.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
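Each of these deleted .cc files ends with the same two lines in the read-only build: a creator function plus a file-scope Registry object. That is static self-registration: constructing a global Registry at load time inserts the creator into a lookup table, so linking the translation unit is enough to make the op available. A compilable toy version of the idiom, with simplified key and table types that are not the real src/ops/ops_register.h interface:

#include <functional>
#include <iostream>
#include <map>

// Stand-in for the creator signature; the real one returns a PrimitiveC *.
using Creator = std::function<const char *()>;

std::map<int, Creator> &CreatorTable() {
  static std::map<int, Creator> table;  // constructed on first use, avoids init-order issues
  return table;
}

struct Registry {
  Registry(int type, Creator creator) { CreatorTable()[type] = creator; }
};

// A file-scope Registry object runs its constructor before main(),
// mirroring `Registry AssignAddRegistry(schema::PrimitiveType_AssignAdd, AssignAddCreator);`.
Registry g_assign_add_registry(42, [] { return "AssignAdd"; });  // 42 is an illustrative type id

int main() {
  std::cout << CreatorTable()[42]() << std::endl;  // prints: AssignAdd
  return 0;
}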
- */ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" -#ifndef LITE_SRC_OPS_ASSIGN_ADD_H_ -#define LITE_SRC_OPS_ASSIGN_ADD_H_ -namespace mindspore { -namespace lite { -class AssignAdd : public PrimitiveC { - public: - AssignAdd() = default; - ~AssignAdd() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(AssignAdd, PrimitiveC); - explicit AssignAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_SRC_OPS_ASSIGN_ADD_H_ diff --git a/mindspore/lite/src/ops/audio_spectrogram.cc b/mindspore/lite/src/ops/audio_spectrogram.cc deleted file mode 100644 index 6adce58037..0000000000 --- a/mindspore/lite/src/ops/audio_spectrogram.cc +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/audio_spectrogram.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int AudioSpectrogram::GetWindowSize() const { return this->primitive_->value.AsAudioSpectrogram()->windowSize; } -int AudioSpectrogram::GetStride() const { return this->primitive_->value.AsAudioSpectrogram()->stride; } -bool AudioSpectrogram::GetMagSquare() const { return this->primitive_->value.AsAudioSpectrogram()->magSquare; } - -#else -int AudioSpectrogram::GetWindowSize() const { return this->primitive_->value_as_AudioSpectrogram()->windowSize(); } -int AudioSpectrogram::GetStride() const { return this->primitive_->value_as_AudioSpectrogram()->stride(); } -bool AudioSpectrogram::GetMagSquare() const { return this->primitive_->value_as_AudioSpectrogram()->magSquare(); } -int AudioSpectrogram::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_AudioSpectrogram(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Add return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateAudioSpectrogram(*fbb, attr->windowSize(), attr->stride(), attr->magSquare()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_AudioSpectrogram, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *AudioSpectrogramCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<AudioSpectrogram>(primitive); -} -Registry AudioSpectrogramRegistry(schema::PrimitiveType_AudioSpectrogram, AudioSpectrogramCreator); -#endif -int AudioSpectrogram::Log2Ceil(uint32_t length) { - if (length == 0) { - return -1; - } - int 
floor = 0; - const bool is_pow2 = (length & (length - 1)) == 0; // test the original length before the loop below overwrites it - for (int i = 4; i >= 0; --i) { - const int shift = (1 << i); - uint32_t tmp = length >> shift; - if (tmp != 0) { - length = tmp; - floor += shift; - } - } - return is_pow2 ? floor : floor + 1; -} -uint32_t AudioSpectrogram::GetFftLength(uint32_t length) { - int shift = Log2Ceil(length); - return 1 << shift; -} -int AudioSpectrogram::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - if (input_shape.size() != 2) { - MS_LOG(ERROR) << "invalid input shape, which needs to be 2-dimensional"; - return RET_ERROR; - } - if (GetWindowSize() < 2) { - MS_LOG(ERROR) << "window size is too short, now is " << GetWindowSize(); - return RET_ERROR; - } - if (GetStride() < 1) { - MS_LOG(ERROR) << "stride must be positive, now is " << GetStride(); - return RET_ERROR; - } - std::vector<int> output_shape(3); - output_shape[0] = input_shape[1]; - // output height - int sample_sub_window = input_shape[0] - GetWindowSize(); - output_shape[1] = sample_sub_window < 0 ? 0 : 1 + sample_sub_window / GetStride(); - // compute fft length - int fft_length = GetFftLength(GetWindowSize()); - output_shape[2] = fft_length / 2 + 1; - outputs_.front()->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/audio_spectrogram.h b/mindspore/lite/src/ops/audio_spectrogram.h deleted file mode 100644 index e996543ad3..0000000000 --- a/mindspore/lite/src/ops/audio_spectrogram.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
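In the spectrogram shape rule above, GetFftLength rounds the window size up to the next power of two via Log2Ceil, and the last output dimension is fft_length / 2 + 1, the number of non-redundant FFT bins of a real signal. A standalone restatement with a couple of worked values; the 400-sample window is only an illustrative choice (a 25 ms window at 16 kHz), not something fixed by the op:

#include <cassert>
#include <cstdint>

// Ceiling of log2(n): position of the highest set bit, plus one
// when n is not already a power of two. Returns -1 for n == 0
// (callers validate window size >= 2 before reaching this).
int Log2Ceil(uint32_t n) {
  if (n == 0) return -1;
  int floor_log2 = 0;
  for (uint32_t v = n; v > 1; v >>= 1) ++floor_log2;
  const bool power_of_two = (n & (n - 1)) == 0;
  return power_of_two ? floor_log2 : floor_log2 + 1;
}

uint32_t GetFftLength(uint32_t window_size) { return 1u << Log2Ceil(window_size); }

int main() {
  assert(GetFftLength(8) == 8);      // already a power of two
  assert(GetFftLength(400) == 512);  // rounded up, giving 512 / 2 + 1 = 257 output bins
  return 0;
}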
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_AUDIO_SPECTROGRAM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_AUDIO_SPECTROGRAM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class AudioSpectrogram : public PrimitiveC { - public: - AudioSpectrogram() = default; - ~AudioSpectrogram() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(AudioSpectrogram, PrimitiveC); - explicit AudioSpectrogram(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetWindowSize(int window_size) { this->primitive_->value.AsAudioSpectrogram()->windowSize = window_size; } - void SetStride(int stride) { this->primitive_->value.AsAudioSpectrogram()->stride = stride; } - void SetMagSquare(bool mag_square) { this->primitive_->value.AsAudioSpectrogram()->magSquare = mag_square; } -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetWindowSize() const; - int GetStride() const; - bool GetMagSquare() const; - int Log2Ceil(uint32_t length); - uint32_t GetFftLength(uint32_t length); - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_AUDIO_SPECTROGRAM_H_ diff --git a/mindspore/lite/src/ops/batch_norm.cc b/mindspore/lite/src/ops/batch_norm.cc deleted file mode 100644 index 3374ef1123..0000000000 --- a/mindspore/lite/src/ops/batch_norm.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/batch_norm.h" -#include <memory> -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float BatchNorm::GetEpsilon() const { return this->primitive_->value.AsBatchNorm()->epsilon; } - -void BatchNorm::SetEpsilon(float epsilon) { this->primitive_->value.AsBatchNorm()->epsilon = epsilon; } - -int BatchNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_FusedBatchNorm; - } - if (this->primitive_->value.type != schema::PrimitiveType_FusedBatchNorm) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::FusedBatchNormT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new FusedBatchNormT failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - attr->epsilon = GetValue<float>(prim.GetAttr("epsilon")); - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else -int BatchNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateBatchNorm(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BatchNorm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float BatchNorm::GetEpsilon() const { return this->primitive_->value_as_BatchNorm()->epsilon(); } - -PrimitiveC *BatchNormCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<BatchNorm>(primitive); -} -Registry BatchNormRegistry(schema::PrimitiveType_BatchNorm, BatchNormCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/batch_norm.h b/mindspore/lite/src/ops/batch_norm.h deleted file mode 100644 index f4f98648b4..0000000000 --- a/mindspore/lite/src/ops/batch_norm.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
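BatchNorm above shows the split every op in this directory follows: under PRIMITIVE_WRITEABLE the attribute is read from and written to a mutable schema::PrimitiveT, while the read-only build answers the same getter from the flatbuffer table. A toy illustration of that dual-mode accessor; the schema structs below are stand-ins, not the generated MindSpore Lite schema:

#include <iostream>

// Stand-ins for the generated flatbuffer types (illustrative only).
namespace schema {
struct BatchNormT { float epsilon = 1e-5f; };  // mutable object API (writeable build)
struct BatchNorm {                             // read-only table API (inference build)
  float epsilon_value = 1e-5f;
  float epsilon() const { return epsilon_value; }
};
}  // namespace schema

#ifdef PRIMITIVE_WRITEABLE
schema::BatchNormT prim;  // converter/training build: attributes are mutable
float GetEpsilon() { return prim.epsilon; }
void SetEpsilon(float e) { prim.epsilon = e; }
#else
schema::BatchNorm prim;  // inference build: attributes are read-only views
float GetEpsilon() { return prim.epsilon(); }
#endif

int main() {
  std::cout << "epsilon = " << GetEpsilon() << std::endl;
  return 0;
}

Keeping both paths behind one getter lets InferShape and the kernels stay identical across the converter and the on-device runtime.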
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class BatchNorm : public PrimitiveC { - public: - BatchNorm() = default; - ~BatchNorm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(BatchNorm, PrimitiveC); - explicit BatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetEpsilon(float epsilon); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetEpsilon() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_NORM_H_ diff --git a/mindspore/lite/src/ops/batch_to_space.cc b/mindspore/lite/src/ops/batch_to_space.cc deleted file mode 100644 index da7dcc3316..0000000000 --- a/mindspore/lite/src/ops/batch_to_space.cc +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/batch_to_space.h" -#include "src/common/common.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> BatchToSpace::GetBlockShape() const { return this->primitive_->value.AsBatchToSpace()->blockShape; } -std::vector<int> BatchToSpace::GetCrops() const { return this->primitive_->value.AsBatchToSpace()->crops; } - -void BatchToSpace::SetBlockShape(const std::vector<int> &block_shape) { - this->primitive_->value.AsBatchToSpace()->blockShape = block_shape; -} -void BatchToSpace::SetCrops(const std::vector<int> &crops) { this->primitive_->value.AsBatchToSpace()->crops = crops; } - -#else -int BatchToSpace::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_BatchToSpace(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_BatchToSpace return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> blockShape; - if (attr->blockShape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->blockShape()->size()); i++) { - blockShape.push_back(attr->blockShape()->data()[i]); - } - } - std::vector<int32_t> crops; - if (attr->crops() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->crops()->size()); i++) { - crops.push_back(attr->crops()->data()[i]); - } - } - auto val_offset = schema::CreateBatchToSpaceDirect(*fbb, &blockShape, &crops); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BatchToSpace, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -std::vector<int> 
BatchToSpace::GetBlockShape() const { - auto fb_vector = this->primitive_->value_as_BatchToSpace()->blockShape(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> BatchToSpace::GetCrops() const { - auto fb_vector = this->primitive_->value_as_BatchToSpace()->crops(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -PrimitiveC *BatchToSpaceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<BatchToSpace>(primitive); -} -Registry BatchToSpaceRegistry(schema::PrimitiveType_BatchToSpace, BatchToSpaceCreator); -#endif - -namespace { -constexpr int kBatchToSpaceOutputNum = 1; -constexpr int kBatchToSpaceOneInput = 1; -constexpr int kBatchToSpaceThreeInput = 3; -constexpr int kBlockShapeSize = 2; -constexpr int kCropsSize = 4; -} // namespace - -int BatchToSpace::SetOutputShapeFromParam(const std::vector<lite::Tensor *> inputs, - std::vector<lite::Tensor *> outputs) { - auto input_shape = inputs[0]->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size should == " << kQuadrupleNum; - return RET_PARAM_INVALID; - } - auto block_shape = GetBlockShape(); - if (block_shape.size() != kBlockShapeSize) { - MS_LOG(ERROR) << "Block shape size should be " << kBlockShapeSize; - return RET_PARAM_INVALID; - } - auto crops = GetCrops(); - if (crops.size() != kCropsSize) { - MS_LOG(ERROR) << "Crops size should be " << kCropsSize; - return RET_PARAM_INVALID; - } - mul_block_shape_ = 1; - - for (size_t i = 0; i < kBlockShapeSize; ++i) { - if (block_shape[i] <= 0) { - MS_LOG(ERROR) << "Input block_shape should > 0!"; - return RET_PARAM_INVALID; - } - if (input_shape[NHWC_N] % block_shape[i]) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " can not divide block_shape[" << i << "] " - << block_shape[i]; - return 1; - } - mul_block_shape_ *= block_shape[i]; - } - - if (input_shape[NHWC_N] < mul_block_shape_) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " < product of block shape!"; - return RET_PARAM_INVALID; - } - for (size_t i = 0; i < kCropsSize; ++i) { - if (crops[i] < 0) { - MS_LOG(ERROR) << "Input crops should >= 0"; - return RET_PARAM_INVALID; - } - } - std::vector<int32_t> output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N] / mul_block_shape_; - output_shape[NHWC_H] = input_shape[NHWC_H] * block_shape[0] - crops[0] - crops[1]; - output_shape[NHWC_W] = input_shape[NHWC_W] * block_shape[1] - crops[2] - crops[3]; - if (input_shape.size() > 3) { - output_shape[NHWC_C] = input_shape[NHWC_C]; - } - outputs[0]->set_shape(output_shape); - return RET_OK; -} - -int BatchToSpace::SetOutputShapeFromInput(const std::vector<lite::Tensor *> inputs, - std::vector<lite::Tensor *> outputs) { - auto input_shape = inputs[0]->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size should == " << kQuadrupleNum; - return RET_PARAM_INVALID; - } - auto block_shape_data = inputs[1]->data_c(); - auto crops_data = inputs[2]->data_c(); - auto block_shape = static_cast<int *>(block_shape_data); - auto crops = static_cast<int *>(crops_data); - if (inputs[1]->ElementsNum() != kBlockShapeSize) { - MS_LOG(ERROR) << "Block shape size should be " << kBlockShapeSize; - return RET_PARAM_INVALID; - } - if (inputs[2]->ElementsNum() != kCropsSize) { - MS_LOG(ERROR) << "Crops size should be " << kCropsSize; - return RET_PARAM_INVALID; - } - mul_block_shape_ = 1; - - for (size_t i = 0; i < kBlockShapeSize; ++i) { - if 
(block_shape[i] <= 0) { - MS_LOG(ERROR) << "Input block_shape should > 0!"; - return RET_PARAM_INVALID; - } - if (input_shape[NHWC_N] % block_shape[i]) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " can not divide block_shape[" << i << "] " - << block_shape[i]; - return 1; - } - mul_block_shape_ *= block_shape[i]; - } - - if (input_shape[NHWC_N] < mul_block_shape_) { - MS_LOG(ERROR) << "Dimension n " << input_shape[NHWC_N] << " < product of block shape!"; - return RET_PARAM_INVALID; - } - for (size_t i = 0; i < kCropsSize; ++i) { - if (crops[i] < 0) { - MS_LOG(ERROR) << "Input crops should >= 0"; - return RET_PARAM_INVALID; - } - } - std::vector<int32_t> output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N] / mul_block_shape_; - output_shape[NHWC_H] = input_shape[NHWC_H] * block_shape[0] - crops[0] - crops[1]; - output_shape[NHWC_W] = input_shape[NHWC_W] * block_shape[1] - crops[2] - crops[3]; - if (input_shape.size() > 3) { - output_shape[NHWC_C] = input_shape[NHWC_C]; - } - outputs[0]->set_shape(output_shape); - return RET_OK; -} - -int BatchToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kBatchToSpaceOutputNum || - (inputs.size() != kBatchToSpaceOneInput && inputs.size() != kBatchToSpaceThreeInput)) { - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; - } - - auto input = inputs.at(0); - if (input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "batch_to_space only support NHWC now!"; - return RET_FORMAT_ERR; - } - outputs[0]->set_format(input->format()); - outputs[0]->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - if (inputs.size() == kBatchToSpaceOneInput) { - auto ret = SetOutputShapeFromParam(inputs, outputs); - return ret; - } - if (inputs.size() == kBatchToSpaceThreeInput) { - if (inputs[0]->data_c() == nullptr) { - return RET_INFER_INVALID; - } - MS_ASSERT(inputs[1]->data_c() != nullptr); - MS_ASSERT(inputs[2]->data_c() != nullptr); - auto ret = SetOutputShapeFromInput(inputs, outputs); - return ret; - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/batch_to_space.h b/mindspore/lite/src/ops/batch_to_space.h deleted file mode 100644 index aa8a2433b8..0000000000 --- a/mindspore/lite/src/ops/batch_to_space.h +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
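Both shape paths deleted above compute the same NHWC formula, differing only in whether block_shape and crops come from attributes or from the second and third input tensors. A compact sketch of the formula itself, using the {top, bottom, left, right} crops layout the code assumes; the helper name is illustrative:

#include <cassert>
#include <vector>

// BatchToSpace output shape (NHWC):
//   N_out = N / (block_h * block_w)
//   H_out = H * block_h - crop_top  - crop_bottom
//   W_out = W * block_w - crop_left - crop_right
//   C_out = C
std::vector<int> BatchToSpaceShape(const std::vector<int> &nhwc, const std::vector<int> &block,
                                   const std::vector<int> &crops) {
  assert(nhwc.size() == 4 && block.size() == 2 && crops.size() == 4);
  assert(nhwc[0] % (block[0] * block[1]) == 0);  // batch must be divisible by the block product
  return {nhwc[0] / (block[0] * block[1]),
          nhwc[1] * block[0] - crops[0] - crops[1],
          nhwc[2] * block[1] - crops[2] - crops[3],
          nhwc[3]};
}

int main() {
  // Example: a 4x1x1x3 tensor with 2x2 blocks and no cropping becomes 1x2x2x3.
  assert((BatchToSpaceShape({4, 1, 1, 3}, {2, 2}, {0, 0, 0, 0}) == std::vector<int>{1, 2, 2, 3}));
  return 0;
}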
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class BatchToSpace : public PrimitiveC { - public: - BatchToSpace() = default; - ~BatchToSpace() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(BatchToSpace, PrimitiveC); - explicit BatchToSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBlockShape(const std::vector<int> &block_shape); - void SetCrops(const std::vector<int> &crops); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetBlockShape() const; - std::vector<int> GetCrops() const; - - private: - int SetOutputShapeFromParam(const std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); - int SetOutputShapeFromInput(const std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); - int mul_block_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_BATCH_TO_SPACE_H_ diff --git a/mindspore/lite/src/ops/bias_add.cc b/mindspore/lite/src/ops/bias_add.cc deleted file mode 100644 index cdb0b56f36..0000000000 --- a/mindspore/lite/src/ops/bias_add.cc +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "src/ops/bias_add.h"
-#include <memory>
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int BiasAdd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_BiasAdd;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_BiasAdd) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::BiasAddT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    if (prim.GetAttr("axis") == nullptr) {
-      MS_LOG(INFO) << "BiasAdd's attr axis is set to default";
-      attr->axis = {1};
-    } else {
-      attr->axis = CastToInt(prim.GetAttr("axis"));
-    }
-    this->primitive_->value.value = attr;
-  }
-  return RET_OK;
-}
-
-#else
-int BiasAdd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BiasAdd();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BiasAdd return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateBiasAddDirect(*fbb);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BiasAdd, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *BiasAddCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<BiasAdd>(primitive); }
-Registry BiasAddRegistry(schema::PrimitiveType_BiasAdd, BiasAddCreator);
-#endif
-
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/bias_add.h b/mindspore/lite/src/ops/bias_add.h
deleted file mode 100644
index d1cdf391e2..0000000000
--- a/mindspore/lite/src/ops/bias_add.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_
-#define LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include <memory>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class BiasAdd : public PrimitiveC {
- public:
-  BiasAdd() = default;
-  ~BiasAdd() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BiasAdd, PrimitiveC);
-  explicit BiasAdd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_BIAS_ADD_H_
diff --git a/mindspore/lite/src/ops/bias_grad.cc b/mindspore/lite/src/ops/bias_grad.cc
deleted file mode 100644
index 162c807c05..0000000000
--- a/mindspore/lite/src/ops/bias_grad.cc
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/bias_grad.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int BiasGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_BiasGrad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_BiasGrad) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::BiasGradT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-
-    this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "primitive value is nullptr";
-      return RET_ERROR;
-    }
-    this->primitive_->value.value = attr;
-  }
-  return RET_OK;
-}
-#else
-int BiasGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BiasGrad();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BiasGrad return nullptr";
-    return RET_ERROR;
-  }
-
-  auto val_offset = schema::CreateBiasGrad(*fbb);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BiasGrad, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *BiasGradCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<BiasGrad>(primitive);
-}
-Registry BiasGradRegistry(schema::PrimitiveType_BiasGrad, BiasGradCreator);
-#endif
-
-int BiasGrad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (inputs.size() != 1) {
-    MS_LOG(ERROR) << "BiasGrad should have one input";
-    return RET_ERROR;
-  }
-  if (outputs.size() != 1) {
-    MS_LOG(ERROR) << "BiasGrad should have one output";
-    return RET_ERROR;
-  }
-  auto *in0 = inputs.front();
-  auto *out = outputs.front();
-  MS_ASSERT(in0 != nullptr);
-  MS_ASSERT(out != nullptr);
-
-  auto inshape = in0->shape();
-  int ndim = inshape.size();
-  for (int i = 0; i < ndim - 1; i++) {
-    inshape[i] = 1;
-  }
-  out->set_shape(inshape);
-  out->set_data_type(in0->data_type());
-  out->set_format(in0->format());
-
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/bias_grad.h b/mindspore/lite/src/ops/bias_grad.h
deleted file mode 100644
index 44df55a8cd..0000000000
--- a/mindspore/lite/src/ops/bias_grad.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_SRC_OPS_BIAS_GRAD_H_
-#define MINDSPORE_LITE_SRC_OPS_BIAS_GRAD_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include <memory>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class BiasGrad : public PrimitiveC {
- public:
-  BiasGrad() = default;
-  ~BiasGrad() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BiasGrad, PrimitiveC);
-  explicit BiasGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetAxis(const std::vector<int> &axis);
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) override;
-  std::vector<int> GetAxis() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // MINDSPORE_LITE_SRC_OPS_BIAS_GRAD_H_
diff --git a/mindspore/lite/src/ops/binary_cross_entropy.cc b/mindspore/lite/src/ops/binary_cross_entropy.cc
deleted file mode 100644
index da06fff538..0000000000
--- a/mindspore/lite/src/ops/binary_cross_entropy.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string>
-#include "src/ops/binary_cross_entropy.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int BinaryCrossEntropy::GetReduction() const { return this->primitive_->value.AsBinaryCrossEntropy()->reduction; }
-
-int BinaryCrossEntropy::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitive error";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_BinaryCrossEntropy;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_BinaryCrossEntropy) {
-    MS_LOG(ERROR) << "PrimitiveType_BinaryCrossEntropy primitive value type : "
-                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
-                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_BinaryCrossEntropy);
-    delete this->primitive_;
-    this->primitive_ = nullptr;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    schema::BinaryCrossEntropyT *attr = new (std::nothrow) schema::BinaryCrossEntropyT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new binary cross entropy attr failed!";
-      delete this->primitive_;
-      this->primitive_ = nullptr;
-      return RET_ERROR;
-    }
-    // default is mean
-    string reduction = "mean";
-    if (prim.GetAttr("reduction") == nullptr) {
-      MS_LOG(ERROR) << "get reduction failed!";
-      delete this->primitive_;
-      delete attr;
-      this->primitive_ = nullptr;
-      attr = nullptr;
-      return RET_ERROR;
-    } else {
-      reduction = GetValue<string>(prim.GetAttr("reduction"));
-    }
-    if (reduction == "none") {
-      attr->reduction = 0;
-    } else if (reduction == "sum") {
-      attr->reduction = 2;
-    } else {
-      // default is mean
-      attr->reduction = 1;
-    }
-    this->primitive_->value.value = attr;
-  }
-
-  return RET_OK;
-}
-#else
-int BinaryCrossEntropy::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BinaryCrossEntropy();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BinaryCrossEntropy return nullptr";
-    return RET_ERROR;
-  }
-  int reduction = attr->reduction();
-  auto val_offset = schema::CreateBinaryCrossEntropy(*fbb, reduction);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BinaryCrossEntropy, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-int BinaryCrossEntropy::GetReduction() const { return this->primitive_->value_as_BinaryCrossEntropy()->reduction(); }
-
-PrimitiveC *BinaryCrossEntropyCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<BinaryCrossEntropy>(primitive);
-}
-Registry BinaryCrossEntropyRegistry(schema::PrimitiveType_BinaryCrossEntropy, BinaryCrossEntropyCreator);
-#endif
-int BinaryCrossEntropy::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_.at(0);
-  Tensor *out = outputs_.at(0);
-  out->set_format(x->format());
-  out->set_data_type(x->data_type());
-  int reduction = GetReduction();
-  if (reduction == 1 || reduction == 2) {
-    out->set_shape({1});
-  } else {
-    std::vector<int> x_shape = x->shape();
-    std::vector<int> output_shape(x_shape.size());
-    output_shape.assign(x_shape.begin(), x_shape.end());
-    out->set_shape(output_shape);
-  }
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/binary_cross_entropy.h b/mindspore/lite/src/ops/binary_cross_entropy.h
deleted file mode 100644
index c9ad936770..0000000000
--- a/mindspore/lite/src/ops/binary_cross_entropy.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-#ifndef LITE_SRC_OPS_BINARYCROSSENTROPY_H_
-#define LITE_SRC_OPS_BINARYCROSSENTROPY_H_
-namespace mindspore {
-namespace lite {
-class BinaryCrossEntropy : public PrimitiveC {
- public:
-  BinaryCrossEntropy() = default;
-  ~BinaryCrossEntropy() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BinaryCrossEntropy, PrimitiveC);
-
-  explicit BinaryCrossEntropy(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-
-  int GetReduction() const;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-
-  int GetReduction() const;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-};
-}  // namespace lite
-}  // namespace mindspore
-#endif  // LITE_SRC_OPS_BINARYCROSSENTROPY_H_
diff --git a/mindspore/lite/src/ops/binary_cross_entropy_grad.cc b/mindspore/lite/src/ops/binary_cross_entropy_grad.cc
deleted file mode 100644
index 61016b1075..0000000000
--- a/mindspore/lite/src/ops/binary_cross_entropy_grad.cc
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <string>
-#include "src/ops/binary_cross_entropy_grad.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-
-int BinaryCrossEntropyGrad::GetReduction() const {
-  return this->primitive_->value.AsBinaryCrossEntropyGrad()->reduction;
-}
-
-int BinaryCrossEntropyGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitive error";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_BinaryCrossEntropyGrad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_BinaryCrossEntropyGrad) {
-    MS_LOG(ERROR) << "PrimitiveType_BinaryCrossEntropyGrad primitive value type : "
-                  << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal"
-                  << schema::EnumNamePrimitiveType(schema::PrimitiveType_BinaryCrossEntropyGrad);
-    delete this->primitive_;
-    this->primitive_ = nullptr;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    schema::BinaryCrossEntropyGradT *attr = new (std::nothrow) schema::BinaryCrossEntropyGradT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new binary cross entropy attr failed!";
-      delete this->primitive_;
-      this->primitive_ = nullptr;
-      return RET_ERROR;
-    }
-    // default is mean
-    string reduction = "mean";
-    if (prim.GetAttr("reduction") == nullptr) {
-      MS_LOG(ERROR) << "get reduction failed!";
-      delete this->primitive_;
-      delete attr;
-      this->primitive_ = nullptr;
-      attr = nullptr;
-      return RET_ERROR;
-    } else {
-      reduction = GetValue<string>(prim.GetAttr("reduction"));
-    }
-
-    if (reduction == "none") {
-      attr->reduction = 0;
-    } else if (reduction == "sum") {
-      attr->reduction = 2;
-    } else {
-      // default is mean
-      attr->reduction = 1;
-    }
-    this->primitive_->value.value = attr;
-  }
-
-  return RET_OK;
-}
-#else
-int BinaryCrossEntropyGrad::UnPackToFlatBuilder(const schema::Primitive *primitive,
-                                                flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BinaryCrossEntropyGrad();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BinaryCrossEntropyGrad return nullptr";
-    return RET_ERROR;
-  }
-  int reduction = attr->reduction();
-  auto val_offset = schema::CreateBinaryCrossEntropyGrad(*fbb, reduction);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BinaryCrossEntropyGrad, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-int BinaryCrossEntropyGrad::GetReduction() const {
-  return this->primitive_->value_as_BinaryCrossEntropyGrad()->reduction();
-}
-
-PrimitiveC *BinaryCrossEntropyGradCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<BinaryCrossEntropyGrad>(primitive);
-}
-Registry BinaryCrossEntropyGradRegistry(schema::PrimitiveType_BinaryCrossEntropyGrad, BinaryCrossEntropyGradCreator);
-#endif
-int BinaryCrossEntropyGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  Tensor *x = inputs_[0];
-  Tensor *out = outputs_[0];
-  out->set_format(x->format());
-  out->set_data_type(x->data_type());
-  std::vector<int> x_shape = x->shape();
-  std::vector<int> output_shape(x_shape.size());
-  output_shape.assign(x_shape.begin(), x_shape.end());
-  out->set_shape(output_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
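[Reviewer aside, not part of the applied patch: BinaryCrossEntropy and BinaryCrossEntropyGrad, both deleted above, encode the "reduction" attribute the same way (0 = none, 1 = mean, 2 = sum, with mean as the fallback), and the forward op's InferShape collapses the output to shape {1} whenever reduction is mean or sum. A minimal sketch of that shared mapping, with a hypothetical helper name that does not exist anywhere in this patch:

    // Sketch only; ReductionToEnum is a hypothetical name, not an API in this tree.
    inline int ReductionToEnum(const std::string &reduction) {
      if (reduction == "none") return 0;  // per-element losses, output keeps the input shape
      if (reduction == "sum") return 2;   // scalar loss, output shape {1}
      return 1;                           // "mean" is the default, also shape {1}
    }

Nothing below depends on this helper; it is only to make the removed encoding easy to check at a glance.]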
diff --git a/mindspore/lite/src/ops/binary_cross_entropy_grad.h b/mindspore/lite/src/ops/binary_cross_entropy_grad.h
deleted file mode 100644
index bb21020541..0000000000
--- a/mindspore/lite/src/ops/binary_cross_entropy_grad.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-#ifndef LITE_SRC_OPS_BINARY_CROSS_ENTROPY_GRAD_H_
-#define LITE_SRC_OPS_BINARY_CROSS_ENTROPY_GRAD_H_
-namespace mindspore {
-namespace lite {
-class BinaryCrossEntropyGrad : public PrimitiveC {
- public:
-  BinaryCrossEntropyGrad() = default;
-  ~BinaryCrossEntropyGrad() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BinaryCrossEntropyGrad, PrimitiveC);
-
-  explicit BinaryCrossEntropyGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-
-  int GetReduction() const;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-
-  int GetReduction() const;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-};
-}  // namespace lite
-}  // namespace mindspore
-#endif  // LITE_SRC_OPS_BINARY_CROSS_ENTROPY_GRAD_H_
diff --git a/mindspore/lite/src/ops/bn_grad.cc b/mindspore/lite/src/ops/bn_grad.cc
deleted file mode 100644
index 99604e2d51..0000000000
--- a/mindspore/lite/src/ops/bn_grad.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/bn_grad.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-float BNGrad::GetEps() const { return this->primitive_->value.AsBNGrad()->eps; }
-float BNGrad::GetMomentum() const { return this->primitive_->value.AsBNGrad()->momentum; }
-
-void BNGrad::SetEps(float eps) { this->primitive_->value.AsBNGrad()->eps = eps; }
-void BNGrad::SetMomentum(float momentum) { this->primitive_->value.AsBNGrad()->momentum = momentum; }
-int BNGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_BNGrad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_BNGrad) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::BNGradT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    attr->momentum = 0.1f;
-    if (prim.GetAttr("momentum") != nullptr) {
-      attr->momentum = GetValue<float>(prim.GetAttr("momentum"));
-    }
-    attr->eps = 1e-5;
-    if (prim.GetAttr("epsilon") != nullptr) {
-      attr->eps = GetValue<float>(prim.GetAttr("epsilon"));
-    }
-    this->primitive_->value.value = attr;
-  }
-  return RET_OK;
-}
-#else
-int BNGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BNGrad();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BNGradInput return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateBNGrad(*fbb, attr->eps(), attr->momentum());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BNGrad, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *BNGradCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<BNGrad>(primitive); }
-Registry BNGradRegistry(schema::PrimitiveType_BNGrad, BNGradCreator);
-
-float BNGrad::GetEps() const { return this->primitive_->value_as_BNGrad()->eps(); }
-float BNGrad::GetMomentum() const { return this->primitive_->value_as_BNGrad()->momentum(); }
-#endif
-int BNGrad::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) {
-  if (inputs.size() != 6) {
-    MS_LOG(ERROR) << "BNGrad should have five inputs";
-    return RET_ERROR;
-  }
-  if (outputs.size() != 3) {
-    MS_LOG(ERROR) << "BNGrad should have three outputs";
-    return RET_ERROR;
-  }
-  auto in = inputs[1];
-  auto scale = inputs[2];
-
-  if (in->shape().size() != 4) {
-    MS_LOG(ERROR) << "Grad Fused batchnorm only support nhwc input!";
-  }
-
-  outputs[0]->set_shape(in->shape());
-  outputs[1]->set_shape(scale->shape());
-  outputs[2]->set_shape(scale->shape());
-  outputs[0]->set_data_type(in->data_type());
-  outputs[1]->set_data_type(scale->data_type());
-  outputs[2]->set_data_type(scale->data_type());
-  outputs[0]->set_format(in->format());
-  outputs[1]->set_format(scale->format());
-  outputs[2]->set_format(scale->format());
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/bn_grad.h b/mindspore/lite/src/ops/bn_grad.h
deleted file mode 100644
index a0b68ea45e..0000000000
--- a/mindspore/lite/src/ops/bn_grad.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MINDSPORE_LITE_SRC_OPS_BN_GRAD_H_
-#define MINDSPORE_LITE_SRC_OPS_BN_GRAD_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class BNGrad : public PrimitiveC {
- public:
-  BNGrad() = default;
-  ~BNGrad() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BNGrad, PrimitiveC);
-  explicit BNGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetEps(float eps);
-  void SetMomentum(float momentum);
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  float GetEps() const;
-  float GetMomentum() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // MINDSPORE_LITE_SRC_OPS_BN_GRAD_H_
diff --git a/mindspore/lite/src/ops/broadcast_to.cc b/mindspore/lite/src/ops/broadcast_to.cc
deleted file mode 100644
index e5a891af84..0000000000
--- a/mindspore/lite/src/ops/broadcast_to.cc
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/broadcast_to.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-std::vector<int> BroadcastTo::GetDstShape() const { return this->primitive_->value.AsBroadcastTo()->dst_shape; }
-
-void BroadcastTo::SetDstShape(const std::vector<int> &dst_shape) {
-  this->primitive_->value.AsBroadcastTo()->dst_shape = dst_shape;
-}
-
-#else
-int BroadcastTo::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_BroadcastTo();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_BroadcastTo return nullptr";
-    return RET_ERROR;
-  }
-  std::vector<int32_t> dst_shape;
-  if (attr->dst_shape() != nullptr) {
-    for (int i = 0; i < static_cast<int>(attr->dst_shape()->size()); i++) {
-      dst_shape.push_back(attr->dst_shape()->data()[i]);
-    }
-  }
-  auto val_offset = schema::CreateBroadcastToDirect(*fbb, &dst_shape);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_BroadcastTo, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-std::vector<int> BroadcastTo::GetDstShape() const {
-  auto fb_vector = this->primitive_->value_as_BroadcastTo()->dst_shape();
-  return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-
-PrimitiveC *BroadcastToCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<BroadcastTo>(primitive);
-}
-Registry BroadcastToRegistry(schema::PrimitiveType_BroadcastTo, BroadcastToCreator);
-#endif
-
-namespace {
-constexpr int kBroadcastToInputNum = 1;
-constexpr int kBroadcastToOnnxInputNum = 2;
-constexpr int kBroadcastToOutputNum = 1;
-}  // namespace
-
-int BroadcastTo::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (inputs.size() != kBroadcastToInputNum && inputs.size() != kBroadcastToOnnxInputNum) {
-    MS_LOG(ERROR) << "input size:" << inputs.size();
-    return RET_PARAM_INVALID;
-  }
-  if (outputs.size() != kBroadcastToOutputNum) {
-    MS_LOG(ERROR) << "output size:" << outputs.size();
-    return RET_PARAM_INVALID;
-  }
-
-  auto input = inputs.at(0);
-  outputs[0]->set_format(input->format());
-  outputs[0]->set_data_type(input->data_type());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  std::vector<int32_t> dst_shape(GetDstShape());
-  for (size_t i = 0; i < dst_shape.size(); ++i) {
-    if (dst_shape[i] == -1) {
-      dst_shape[i] = inputs[0]->shape()[i];
-    }
-  }
-  auto input_shape = input->shape();
-  std::vector<int> shape(dst_shape.size());
-  int input_shape_index = input_shape.size() - 1;
-  if (input_shape.size() > dst_shape.size()) {
-    MS_LOG(ERROR) << "input shape size " << input_shape.size() << " should <= broadcast to shape size "
-                  << dst_shape.size() << "!";
-    return RET_PARAM_INVALID;
-  }
-
-  for (int i = dst_shape.size() - 1; i >= 0; --i) {
-    if (dst_shape[i] < 0) {
-      MS_LOG(ERROR) << "shape[" << i << "] = " << dst_shape[i] << " ] should be > 0!";
-      return RET_PARAM_INVALID;
-    }
-    if (input_shape_index >= 0) {
-      auto dim = input_shape[input_shape_index];
-      if (dim != dst_shape[i] && dim != 1) {
-        MS_LOG(ERROR) << "Invalid broadcast shape!";
-        return RET_PARAM_INVALID;
-      }
-    }
-    shape[i] = dst_shape[i];
-    --input_shape_index;
-  }
-  outputs[0]->set_shape(shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/broadcast_to.h b/mindspore/lite/src/ops/broadcast_to.h
deleted file mode 100644
index 4794a38bac..0000000000
--- a/mindspore/lite/src/ops/broadcast_to.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_
-#define LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include <memory>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class BroadcastTo : public PrimitiveC {
- public:
-  BroadcastTo() = default;
-  ~BroadcastTo() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(BroadcastTo, PrimitiveC);
-  explicit BroadcastTo(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetDstShape(const std::vector<int> &dst_shape);
-
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  std::vector<int> GetDstShape() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_BROADCAST_TO_H_
diff --git a/mindspore/lite/src/ops/cast.cc b/mindspore/lite/src/ops/cast.cc
deleted file mode 100644
index abebfe508e..0000000000
--- a/mindspore/lite/src/ops/cast.cc
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/cast.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int Cast::GetSrcT() const { return this->primitive_->value.AsCast()->srcT; }
-int Cast::GetDstT() const { return this->primitive_->value.AsCast()->dstT; }
-
-void Cast::SetSrcT(int src_t) { this->primitive_->value.AsCast()->srcT = src_t; }
-void Cast::SetDstT(int dst_t) { this->primitive_->value.AsCast()->dstT = dst_t; }
-
-int Cast::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_Cast;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_Cast) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::CastT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    auto srcAnf = reinterpret_cast<mindspore::Number *>(prim.GetAttr("SrcT").get());
-    auto dstAnf = reinterpret_cast<mindspore::Number *>(prim.GetAttr("DstT").get());
-    attr->srcT = srcAnf->number_type();
-    attr->dstT = dstAnf->number_type();
-    this->primitive_->value.value = attr;
-  }
-
-  return RET_OK;
-}
-
-#else
-int Cast::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Cast();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Cast return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateCast(*fbb, attr->srcT(), attr->dstT());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Cast, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-int Cast::GetSrcT() const { return this->primitive_->value_as_Cast()->srcT(); }
-int Cast::GetDstT() const { return this->primitive_->value_as_Cast()->dstT(); }
-
-PrimitiveC *CastCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Cast>(primitive); }
-Registry CastRegistry(schema::PrimitiveType_Cast, CastCreator);
-#endif
-
-int Cast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  auto input = inputs_.front();
-  MS_ASSERT(input != nullptr);
-  auto output = outputs_.front();
-  MS_ASSERT(output != nullptr);
-  if (outputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "tensor number is error.";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  output->set_format(input->format());
-
-  output->set_data_type(static_cast<TypeId>(GetDstT()));
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  if (GetSrcT() != 0 && input->data_type() != GetSrcT()) {
-    MS_LOG(ERROR) << "input dataType is error";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  if (kSupportDataType.find(input->data_type()) == kSupportDataType.end()) {
-    MS_LOG(ERROR) << "Unsupported input data type " << input->data_type();
-    return RET_INPUT_TENSOR_ERROR;
-  }
-
-  output->set_shape(input->shape());
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/cast.h b/mindspore/lite/src/ops/cast.h
deleted file mode 100644
index 4ef1d67cce..0000000000
--- a/mindspore/lite/src/ops/cast.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_CAST_H_
-#define LITE_MINDSPORE_LITE_C_OPS_CAST_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Cast : public PrimitiveC {
- public:
-  Cast() = default;
-  ~Cast() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Cast, PrimitiveC);
-  explicit Cast(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetSrcT(int src_t);
-  void SetDstT(int dst_t);
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  int GetSrcT() const;
-  int GetDstT() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_CAST_H_
diff --git a/mindspore/lite/src/ops/ceil.cc b/mindspore/lite/src/ops/ceil.cc
deleted file mode 100644
index 208cf2ecac..0000000000
--- a/mindspore/lite/src/ops/ceil.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/ceil.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifndef PRIMITIVE_WRITEABLE
-PrimitiveC *CeilCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Ceil>(primitive); }
-Registry CeilRegistry(schema::PrimitiveType_Ceil, CeilCreator);
-#endif
-
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/ceil.h b/mindspore/lite/src/ops/ceil.h
deleted file mode 100644
index 41d56ac797..0000000000
--- a/mindspore/lite/src/ops/ceil.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_CEIL_H_
-#define LITE_MINDSPORE_LITE_C_OPS_CEIL_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/arithmetic_self.h"
-
-namespace mindspore {
-namespace lite {
-class Ceil : public ArithmeticSelf {
- public:
-  Ceil() = default;
-  ~Ceil() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Ceil, ArithmeticSelf);
-  explicit Ceil(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {}
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override {
-    MS_ASSERT(nullptr != primitive);
-    MS_ASSERT(nullptr != fbb);
-    auto val_offset = schema::CreateCeil(*fbb);
-    auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Ceil, val_offset.o);
-    fbb->Finish(prim_offset);
-    return RET_OK;
-  }
-#endif
-};
-
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_CEIL_H_
diff --git a/mindspore/lite/src/ops/clip.cc b/mindspore/lite/src/ops/clip.cc
deleted file mode 100644
index 9d545f871e..0000000000
--- a/mindspore/lite/src/ops/clip.cc
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/clip.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-float Clip::GetMax() const { return this->primitive_->value.AsClip()->max; }
-float Clip::GetMin() const { return this->primitive_->value.AsClip()->min; }
-
-void Clip::SetMax(float max) { this->primitive_->value.AsClip()->max = max; }
-void Clip::SetMin(float min) { this->primitive_->value.AsClip()->min = min; }
-
-#else
-int Clip::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Clip();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Clip return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateClip(*fbb, attr->max(), attr->min());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Clip, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-float Clip::GetMax() const { return this->primitive_->value_as_Clip()->max(); }
-float Clip::GetMin() const { return this->primitive_->value_as_Clip()->min(); }
-
-PrimitiveC *ClipCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Clip>(primitive); }
-Registry ClipRegistry(schema::PrimitiveType_Clip, ClipCreator);
-#endif
-
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/clip.h b/mindspore/lite/src/ops/clip.h
deleted file mode 100644
index 6c451d9e57..0000000000
--- a/mindspore/lite/src/ops/clip.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_CLIP_H_
-#define LITE_MINDSPORE_LITE_C_OPS_CLIP_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Clip : public PrimitiveC {
- public:
-  Clip() = default;
-  ~Clip() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Clip, PrimitiveC);
-  explicit Clip(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetMax(float max);
-  void SetMin(float min);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  float GetMax() const;
-  float GetMin() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_CLIP_H_
diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.cc b/mindspore/lite/src/ops/compat/attr_transfer_common.cc
index 633482ea24..c981ba6f44 100644
--- a/mindspore/lite/src/ops/compat/attr_transfer_common.cc
+++ b/mindspore/lite/src/ops/compat/attr_transfer_common.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
diff --git a/mindspore/lite/src/ops/compat/attr_transfer_common.h b/mindspore/lite/src/ops/compat/attr_transfer_common.h
index 6ecf2be251..265db8db22 100644
--- a/mindspore/lite/src/ops/compat/attr_transfer_common.h
+++ b/mindspore/lite/src/ops/compat/attr_transfer_common.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@
 #include "ir/dtype/type_id.h"
 #include "src/tensor.h"
 #include "include/errorcode.h"
-#include "schema/model_v0_generated.h"
 #include "src/common/common.h"
 #include "src/ops/compat/compat_register.h"
diff --git a/mindspore/lite/src/ops/compat/compat_register.h b/mindspore/lite/src/ops/compat/compat_register.h
index 8285d1e7f2..61352d6dd5 100644
--- a/mindspore/lite/src/ops/compat/compat_register.h
+++ b/mindspore/lite/src/ops/compat/compat_register.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
+ * Copyright 2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@
 namespace mindspore {
 namespace lite {
 // compatibility, transfer attr to input tensor.
-typedef int (*TransferAttrFunc)(const void *primitive, Model::Node *node, std::vector<schema::Tensor *> *tensor,
+typedef int (*TransferAttrFunc)(Model::Node *node, std::vector<schema::Tensor *> *tensor,
                                 std::vector<char *> *tensor_bufs);
 class CompatRegistry {
  public:
diff --git a/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc
new file mode 100644
index 0000000000..fd944e821c
--- /dev/null
+++ b/mindspore/lite/src/ops/compat/v0/broadcast_to_compat_v0.cc
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/compat/attr_transfer_common.h"
+
+namespace mindspore {
+namespace lite {
+int TransferBroadcastToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                            std::vector<char *> *tensor_bufs) {
+  if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
+    MS_LOG(ERROR) << "the parameter of this function is nullptr.";
+    return RET_ERROR;
+  }
+  if (node->input_indices_.size() != 1) {
+    MS_LOG(DEBUG) << "broadcast_to don't need to convert attr to tensor.";
+    return RET_OK;
+  }
+  dst_tensors->clear();
+  auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_);
+  auto dst_shape_attr = prim->value_as_BroadcastTo()->dst_shape();
+  std::vector<int> dst_shape = std::vector<int>(dst_shape_attr->begin(), dst_shape_attr->end());
+  auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs);
+  if (dst_shape_tensor == nullptr) {
+    MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed.";
+    return RET_NULL_PTR;
+  }
+  dst_tensors->push_back(dst_shape_tensor);
+  return RET_OK;
+}
+
+Register BroadcastToTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_BroadcastTo,
+                                     TransferBroadcastToAttr);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/compat/v0/broadcat_to_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/broadcat_to_compat_v0.cc
deleted file mode 100644
index 6959fd70b2..0000000000
--- a/mindspore/lite/src/ops/compat/v0/broadcat_to_compat_v0.cc
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/compat/attr_transfer_common.h"
-
-namespace mindspore {
-namespace lite {
-int TransferBroadcastToAttr(const void *primitive, Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
-                            std::vector<char *> *tensor_bufs) {
-  if (primitive == nullptr || node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
-    MS_LOG(ERROR) << "the parameter of this function is nullptr.";
-    return RET_ERROR;
-  }
-  if (node->input_indices_.size() != 1) {
-    MS_LOG(DEBUG) << "broadcast_to don't need to convert attr to tensor.";
-    return RET_OK;
-  }
-  dst_tensors->clear();
-  auto prim = reinterpret_cast<const schema::v0::Primitive *>(primitive);
-  auto dst_shape_attr = prim->value_as_BroadcastTo()->dst_shape();
-  std::vector<int> dst_shape = std::vector<int>(dst_shape_attr->begin(), dst_shape_attr->end());
-  auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs);
-  if (dst_shape_tensor == nullptr) {
-    MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed.";
-    return RET_NULL_PTR;
-  }
-  dst_tensors->push_back(dst_shape_tensor);
-  return RET_OK;
-}
-
-Register BroadcastToTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_BroadcastTo,
-                                     TransferBroadcastToAttr);
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc
new file mode 100644
index 0000000000..583f7cc93c
--- /dev/null
+++ b/mindspore/lite/src/ops/compat/v0/cast_compat_v0.cc
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/compat/attr_transfer_common.h"
+
+namespace mindspore {
+namespace lite {
+int TransferCastAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) {
+  if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
+    MS_LOG(ERROR) << "the parameter of this function is nullptr.";
+    return RET_ERROR;
+  }
+  dst_tensors->clear();
+  auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_);
+  auto dst_type_attr = prim->value_as_Cast()->dstT();
+  auto dst_type_tensor = AttrToTensor(&dst_type_attr, 1, false, kNumberTypeInt32, tensor_bufs);
+  if (dst_type_tensor == nullptr) {
+    MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed.";
+    return RET_NULL_PTR;
+  }
+  dst_tensors->push_back(dst_type_tensor);
+  return RET_OK;
+}
+
+Register CastTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Cast, TransferCastAttr);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc
new file mode 100644
index 0000000000..140784ba5a
--- /dev/null
+++ b/mindspore/lite/src/ops/compat/v0/expand_dims_compat_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/compat/attr_transfer_common.h"
+
+namespace mindspore {
+namespace lite {
+int TransferExpandDimsAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors,
+                           std::vector<char *> *tensor_bufs) {
+  // Also guard node->primitive_, which is dereferenced below; the sibling transfer functions check it too.
+  if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) {
+    MS_LOG(ERROR) << "the parameter of this function is nullptr.";
+    return RET_ERROR;
+  }
+  MS_ASSERT(node->input_indices_.size() == 1);
+  MS_ASSERT(dst_tensors->size() == 0);
+
+  auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_);
+  int32_t dim = prim->value_as_ExpandDims()->dim();
+  auto dim_tensor = AttrToTensor(&dim, 1, false, kNumberTypeInt32, tensor_bufs);
+  if (dim_tensor == nullptr) {
+    MS_LOG(ERROR) << "transfer expand dim tensor failed.";
+    return RET_NULL_PTR;
+  }
+  dst_tensors->push_back(dim_tensor);
+  return RET_OK;
+}
+
+Register ExpandDimsTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_ExpandDims,
+                                    TransferExpandDimsAttr);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc
new file mode 100644
index 0000000000..5ab1f62148
--- /dev/null
+++ b/mindspore/lite/src/ops/compat/v0/fill_compat_v0.cc
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferFillToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "fill don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto dims_attr = prim->value_as_Fill()->dims(); + std::vector<int> dims = std::vector<int>(dims_attr->begin(), dims_attr->end()); + auto dims_tensor = AttrToTensor(dims.data(), dims.size(), true, kNumberTypeInt32, tensor_bufs); + if (dims_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(dims_tensor); + return RET_OK; +} + +Register FillTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Fill, TransferFillToAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc new file mode 100644 index 0000000000..179e65a433 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/gather_compat_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferGatherAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto axis_attr = prim->value_as_Gather()->axis(); + auto axis_tensor = AttrToTensor(&axis_attr, 1, false, kNumberTypeInt32, tensor_bufs); + if (axis_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(axis_tensor); + return RET_OK; +} + +Register GatherTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Gather, TransferGatherAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc new file mode 100644 index 0000000000..1164ebd821 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/nchw2nhwc_compat_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferNchw2NhwcAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "nchw2nhwc don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + std::vector<int> dst_shape{0, 2, 3, 1}; // nchw to nhwc + auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (dst_shape_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(dst_shape_tensor); + return RET_OK; +} + +Register Nchw2NhwcTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Nchw2Nhwc, + TransferNchw2NhwcAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc new file mode 100644 index 0000000000..0d6297a7f2 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/nhwc2nchw_compat_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferNhwc2NchwAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "nhwc2nchw don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + std::vector<int> dst_shape{0, 3, 1, 2}; // nhwc to nchw + auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (dst_shape_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(dst_shape_tensor); + return RET_OK; +} + +Register Nhwc2NchwTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Nhwc2Nchw, + TransferNhwc2NchwAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc new file mode 100644 index 0000000000..f4fb1c5416 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/pad_compat_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferPadAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + dst_tensors->clear(); + if (node->input_indices_.size() > 1) { + MS_LOG(DEBUG) << "pad don't need to convert attr to tensor."; + return RET_OK; + } + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto paddings_attr = prim->value_as_Pad()->paddings(); + std::vector<int> paddings = std::vector<int>(paddings_attr->begin(), paddings_attr->end()); + auto paddings_tensor = AttrToTensor(paddings.data(), paddings.size(), true, kNumberTypeInt32, tensor_bufs); + if (paddings_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(paddings_tensor); + return RET_OK; +} + +Register PadTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Pad, TransferPadAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc new file mode 100644 index 0000000000..ec76fa82d9 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/permute_compat_v0.cc @@ -0,0 +1,51 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferPermuteAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "permute don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto order_attr = prim->value_as_Permute()->order(); + + std::vector<int> dst_shape; + for (auto it = order_attr->begin(); it != order_attr->end(); ++it) { + dst_shape.push_back(static_cast<int>(*it)); + } + auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (dst_shape_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(dst_shape_tensor); + return RET_OK; +} + +Register PermuteTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Permute, TransferPermuteAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc new file mode 100644 index 0000000000..ea999154d9 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/power_compat_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferPowerToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "power don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto power_attr = prim->value_as_Power()->power(); + auto power_tensor = AttrToTensor(&power_attr, 1, false, kNumberTypeFloat32, tensor_bufs); + if (power_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(power_tensor); + return RET_OK; +} + +Register PowerTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Power, TransferPowerToAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc new file mode 100644 index 0000000000..463f5edd10 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/reduce_compat_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferReduceToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "fill don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto axes_attr = prim->value_as_Reduce()->axes(); + std::vector<int> axes = std::vector<int>(axes_attr->begin(), axes_attr->end()); + auto axes_tensor = AttrToTensor(axes.data(), axes.size(), true, kNumberTypeInt32, tensor_bufs); + if (axes_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(axes_tensor); + return RET_OK; +} + +Register ReduceTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Reduce, TransferReduceToAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc index 622900116f..ec919c898b 100644 --- a/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/reshape_compat_v0.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,22 +14,23 @@ * limitations under the License. 
*/ +#include "schema/model_v0_generated.h" #include "src/ops/compat/attr_transfer_common.h" namespace mindspore { namespace lite { -int TransferReshapeAttr(const void *primitive, Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, +int TransferReshapeAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) { - if (primitive == nullptr || node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; } if (node->input_indices_.size() != 1) { - MS_LOG(DEBUG) << "reshape need to convert attr to tensor."; + MS_LOG(DEBUG) << "reshape don't need to convert attr to tensor."; return RET_OK; } dst_tensors->clear(); - auto prim = reinterpret_cast<const schema::v0::Primitive *>(primitive); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); auto dst_shape_attr = prim->value_as_Reshape()->shape(); std::vector<int> dst_shape = std::vector<int>(dst_shape_attr->begin(), dst_shape_attr->end()); auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs); diff --git a/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc new file mode 100644 index 0000000000..2472c7aa27 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/slice_compat_v0.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferSliceAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) { + if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "Slice don't need to convert attr to tensor."; + return RET_OK; + } + MS_ASSERT(dst_tensors->size() == 0); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + + /* transfer begin tensor */ + auto begin_attr = prim->value_as_Slice()->begin(); + std::vector<int32_t> begin_shape = std::vector<int>(begin_attr->begin(), begin_attr->end()); + auto begin_tensor = AttrToTensor(begin_shape.data(), begin_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (begin_tensor == nullptr) { + MS_LOG(ERROR) << "slice transfer begin failed"; + return RET_NULL_PTR; + } + dst_tensors->push_back(begin_tensor); + + /* transfer size tensor */ + auto size_attr = prim->value_as_Slice()->size(); + std::vector<int32_t> size_shape = std::vector<int>(size_attr->begin(), size_attr->end()); + auto size_tensor = AttrToTensor(size_shape.data(), size_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (size_tensor == nullptr) { + MS_LOG(ERROR) << "slice transfer size failed"; + return RET_NULL_PTR; + } + dst_tensors->push_back(size_tensor); + + return RET_OK; +} + +Register SliceTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Slice, TransferSliceAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc index 74556895b7..5bbd541d96 100644 --- a/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc +++ b/mindspore/lite/src/ops/compat/v0/strided_slice_compat_v0.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,18 +14,19 @@ * limitations under the License. 
*/ +#include "schema/model_v0_generated.h" #include "src/ops/compat/attr_transfer_common.h" namespace mindspore { namespace lite { -int TransferStridedSliceAttr(const void *primitive, Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, +int TransferStridedSliceAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) { - if (primitive == nullptr || node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { MS_LOG(ERROR) << "the parameter of this function is nullptr."; return RET_ERROR; } dst_tensors->clear(); - auto prim = reinterpret_cast<const schema::v0::Primitive *>(primitive); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); int inputs_size = node->input_indices_.size(); switch (inputs_size) { case 1: { diff --git a/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc new file mode 100644 index 0000000000..3960fef8eb --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/tile_compat_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferTileToAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "tile don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto multiples_attr = prim->value_as_Tile()->multiples(); + std::vector<int> multiples = std::vector<int>(multiples_attr->begin(), multiples_attr->end()); + auto multiples_tensor = AttrToTensor(multiples.data(), multiples.size(), true, kNumberTypeInt32, tensor_bufs); + if (multiples_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(multiples_tensor); + return RET_OK; +} + +Register TileTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Tile, TransferTileToAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc new file mode 100644 index 0000000000..48ce7bd234 --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/topk_compat_v0.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferTopkAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, std::vector<char *> *tensor_bufs) { + if (node == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "topK need to convert attr to tensor."; + return RET_OK; + } + MS_ASSERT(dst_tensors->size() == 0); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + int32_t topk_k = prim->value_as_TopK()->k(); + auto k_tensor = AttrToTensor(&topk_k, 1, false, kNumberTypeInt32, tensor_bufs); + if (k_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(k_tensor); + return RET_OK; +} + +Register TopkTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_TopK, TransferTopkAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc b/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc new file mode 100644 index 0000000000..b833e9659f --- /dev/null +++ b/mindspore/lite/src/ops/compat/v0/transpose_compat_v0.cc @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/compat/attr_transfer_common.h" + +namespace mindspore { +namespace lite { +int TransferTransposeAttr(Model::Node *node, std::vector<schema::Tensor *> *dst_tensors, + std::vector<char *> *tensor_bufs) { + if (node == nullptr || node->primitive_ == nullptr || dst_tensors == nullptr || tensor_bufs == nullptr) { + MS_LOG(ERROR) << "the parameter of this function is nullptr."; + return RET_ERROR; + } + if (node->input_indices_.size() != 1) { + MS_LOG(DEBUG) << "transpose don't need to convert attr to tensor."; + return RET_OK; + } + dst_tensors->clear(); + auto prim = reinterpret_cast<const schema::v0::Primitive *>(node->primitive_); + auto perm_attr = prim->value_as_Transpose()->perm(); + std::vector<int> dst_shape = std::vector<int>(perm_attr->begin(), perm_attr->end()); + auto dst_shape_tensor = AttrToTensor(dst_shape.data(), dst_shape.size(), true, kNumberTypeInt32, tensor_bufs); + if (dst_shape_tensor == nullptr) { + MS_LOG(ERROR) << "attr tensor is nullptr, transform is failed."; + return RET_NULL_PTR; + } + dst_tensors->push_back(dst_shape_tensor); + return RET_OK; +} + +Register TransposeTransferRegistry(SCHEMA_VERSION::SCHEMA_V0, schema::v0::PrimitiveType_Transpose, + TransferTransposeAttr); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/concat.cc b/mindspore/lite/src/ops/concat.cc deleted file mode 100644 index 45eff714a1..0000000000 --- a/mindspore/lite/src/ops/concat.cc +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/concat.h" -#include <memory> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Concat::GetAxis() const { return this->primitive_->value.AsConcat()->axis; } - -void Concat::SetAxis(int axis) { this->primitive_->value.AsConcat()->axis = axis; } - -int Concat::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Concat; - } - if (this->primitive_->value.type != schema::PrimitiveType_Concat) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ConcatT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - auto prim_axis = CastToInt(prim.GetAttr("axis")).front(); - attr->axis = prim_axis; - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Concat::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Concat(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Concat return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateConcat(*fbb, attr->axis()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Concat, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Concat::GetAxis() const { return this->primitive_->value_as_Concat()->axis(); } - -PrimitiveC *ConcatCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Concat>(primitive); } -Registry ConcatRegistry(schema::PrimitiveType_Concat, ConcatCreator); - -#endif - -namespace { -constexpr int kConcatOutputNum = 1; -} -int Concat::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr!"; - return RET_PARAM_INVALID; - } - auto input0 = inputs_.front(); - auto output = outputs_.front(); - if (outputs_.size() != kConcatOutputNum) { - MS_LOG(ERROR) << "output size is error"; - return RET_PARAM_INVALID; - } - output->set_data_type(input0->data_type()); - output->set_format(input0->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - auto input0_shape = inputs_.at(0)->shape(); - auto axis = GetAxis() < 0 ? 
GetAxis() + input0_shape.size() : GetAxis(); - if (axis < 0 || axis >= input0_shape.size()) { - MS_LOG(ERROR) << "Invalid axis: " << axis; - return RET_PARAM_INVALID; - } - auto input0_shape_without_axis = input0_shape; - input0_shape_without_axis.erase(input0_shape_without_axis.begin() + axis); - int output_axis_dim = input0_shape.at(axis); - for (size_t i = 1; i < inputs_.size(); ++i) { - auto shape_tmp = inputs_.at(i)->shape(); - if (shape_tmp.size() != input0_shape.size()) { - MS_LOG(ERROR) << "All inputs should have the same dim num!"; - return RET_PARAM_INVALID; - } - if ((inputs_.at(i)->data_type() != output->data_type()) && - !((inputs_.at(i)->data_type() == kNumberTypeFloat16 && output->data_type() == kNumberTypeFloat32) || - (inputs_.at(i)->data_type() == kNumberTypeFloat32 && output->data_type() == kNumberTypeFloat16))) { - MS_LOG(ERROR) << "All inputs should have the same type!"; - return RET_PARAM_INVALID; - } - auto axis_tmp = shape_tmp[axis]; - shape_tmp.erase(shape_tmp.begin() + axis); - if (input0_shape_without_axis != shape_tmp) { - MS_LOG(ERROR) << "Inputs should have the same dim except axis!"; - return RET_PARAM_INVALID; - } - output_axis_dim += axis_tmp; - } - auto output_shape = input0_shape; - output_shape[axis] = output_axis_dim; - outputs_[0]->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/concat.h b/mindspore/lite/src/ops/concat.h deleted file mode 100644 index c12c7f94d3..0000000000 --- a/mindspore/lite/src/ops/concat.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Concat : public PrimitiveC { - public: - Concat() = default; - ~Concat() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Concat, PrimitiveC); - explicit Concat(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetAxis(int axis); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CONCAT_H_ diff --git a/mindspore/lite/src/ops/constant.h b/mindspore/lite/src/ops/constant.h deleted file mode 100644 index 659331c650..0000000000 --- a/mindspore/lite/src/ops/constant.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
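[For context on what the removed Concat::InferShape computed: every input must match the first input on all dimensions except `axis`, and the output's `axis` dimension is the sum of the inputs' dimensions there. A condensed illustration of that shape rule:]

    #include <vector>

    // All inputs must agree on every dimension except `axis`, which is summed.
    std::vector<int> ConcatShape(const std::vector<std::vector<int>> &shapes, int axis) {
      std::vector<int> out = shapes.front();
      for (size_t i = 1; i < shapes.size(); ++i) {
        out[axis] += shapes[i][axis];
      }
      return out;
    }
    // e.g. ConcatShape({{2, 3, 4}, {2, 5, 4}}, 1) yields {2, 8, 4}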
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifdef PRIMITIVE_WRITEABLE -#ifndef LITE_MINDSPORE_LITE_C_OPS_CONSTANT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CONSTANT_H_ - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Constant : public PrimitiveC { - public: - Constant() = default; - ~Constant() = default; - MS_DECLARE_PARENT(Constant, PrimitiveC); - explicit Constant(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CONSTANT_H_ -#endif diff --git a/mindspore/lite/src/ops/constant_of_shape.cc b/mindspore/lite/src/ops/constant_of_shape.cc deleted file mode 100644 index 5e5a78bce7..0000000000 --- a/mindspore/lite/src/ops/constant_of_shape.cc +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/constant_of_shape.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore::lite { -namespace { -constexpr int kShapeInputNum = 1; -constexpr int kShapeOutputNum = 1; -} // namespace -#ifdef PRIMITIVE_WRITEABLE -std::vector<float> ConstantOfShape::GetValue() const { return this->primitive_->value.AsConstantOfShape()->value; } - -int ConstantOfShape::GetDataType() const { return this->primitive_->value.AsConstantOfShape()->dataType; } - -#else -int ConstantOfShape::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ConstantOfShape(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ConstantOfShape return nullptr"; - return RET_ERROR; - } - std::vector<float> value; - if (attr->value() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->value()->size()); i++) { - value.push_back(attr->value()->data()[i]); - } - } - auto val_offset = schema::CreateConstantOfShapeDirect(*fbb, attr->dataType(), &value); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ConstantOfShape, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -std::vector<float> ConstantOfShape::GetValue() const { - auto fb_vector = this->primitive_->value_as_ConstantOfShape()->value(); - return std::vector<float>(fb_vector->begin(), fb_vector->end()); -} -int ConstantOfShape::GetDataType() const { return this->primitive_->value_as_ConstantOfShape()->dataType(); } - -PrimitiveC *ConstantOfShapeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ConstantOfShape>(primitive); -} -Registry ConstantOfShapeRegistry(schema::PrimitiveType_ConstantOfShape, ConstantOfShapeCreator); - -#endif - -int ConstantOfShape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != kShapeInputNum) { - MS_LOG(ERROR) << "inputs to ConstantOfShape operator should be 1, but " << inputs_.size() << " is given."; - return RET_ERROR; - } - if (inputs_.front() == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr!"; - return RET_PARAM_INVALID; - } - if (outputs_.size() != kShapeOutputNum) { - MS_LOG(ERROR) << "outputs to ConstantOfShape operator should be 1, but " << outputs_.size() << " is given."; - return RET_ERROR; - } - - auto in_tensor = inputs_.front(); - auto out_tensor = outputs_.front(); - out_tensor->set_data_type(static_cast<TypeId>(GetDataType())); - out_tensor->set_format(in_tensor->format()); - - if (!infer_flag() || in_tensor->data_c() == nullptr) { - return RET_INFER_INVALID; - } - - int size = in_tensor->ElementsNum(); - std::vector<int> out_shape(size); - - switch (in_tensor->data_type()) { - case kNumberTypeInt32: { - int32_t *in_data = reinterpret_cast<int32_t *>(in_tensor->data_c()); - for (int i = 0; i < size; ++i) { - out_shape[i] = in_data[i]; - MS_ASSERT(out_shape[i] > 0); - } - break; - } - case kNumberTypeInt64: { - int64_t *in_data = reinterpret_cast<int64_t *>(in_tensor->data_c()); - for (int i = 0; i < size; ++i) { - out_shape[i] = in_data[i]; - MS_ASSERT(out_shape[i] > 0); - } - break; - } - default: - MS_LOG(INFO) << "Invalid input data type!"; - return RET_INFER_INVALID; - } - - out_tensor->set_shape(out_shape); - return RET_OK; -} -} // namespace mindspore::lite diff --git 
a/mindspore/lite/src/ops/constant_of_shape.h b/mindspore/lite/src/ops/constant_of_shape.h deleted file mode 100644 index a72979a62a..0000000000 --- a/mindspore/lite/src/ops/constant_of_shape.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ConstantOfShape : public PrimitiveC { - public: - ConstantOfShape() = default; - ~ConstantOfShape() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ConstantOfShape, PrimitiveC); - explicit ConstantOfShape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<float> GetValue() const; - int GetDataType() const; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_SRC_OPS_CONSTANT_OF_SHAPE_H_ diff --git a/mindspore/lite/src/ops/control_depend.cc b/mindspore/lite/src/ops/control_depend.cc deleted file mode 100644 index c5296bdd17..0000000000 --- a/mindspore/lite/src/ops/control_depend.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
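[The removed ConstantOfShape::InferShape is unusual in that the output shape comes from the values of the input tensor rather than from its shape, hence the RET_INFER_INVALID bail-out when the input data is not yet available. A minimal equivalent of that read-out:]

    #include <cstdint>
    #include <vector>

    // Each element of the input tensor's data becomes one output dimension.
    std::vector<int> ShapeFromData(const int32_t *data, int element_num) {
      std::vector<int> out_shape(element_num);
      for (int i = 0; i < element_num; ++i) {
        out_shape[i] = data[i];
      }
      return out_shape;
    }
    // an input holding the two values {4, 5} produces output shape {4, 5}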
- */ -#include "src/ops/control_depend.h" -#include <vector> -#include <memory> - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ControlDepend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ControlDepend; - } - if (this->primitive_->value.type != schema::PrimitiveType_ControlDepend) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow)(schema::ControlDependT); - if (attr == nullptr) { - MS_LOG(ERROR) << "attr is nullptr"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -int ControlDepend::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateControlDepend(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ControlDepend, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/control_depend.h b/mindspore/lite/src/ops/control_depend.h deleted file mode 100644 index 0737dbc4c2..0000000000 --- a/mindspore/lite/src/ops/control_depend.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_CONTROL_DEPEND_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_CONTROL_DEPEND_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ControlDepend : public PrimitiveC { - public: - ControlDepend() = default; - ~ControlDepend() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ControlDepend, PrimitiveC); - explicit ControlDepend(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_CONTROL_DEPEND_H_ diff --git a/mindspore/lite/src/ops/conv2d.cc b/mindspore/lite/src/ops/conv2d.cc deleted file mode 100644 index c493586503..0000000000 --- a/mindspore/lite/src/ops/conv2d.cc +++ /dev/null @@ -1,417 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/conv2d.h" - -#include <map> -#include <memory> -#include <string> - -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#ifdef PRIMITIVE_WRITEABLE -#include <float.h> -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -int Conv2D::PadUp() const { return this->pad_u_; } -int Conv2D::PadDown() const { return this->pad_d_; } -int Conv2D::PadLeft() const { return this->pad_l_; } -int Conv2D::PadRight() const { return this->pad_r_; } -#ifdef PRIMITIVE_WRITEABLE -int Conv2D::GetFormat() const { return this->primitive_->value.AsConv2D()->format; } -int Conv2D::GetGroup() const { return this->primitive_->value.AsConv2D()->group; } -int Conv2D::GetChannelIn() const { return this->primitive_->value.AsConv2D()->channelIn; } -int Conv2D::GetChannelOut() const { return this->primitive_->value.AsConv2D()->channelOut; } -int Conv2D::GetKernelW() const { return this->primitive_->value.AsConv2D()->kernelW; } -int Conv2D::GetKernelH() const { return this->primitive_->value.AsConv2D()->kernelH; } -int Conv2D::GetStrideW() const { return this->primitive_->value.AsConv2D()->strideW; } -int Conv2D::GetStrideH() const { return this->primitive_->value.AsConv2D()->strideH; } -int Conv2D::GetPadMode() const { return this->primitive_->value.AsConv2D()->padMode; } -int Conv2D::GetPadUp() const { return this->primitive_->value.AsConv2D()->padUp; } -int Conv2D::GetPadDown() const { return this->primitive_->value.AsConv2D()->padDown; } -int Conv2D::GetPadLeft() const { return this->primitive_->value.AsConv2D()->padLeft; } -int Conv2D::GetPadRight() const { return this->primitive_->value.AsConv2D()->padRight; } -int Conv2D::GetDilateW() const { return this->primitive_->value.AsConv2D()->dilateW; } -int Conv2D::GetDilateH() const { return 
this->primitive_->value.AsConv2D()->dilateH; } -int Conv2D::GetActivationType() const { return this->primitive_->value.AsConv2D()->activationType; } - -void Conv2D::SetFormat(int format) { this->primitive_->value.AsConv2D()->format = (schema::Format)format; } -void Conv2D::SetGroup(int group) { this->primitive_->value.AsConv2D()->group = group; } -void Conv2D::SetChannelIn(int channel_in) { this->primitive_->value.AsConv2D()->channelIn = channel_in; } -void Conv2D::SetChannelOut(int channel_out) { this->primitive_->value.AsConv2D()->channelOut = channel_out; } -void Conv2D::SetKernelW(int kernel_w) { this->primitive_->value.AsConv2D()->kernelW = kernel_w; } -void Conv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsConv2D()->kernelH = kernel_h; } -void Conv2D::SetStrideW(int stride_w) { this->primitive_->value.AsConv2D()->strideW = stride_w; } -void Conv2D::SetStrideH(int stride_h) { this->primitive_->value.AsConv2D()->strideH = stride_h; } -void Conv2D::SetPadMode(int pad_mode) { this->primitive_->value.AsConv2D()->padMode = (schema::PadMode)pad_mode; } -void Conv2D::SetPadUp(int pad_up) { this->primitive_->value.AsConv2D()->padUp = pad_up; } -void Conv2D::SetPadDown(int pad_down) { this->primitive_->value.AsConv2D()->padDown = pad_down; } -void Conv2D::SetPadLeft(int pad_left) { this->primitive_->value.AsConv2D()->padLeft = pad_left; } -void Conv2D::SetPadRight(int pad_right) { this->primitive_->value.AsConv2D()->padRight = pad_right; } -void Conv2D::SetDilateW(int dilate_w) { this->primitive_->value.AsConv2D()->dilateW = dilate_w; } -void Conv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsConv2D()->dilateH = dilate_h; } -void Conv2D::SetActivationType(int activation_type) { - this->primitive_->value.AsConv2D()->activationType = (schema::ActivationType)activation_type; -} -template <typename T> -void ConvertConvWeight(const ParameterPtr &param_node) { - MS_ASSERT(param_node != nullptr); - auto param = param_node->default_param(); - auto weight = std::dynamic_pointer_cast<ParamValueLite>(param); - MS_ASSERT(weight != nullptr); - - std::unique_ptr<T[]> buf(new (std::nothrow) T[weight->tensor_shape_size()]); - - if (buf == nullptr) { - MS_LOG(ERROR) << "new buf failed"; - return; - } - - size_t filter_k = weight->tensor_shape().at(0); - size_t filter_c = weight->tensor_shape().at(1); - size_t filter_h = weight->tensor_shape().at(2); - size_t filter_w = weight->tensor_shape().at(3); - T *p1Buff = nullptr; - T *p2Buff = nullptr; - for (size_t k = 0; k < filter_k; ++k) { - for (size_t c = 0; c < filter_c; ++c) { - for (size_t h = 0; h < filter_h; ++h) { - for (size_t w = 0; w < filter_w; ++w) { - p1Buff = reinterpret_cast<float *>(weight->tensor_addr()) + - ((k * filter_c * filter_h * filter_w) + (c * filter_h * filter_w) + (h * filter_w) + (w)); - p2Buff = - buf.get() + ((c * filter_k * filter_h * filter_w) + (k * filter_h * filter_w) + (h * filter_w) + (w)); - *p2Buff = *p1Buff; - } - } - } - } - - auto ret = ::memcpy_s(weight->tensor_addr(), weight->tensor_shape_size() * sizeof(T), buf.get(), - weight->tensor_shape_size() * sizeof(T)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy_s failed: " << ret; - return; - } - - auto abstract_base = param_node->abstract(); - MS_ASSERT(abstract_base != nullptr); - if (utils::isa<abstract::AbstractTensorPtr>(abstract_base)) { - auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base); - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[0] = filter_c; - 
utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[1] = filter_k; - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[2] = filter_h; - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[3] = filter_w; - weight->set_tensor_shape( - {static_cast<int>(filter_c), static_cast<int>(filter_k), static_cast<int>(filter_h), static_cast<int>(filter_w)}); - } - return; -} -void Conv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, - const std::vector<AnfNodePtr> &inputs) { - auto attr = std::make_unique<schema::DepthwiseConv2DT>(); - if (attr.get() == nullptr) { - MS_LOG(ERROR) << "Memory allocation failed"; - return; - } - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - attr->format = schema::Format::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - if (train_flag()) { - attr->dilateH = dilation.at(2); - attr->dilateW = dilation.at(3); - } else { - attr->dilateH = dilation.at(0); - attr->dilateW = dilation.at(1); - } - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = (kernel_size.size() > 1) ? kernel_size.at(1) : kernel_size.at(0); - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride.at(2); - attr->strideW = stride.at(3); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "same") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - if (prim.GetAttr("activation_name") != nullptr) { - std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - - int channel_mutiplier = 1; - if (prim.GetAttr("channel_mutiplier") != nullptr) { - channel_mutiplier = CastToInt(prim.GetAttr("channel_multiplier")).front(); - } - attr->channelMultiplier = channel_mutiplier; - - MS_ASSERT(inputs.size() == kAnfPopulaterInputNumTwo); - auto input_node = inputs.at(kAnfPopulaterInputNumOne); - MS_ASSERT(input_node != nullptr); - if (input_node->isa<Parameter>()) { - auto param_node = input_node->cast<ParameterPtr>(); - ConvertConvWeight<float>(param_node); - auto abstractBase = param_node->abstract(); - MS_ASSERT(abstractBase != nullptr); - if (utils::isa<abstract::AbstractTensorPtr>(abstractBase)) { - auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase); - MS_ASSERT(abstractTensor != nullptr); - if (utils::isa<abstract::ShapePtr>(abstractTensor->BuildShape())) { - auto dims = utils::cast<abstract::ShapePtr>(abstractTensor->BuildShape())->shape(); - attr->channelIn = dims.at(kAnfPopulaterInputNumOne); - } - } - } else if (input_node->isa<CNode>()) { - // The weight of convolution is the output from the other operators which could be folded by const folding pass. 
-
-void Conv2D::PopulaterConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) {
-  auto attr = std::make_unique<schema::Conv2DT>();
-  if (attr.get() == nullptr) {
-    MS_LOG(ERROR) << "Memory allocation failed";
-    return;
-  }
-  attr->group = group;
-  auto format = GetValue<std::string>(prim.GetAttr("data_format"));
-  if (format == "NCHW") {
-    attr->format = schema::Format::Format_NCHW;
-  } else if (format == "NHWC") {
-    attr->format = schema::Format::Format_NHWC;
-  } else {
-    attr->format = schema::Format::Format_NUM_OF_FORMAT;
-  }
-  auto pad_list = CastToInt(prim.GetAttr("pad_list"));
-  attr->padUp = pad_list.at(0);
-  attr->padDown = pad_list.at(1);
-  attr->padLeft = pad_list.at(2);
-  attr->padRight = pad_list.at(3);
-
-  auto dilation = CastToInt(prim.GetAttr("dilation"));
-  attr->dilateH = dilation.at(2);
-  attr->dilateW = dilation.at(3);
-
-  auto kernel_size = CastToInt(prim.GetAttr("kernel_size"));
-  attr->kernelH = kernel_size.at(0);
-  attr->kernelW = (kernel_size.size() > 1) ? kernel_size.at(1) : kernel_size.at(0);
-
-  auto stride = CastToInt(prim.GetAttr("stride"));
-  attr->strideH = stride.at(2);
-  attr->strideW = stride.at(3);
-
-  attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front();
-
-  auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
-  if (pad_mode == "valid") {
-    attr->padMode = schema::PadMode_VALID;
-  } else if (pad_mode == "same") {
-    attr->padMode = schema::PadMode_SAME_UPPER;
-  } else {
-    attr->padMode = schema::PadMode_NOTSET;
-  }
-
-  if (prim.GetAttr("activation_name") != nullptr) {
-    std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name"));
-    attr->activationType = kActivationTypeMap[activate_name];
-  } else {
-    attr->activationType = schema::ActivationType_NO_ACTIVATION;
-  }
-
-  primitive->value.type = schema::PrimitiveType_Conv2D;
-  primitive->value.value = attr.release();
-}
-
-int Conv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_Conv2D;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_Conv2D) {
-    MS_LOG(ERROR) << "primitive_ type is wrong: " << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  auto groupAttr = prim.GetAttr("group");
-  if (groupAttr == nullptr) {
-    MS_LOG(ERROR) << "conv2d op has no group attr, please check the pb model";
-    return RET_NULL_PTR;
-  }
-  int group = CastToInt(groupAttr).front();
-  if (group > 1) {
-    PopulaterConv2DMultiGroup(prim, this->primitive_, group, inputs);
-  } else {
-    PopulaterConv2DSingleGroup(prim, this->primitive_, group);
-  }
-
-  PopulaterQuantParam(prim, inputs);
-  return RET_OK;
-}
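UnPackAttr keys the whole export off the group attribute: any group greater than one is lowered to a DepthwiseConv2D primitive, otherwise a plain Conv2D is emitted. A toy illustration of just that dispatch rule (standalone; the real code fills a schema::PrimitiveT as shown above):

#include <initializer_list>
#include <iostream>

// Mirrors the branch in UnPackAttr: group > 1 selects the depthwise lowering.
const char *ExportedPrimitiveType(int group) { return group > 1 ? "DepthwiseConv2D" : "Conv2D"; }

int main() {
  for (int group : {1, 2, 32}) {
    std::cout << "group=" << group << " -> " << ExportedPrimitiveType(group) << '\n';
  }
  return 0;  // prints Conv2D, then DepthwiseConv2D twice
}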
-
-#else
-int Conv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Conv2D();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Conv2D return nullptr";
-    return RET_ERROR;
-  }
-
-  auto val_offset = schema::CreateConv2D(
-    *fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), attr->kernelH(),
-    attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
-    attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), attr->activationType());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2D, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-int Conv2D::GetFormat() const { return this->primitive_->value_as_Conv2D()->format(); }
-int Conv2D::GetGroup() const { return this->primitive_->value_as_Conv2D()->group(); }
-int Conv2D::GetChannelIn() const { return this->primitive_->value_as_Conv2D()->channelIn(); }
-int Conv2D::GetChannelOut() const { return this->primitive_->value_as_Conv2D()->channelOut(); }
-int Conv2D::GetKernelW() const { return this->primitive_->value_as_Conv2D()->kernelW(); }
-int Conv2D::GetKernelH() const { return this->primitive_->value_as_Conv2D()->kernelH(); }
-int Conv2D::GetStrideW() const { return this->primitive_->value_as_Conv2D()->strideW(); }
-int Conv2D::GetStrideH() const { return this->primitive_->value_as_Conv2D()->strideH(); }
-int Conv2D::GetPadMode() const { return this->primitive_->value_as_Conv2D()->padMode(); }
-int Conv2D::GetPadUp() const { return this->primitive_->value_as_Conv2D()->padUp(); }
-int Conv2D::GetPadDown() const { return this->primitive_->value_as_Conv2D()->padDown(); }
-int Conv2D::GetPadLeft() const { return this->primitive_->value_as_Conv2D()->padLeft(); }
-int Conv2D::GetPadRight() const { return this->primitive_->value_as_Conv2D()->padRight(); }
-int Conv2D::GetDilateW() const { return this->primitive_->value_as_Conv2D()->dilateW(); }
-int Conv2D::GetDilateH() const { return this->primitive_->value_as_Conv2D()->dilateH(); }
-int Conv2D::GetActivationType() const { return this->primitive_->value_as_Conv2D()->activationType(); }
-
-PrimitiveC *Conv2DCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Conv2D>(primitive); }
-Registry Conv2DRegistry(schema::PrimitiveType_Conv2D, Conv2DCreator);
-#endif
-
-void Conv2D::ConvInferShape(int input_h, int input_w, int *output_h, int *output_w) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  int kernel_w = GetKernelW();
-  int kernel_h = GetKernelH();
-  int stride_w = GetStrideW();
-  int stride_h = GetStrideH();
-  int dilate_w = GetDilateW();
-  int dilate_h = GetDilateH();
-
-  if (GetPadMode() == schema::PadMode_SAME_UPPER) {
-    *output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(stride_w));
-    *output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(stride_h));
-    auto pad_h_all = ((*output_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - input_h);
-    auto pad_w_all = ((*output_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - input_w);
-    if (pad_h_all < 0) {
-      pad_u_ = pad_d_ = 0;
-    } else {
-      pad_u_ = pad_h_all / 2;
-      pad_d_ = pad_h_all - pad_u_;
-    }
-    if (pad_w_all < 0) {
-      pad_l_ = pad_r_ = 0;
-    } else {
-      pad_l_ = pad_w_all / 2;
-      pad_r_ = pad_w_all - pad_l_;
-    }
-  } else {
-    *output_w = std::ceil((static_cast<float>(input_w) + pad_l_ + pad_r_ -
-                           (static_cast<float>(kernel_w) - 1) * static_cast<float>(dilate_w)) /
-                          static_cast<float>(stride_w));
-    *output_h = std::ceil((static_cast<float>(input_h) + pad_u_ + pad_d_ -
-                           (static_cast<float>(kernel_h) - 1) * static_cast<float>(dilate_h)) /
-                          static_cast<float>(stride_h));
-  }
-}
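The SAME_UPPER branch of ConvInferShape first fixes the output size at ceil(input / stride), then derives the total padding (output - 1) * stride + (kernel - 1) * dilation + 1 - input needed to realize that size, giving the extra pixel to the bottom/right side when the total is odd; the other branch is the usual explicit-padding formula output = ceil((input + pads - (kernel - 1) * dilation) / stride). A self-contained rework of the SAME_UPPER arithmetic for one axis, with hypothetical names:

#include <cmath>
#include <cstdio>

// One-axis version of the SAME_UPPER math above: compute the output extent,
// then split the required padding so the tail side gets the odd pixel.
void SameUpperPad(int input, int stride, int kernel, int dilation, int *output, int *pad_head, int *pad_tail) {
  *output = static_cast<int>(std::ceil(static_cast<float>(input) / static_cast<float>(stride)));
  int pad_all = (*output - 1) * stride + (kernel - 1) * dilation + 1 - input;
  if (pad_all < 0) pad_all = 0;  // negative totals mean no padding is needed
  *pad_head = pad_all / 2;
  *pad_tail = pad_all - *pad_head;
}

int main() {
  int out = 0, head = 0, tail = 0;
  // e.g. a 3x3 kernel, stride 2, dilation 1 over a 224-wide input:
  SameUpperPad(224, 2, 3, 1, &out, &head, &tail);
  std::printf("output=%d pad=(%d,%d)\n", out, head, tail);  // output=112 pad=(0,1)
  return 0;
}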
-
-int Conv2D::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  if (inputs_.size() != 2 && inputs_.size() != 3) {
-    MS_LOG(ERROR) << "Conv2d should have two or three inputs";
-    return RET_ERROR;
-  }
-  if (outputs_.size() != 1) {
-    MS_LOG(ERROR) << "Conv2d should have one output";
-    return RET_ERROR;
-  }
-  auto *input_tensor = inputs_.front();
-  auto *weight_tensor = inputs_.at(1);
-  auto *out_tensor = outputs_.front();
-  MS_ASSERT(input_tensor != nullptr);
-  MS_ASSERT(out_tensor != nullptr);
-
-  out_tensor->set_format(input_tensor->format());
-  out_tensor->set_data_type(input_tensor->data_type());
-  pad_l_ = GetPadLeft();
-  pad_u_ = GetPadUp();
-  pad_d_ = GetPadDown();
-  pad_r_ = GetPadRight();
-
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  auto in_shape = input_tensor->shape();
-  if (in_shape.size() == 0) {
-    return RET_INFER_INVALID;
-  }
-  int input_h = in_shape.at(1);
-  int input_w = in_shape.at(2);
-  int output_w = 0, output_h = 0;
-
-  this->ConvInferShape(input_h, input_w, &output_h, &output_w);
-
-  std::vector<int> out_shape{input_tensor->shape()};
-  out_shape.at(1) = output_h >= 0 ? output_h : 1;
-  out_shape.at(2) = output_w >= 0 ? output_w : 1;
-  out_shape.at(3) = weight_tensor->shape()[0];
-  out_tensor->set_shape(out_shape);
-
-  return RET_OK;
-}
-} // namespace lite
-} // namespace mindspore
diff --git a/mindspore/lite/src/ops/conv2d.h b/mindspore/lite/src/ops/conv2d.h
deleted file mode 100644
index c40e3ac61b..0000000000
--- a/mindspore/lite/src/ops/conv2d.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Conv2D : public PrimitiveC { - public: - Conv2D() = default; - ~Conv2D() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Conv2D, PrimitiveC); - explicit Conv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - virtual void SetFormat(int format); - virtual void SetGroup(int group); - virtual void SetChannelIn(int channel_in); - virtual void SetChannelOut(int channel_out); - virtual void SetKernelW(int kernel_w); - virtual void SetKernelH(int kernel_h); - virtual void SetStrideW(int stride_w); - virtual void SetStrideH(int stride_h); - virtual void SetPadMode(int pad_mode); - virtual void SetPadUp(int pad_up); - virtual void SetPadDown(int pad_down); - virtual void SetPadLeft(int pad_left); - virtual void SetPadRight(int pad_right); - virtual void SetDilateW(int dilate_w); - virtual void SetDilateH(int dilate_h); - virtual void SetActivationType(int activation_type); - - private: - void PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, - const std::vector<AnfNodePtr> &inputs); - void PopulaterConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - - public: - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int PadUp() const; - int PadDown() const; - int PadLeft() const; - int PadRight() const; - - virtual int GetFormat() const; - virtual int GetGroup() const; - virtual int GetChannelIn() const; - virtual int GetChannelOut() const; - virtual int GetKernelW() const; - virtual int GetKernelH() const; - virtual int GetStrideW() const; - virtual int GetStrideH() const; - virtual int GetPadMode() const; - virtual int GetPadUp() const; - virtual int GetPadDown() const; - virtual int GetPadLeft() const; - virtual int GetPadRight() const; - virtual int GetDilateW() const; - virtual int GetDilateH() const; - virtual int GetActivationType() const; - - protected: - void ConvInferShape(int input_h, int input_w, int *output_h, int *output_w); - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CONV2_D_H_ diff --git a/mindspore/lite/src/ops/conv2d_grad_filter.cc b/mindspore/lite/src/ops/conv2d_grad_filter.cc deleted file mode 100644 index 3963d962d4..0000000000 --- a/mindspore/lite/src/ops/conv2d_grad_filter.cc +++ /dev/null @@ -1,244 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/conv2d_grad_filter.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Conv2DGradFilter::GetFormat() const { return this->primitive_->value.AsConv2DGradFilter()->format; } -int Conv2DGradFilter::GetGroup() const { return this->primitive_->value.AsConv2DGradFilter()->group; } -int Conv2DGradFilter::GetChannelIn() const { return this->primitive_->value.AsConv2DGradFilter()->channelIn; } -int Conv2DGradFilter::GetChannelOut() const { return this->primitive_->value.AsConv2DGradFilter()->channelOut; } -int Conv2DGradFilter::GetKernelW() const { return this->primitive_->value.AsConv2DGradFilter()->kernelW; } -int Conv2DGradFilter::GetKernelH() const { return this->primitive_->value.AsConv2DGradFilter()->kernelH; } -int Conv2DGradFilter::GetStrideW() const { return this->primitive_->value.AsConv2DGradFilter()->strideW; } -int Conv2DGradFilter::GetStrideH() const { return this->primitive_->value.AsConv2DGradFilter()->strideH; } -int Conv2DGradFilter::GetPadMode() const { return this->primitive_->value.AsConv2DGradFilter()->padMode; } -int Conv2DGradFilter::GetPadUp() const { return this->primitive_->value.AsConv2DGradFilter()->padUp; } -int Conv2DGradFilter::GetPadDown() const { return this->primitive_->value.AsConv2DGradFilter()->padDown; } -int Conv2DGradFilter::GetPadLeft() const { return this->primitive_->value.AsConv2DGradFilter()->padLeft; } -int Conv2DGradFilter::GetPadRight() const { return this->primitive_->value.AsConv2DGradFilter()->padRight; } -int Conv2DGradFilter::GetDilateW() const { return this->primitive_->value.AsConv2DGradFilter()->dilateW; } -int Conv2DGradFilter::GetDilateH() const { return this->primitive_->value.AsConv2DGradFilter()->dilateH; } - -int Conv2DGradFilter::GetActivationType() const { return this->primitive_->value.AsConv2DGradFilter()->activationType; } - -void Conv2DGradFilter::SetFormat(int format) { - this->primitive_->value.AsConv2DGradFilter()->format = (schema::Format)format; -} -void Conv2DGradFilter::SetGroup(int group) { this->primitive_->value.AsConv2DGradFilter()->group = group; } -void Conv2DGradFilter::SetChannelIn(int channel_in) { - this->primitive_->value.AsConv2DGradFilter()->channelIn = channel_in; -} -void Conv2DGradFilter::SetChannelOut(int channel_out) { - this->primitive_->value.AsConv2DGradFilter()->channelOut = channel_out; -} -void Conv2DGradFilter::SetKernelW(int kernel_w) { this->primitive_->value.AsConv2DGradFilter()->kernelW = kernel_w; } -void Conv2DGradFilter::SetKernelH(int kernel_h) { this->primitive_->value.AsConv2DGradFilter()->kernelH = kernel_h; } -void Conv2DGradFilter::SetStrideW(int stride_w) { this->primitive_->value.AsConv2DGradFilter()->strideW = stride_w; } -void Conv2DGradFilter::SetStrideH(int stride_h) { this->primitive_->value.AsConv2DGradFilter()->strideH = stride_h; } -void Conv2DGradFilter::SetPadMode(int pad_mode) { - this->primitive_->value.AsConv2DGradFilter()->padMode = (schema::PadMode)pad_mode; -} -void Conv2DGradFilter::SetPadUp(int pad_up) { this->primitive_->value.AsConv2DGradFilter()->padUp = pad_up; } -void Conv2DGradFilter::SetPadDown(int pad_down) { this->primitive_->value.AsConv2DGradFilter()->padDown = pad_down; } -void Conv2DGradFilter::SetPadLeft(int pad_left) { this->primitive_->value.AsConv2DGradFilter()->padLeft = pad_left; } -void Conv2DGradFilter::SetPadRight(int pad_right) { - this->primitive_->value.AsConv2DGradFilter()->padRight = pad_right; -} -void 
Conv2DGradFilter::SetDilateW(int dilate_w) { this->primitive_->value.AsConv2DGradFilter()->dilateW = dilate_w; } -void Conv2DGradFilter::SetDilateH(int dilate_h) { this->primitive_->value.AsConv2DGradFilter()->dilateH = dilate_h; } -std::vector<int> Conv2DGradFilter::GetFilterShape() const { - return this->primitive_->value.AsConv2DGradFilter()->filter_shape; -} -void Conv2DGradFilter::SetActivationType(int activation_type) { - this->primitive_->value.AsConv2DGradFilter()->activationType = (schema::ActivationType)activation_type; -} - -int Conv2DGradFilter::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Conv2DGradFilter; - } - if (this->primitive_->value.type != schema::PrimitiveType_Conv2DGradFilter) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::Conv2DGradFilterT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->group = CastToInt(prim.GetAttr("group")).front(); - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; - } else { - attr->format = schema::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation.at(2); - attr->dilateW = dilation.at(3); - - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = (kernel_size.size() > 1) ? 
kernel_size.at(1) : kernel_size.at(0);
-
-    auto stride = CastToInt(prim.GetAttr("stride"));
-    attr->strideH = stride.at(0);
-    attr->strideW = stride.at(1);
-
-    attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front();
-    auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode"));
-    if (pad_mode == "valid") {
-      attr->padMode = schema::PadMode_VALID;
-    } else if (pad_mode == "same") {
-      attr->padMode = schema::PadMode_SAME_UPPER;
-    } else {
-      attr->padMode = schema::PadMode_NOTSET;
-    }
-
-    if (prim.GetAttr("activation_name") != nullptr) {
-      std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name"));
-      attr->activationType = kActivationTypeMap[activate_name];
-    } else {
-      attr->activationType = schema::ActivationType_NO_ACTIVATION;
-    }
-
-    if (inputs.size() >= kAnfPopulaterInputNumThree) {
-      auto filter_shape = inputs[kAnfPopulaterInputNumTwo];
-      MS_ASSERT(filter_shape != nullptr);
-      if (filter_shape->isa<ValueNode>()) {
-        auto valueNode = filter_shape->cast<ValueNodePtr>();
-        MS_ASSERT(valueNode != nullptr);
-        auto value = valueNode->value();
-        MS_ASSERT(value != nullptr);
-        if (value->isa<ValueTuple>()) {
-          auto valTuplPtr = dyn_cast<ValueTuple>(value);
-          MS_ASSERT(valTuplPtr != nullptr);
-          const int nchw2nhwc[] = {0, 3, 1, 2};
-          attr->filter_shape.resize(valTuplPtr->size());
-          for (size_t i = 0; i < valTuplPtr->size(); i++) {
-            auto elem = (*valTuplPtr)[i];
-            MS_ASSERT(elem != nullptr);
-            attr->filter_shape[nchw2nhwc[i]] = CastToInt(elem).front();
-          }
-        }
-      }
-    }
-
-    this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "primitive value is nullptr";
-      return RET_ERROR;
-    }
-  }
-  return RET_OK;
-}
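The filter_shape tuple arrives from the IR in NCHW order, and the nchw2nhwc table above scatters element i of the source into slot nchw2nhwc[i] of the destination, which yields the NHWC-ordered shape the runtime expects. A standalone sketch of that permutation, with a hypothetical helper name:

#include <array>
#include <cstddef>
#include <vector>

// Writing source element i into slot nchw2nhwc[i] turns {N, C, H, W}
// into {N, H, W, C}: C (i = 1) lands in slot 3, H and W shift left by one.
std::vector<int> NchwShapeToNhwc(const std::array<int, 4> &nchw) {
  const int nchw2nhwc[] = {0, 3, 1, 2};
  std::vector<int> nhwc(4);
  for (std::size_t i = 0; i < 4; ++i) {
    nhwc[nchw2nhwc[i]] = nchw[i];
  }
  return nhwc;  // e.g. {1, 64, 7, 7} -> {1, 7, 7, 64}
}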
-#else
-int Conv2DGradFilter::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Conv2DGradFilter();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Conv2DGradFilter return nullptr";
-    return RET_ERROR;
-  }
-  std::vector<int32_t> filter_shape;
-  if (attr->filter_shape() != nullptr) {
-    for (int i = 0; i < static_cast<int>(attr->filter_shape()->size()); i++) {
-      filter_shape.push_back(attr->filter_shape()->data()[i]);
-    }
-  }
-  auto val_offset = schema::CreateConv2DGradFilterDirect(
-    *fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), attr->kernelH(),
-    attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(),
-    attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), &filter_shape, attr->activationType());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2DGradFilter, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-int Conv2DGradFilter::GetFormat() const { return this->primitive_->value_as_Conv2DGradFilter()->format(); }
-int Conv2DGradFilter::GetGroup() const { return this->primitive_->value_as_Conv2DGradFilter()->group(); }
-int Conv2DGradFilter::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradFilter()->channelIn(); }
-int Conv2DGradFilter::GetChannelOut() const { return this->primitive_->value_as_Conv2DGradFilter()->channelOut(); }
-int Conv2DGradFilter::GetKernelW() const { return this->primitive_->value_as_Conv2DGradFilter()->kernelW(); }
-int Conv2DGradFilter::GetKernelH() const { return this->primitive_->value_as_Conv2DGradFilter()->kernelH(); }
-int Conv2DGradFilter::GetStrideW() const { return this->primitive_->value_as_Conv2DGradFilter()->strideW(); }
-int Conv2DGradFilter::GetStrideH() const { return this->primitive_->value_as_Conv2DGradFilter()->strideH(); }
-int Conv2DGradFilter::GetPadMode() const { return this->primitive_->value_as_Conv2DGradFilter()->padMode(); }
-int Conv2DGradFilter::GetPadUp() const { return this->primitive_->value_as_Conv2DGradFilter()->padUp(); }
-int Conv2DGradFilter::GetPadDown() const { return this->primitive_->value_as_Conv2DGradFilter()->padDown(); }
-int Conv2DGradFilter::GetPadLeft() const { return this->primitive_->value_as_Conv2DGradFilter()->padLeft(); }
-int Conv2DGradFilter::GetPadRight() const { return this->primitive_->value_as_Conv2DGradFilter()->padRight(); }
-int Conv2DGradFilter::GetDilateW() const { return this->primitive_->value_as_Conv2DGradFilter()->dilateW(); }
-int Conv2DGradFilter::GetDilateH() const { return this->primitive_->value_as_Conv2DGradFilter()->dilateH(); }
-std::vector<int> Conv2DGradFilter::GetFilterShape() const {
-  auto fb_vector = this->primitive_->value_as_Conv2DGradFilter()->filter_shape();
-  return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-int Conv2DGradFilter::GetActivationType() const {
-  return this->primitive_->value_as_Conv2DGradFilter()->activationType();
-}
-
-PrimitiveC *Conv2DGradFilterCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<Conv2DGradFilter>(primitive);
-}
-Registry conv2DGradFilterRegistry(schema::PrimitiveType_Conv2DGradFilter, Conv2DGradFilterCreator);
-#endif
-
-int Conv2DGradFilter::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (inputs.size() < 2) {
-    MS_LOG(ERROR) << "Conv2d Grad Filter should have at least two inputs, but it got " << inputs.size();
-    return RET_ERROR;
-  }
-  if (outputs.size() != 1) {
-    MS_LOG(ERROR) << "Conv2d Grad Filter should have one output, but it got " << outputs.size();
-    return RET_ERROR;
-  }
-
-  auto *in0 = inputs.at(0);
-  MS_ASSERT(in0 != nullptr);
-
-  auto *out = outputs.at(0);
-  MS_ASSERT(out != nullptr);
-
-  out->set_shape(GetFilterShape());
-  out->set_data_type(in0->data_type());
-  out->set_format(in0->format());
-
-  return RET_OK;
-}
-} // namespace lite
-} // namespace mindspore
diff --git a/mindspore/lite/src/ops/conv2d_grad_filter.h b/mindspore/lite/src/ops/conv2d_grad_filter.h
deleted file mode 100644
index bf538c45bf..0000000000
--- a/mindspore/lite/src/ops/conv2d_grad_filter.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_FILTER_H_ -#define MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_FILTER_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include <string> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Conv2DGradFilter : public PrimitiveC { - public: - Conv2DGradFilter() = default; - ~Conv2DGradFilter() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Conv2DGradFilter, PrimitiveC); - explicit Conv2DGradFilter(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetGroup(int group); - void SetChannelIn(int channel_in); - void SetChannelOut(int channel_out); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetGroup() const; - int GetChannelIn() const; - int GetChannelOut() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; - std::vector<int> GetFilterShape() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_FILTER_H_ diff --git a/mindspore/lite/src/ops/conv2d_grad_input.cc b/mindspore/lite/src/ops/conv2d_grad_input.cc deleted file mode 100644 index 83c8f88b95..0000000000 --- a/mindspore/lite/src/ops/conv2d_grad_input.cc +++ /dev/null @@ -1,244 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/conv2d_grad_input.h" -#include "src/ops/group_conv2d_grad_input.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Conv2DGradInput::GetFormat() const { return this->primitive_->value.AsConv2DGradInput()->format; } -int Conv2DGradInput::GetGroup() const { return this->primitive_->value.AsConv2DGradInput()->group; } -int Conv2DGradInput::GetChannelIn() const { return this->primitive_->value.AsConv2DGradInput()->channelIn; } -int Conv2DGradInput::GetChannelOut() const { return this->primitive_->value.AsConv2DGradInput()->channelOut; } -int Conv2DGradInput::GetKernelW() const { return this->primitive_->value.AsConv2DGradInput()->kernelW; } -int Conv2DGradInput::GetKernelH() const { return this->primitive_->value.AsConv2DGradInput()->kernelH; } -int Conv2DGradInput::GetStrideW() const { return this->primitive_->value.AsConv2DGradInput()->strideW; } -int Conv2DGradInput::GetStrideH() const { return this->primitive_->value.AsConv2DGradInput()->strideH; } -int Conv2DGradInput::GetPadMode() const { return this->primitive_->value.AsConv2DGradInput()->padMode; } -int Conv2DGradInput::GetPadUp() const { return this->primitive_->value.AsConv2DGradInput()->padUp; } -int Conv2DGradInput::GetPadDown() const { return this->primitive_->value.AsConv2DGradInput()->padDown; } -int Conv2DGradInput::GetPadLeft() const { return this->primitive_->value.AsConv2DGradInput()->padLeft; } -int Conv2DGradInput::GetPadRight() const { return this->primitive_->value.AsConv2DGradInput()->padRight; } -int Conv2DGradInput::GetDilateW() const { return this->primitive_->value.AsConv2DGradInput()->dilateW; } -int Conv2DGradInput::GetDilateH() const { return this->primitive_->value.AsConv2DGradInput()->dilateH; } -std::vector<int> Conv2DGradInput::GetInputShape() const { - return this->primitive_->value.AsConv2DGradInput()->input_shape; -} -int Conv2DGradInput::GetActivationType() const { return this->primitive_->value.AsConv2DGradInput()->activationType; } - -void Conv2DGradInput::SetFormat(int format) { - this->primitive_->value.AsConv2DGradInput()->format = (schema::Format)format; -} -void Conv2DGradInput::SetGroup(int group) { this->primitive_->value.AsConv2DGradInput()->group = group; } -void Conv2DGradInput::SetChannelIn(int channel_in) { - this->primitive_->value.AsConv2DGradInput()->channelIn = channel_in; -} -void Conv2DGradInput::SetChannelOut(int channel_out) { - this->primitive_->value.AsConv2DGradInput()->channelOut = channel_out; -} -void Conv2DGradInput::SetKernelW(int kernel_w) { this->primitive_->value.AsConv2DGradInput()->kernelW = kernel_w; } -void Conv2DGradInput::SetKernelH(int kernel_h) { this->primitive_->value.AsConv2DGradInput()->kernelH = kernel_h; } -void Conv2DGradInput::SetStrideW(int stride_w) { this->primitive_->value.AsConv2DGradInput()->strideW = stride_w; } -void Conv2DGradInput::SetStrideH(int stride_h) { this->primitive_->value.AsConv2DGradInput()->strideH = stride_h; } -void Conv2DGradInput::SetPadMode(int pad_mode) { - this->primitive_->value.AsConv2DGradInput()->padMode = (schema::PadMode)pad_mode; -} -void Conv2DGradInput::SetPadUp(int pad_up) { this->primitive_->value.AsConv2DGradInput()->padUp = pad_up; } -void Conv2DGradInput::SetPadDown(int pad_down) { this->primitive_->value.AsConv2DGradInput()->padDown = pad_down; } -void Conv2DGradInput::SetPadLeft(int pad_left) { this->primitive_->value.AsConv2DGradInput()->padLeft = pad_left; } -void 
Conv2DGradInput::SetPadRight(int pad_right) { this->primitive_->value.AsConv2DGradInput()->padRight = pad_right; } -void Conv2DGradInput::SetDilateW(int dilate_w) { this->primitive_->value.AsConv2DGradInput()->dilateW = dilate_w; } -void Conv2DGradInput::SetDilateH(int dilate_h) { this->primitive_->value.AsConv2DGradInput()->dilateH = dilate_h; } -void Conv2DGradInput::SetActivationType(int activation_type) { - this->primitive_->value.AsConv2DGradInput()->activationType = (schema::ActivationType)activation_type; -} - -int Conv2DGradInput::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Conv2DGradInput; - } - if (this->primitive_->value.type != schema::PrimitiveType_Conv2DGradInput) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::Conv2DGradInputT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->group = CastToInt(prim.GetAttr("group")).front(); - if (attr->group > 1) { - this->primitive_->value.type = schema::PrimitiveType_GroupConv2DGradInput; - } - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; - } else { - attr->format = schema::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation.at(2); - attr->dilateW = dilation.at(3); - - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = (kernel_size.size() > 1) ? 
kernel_size.at(1) : kernel_size.at(0); - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride.at(0); - attr->strideW = stride.at(1); - - attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front(); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "same") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - if (prim.GetAttr("activation_name") != nullptr) { - std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - - if (inputs.size() >= kAnfPopulaterInputNumThree) { - auto input_shape = inputs[kAnfPopulaterInputNumTwo]; - MS_ASSERT(input_shape != nullptr); - if (input_shape->isa<ValueNode>()) { - auto valueNode = input_shape->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto value = valueNode->value(); - MS_ASSERT(value != nullptr); - if (value->isa<ValueTuple>()) { - auto valTuplPtr = dyn_cast<ValueTuple>(value); - MS_ASSERT(valTuplPtr != nullptr); - const int nchw2nhwc[] = {0, 3, 1, 2}; - attr->input_shape.resize(valTuplPtr->size()); - for (size_t i = 0; i < valTuplPtr->size(); i++) { - auto elem = (*valTuplPtr)[i]; - MS_ASSERT(elem != nullptr); - attr->input_shape[nchw2nhwc[i]] = CastToInt(elem).front(); - } - } - } - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Conv2DGradInput::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Conv2DGradInput(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Conv2DGradInput return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> input_shape; - if (attr->input_shape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->input_shape()->size()); i++) { - input_shape.push_back(attr->input_shape()->data()[i]); - } - } - auto val_offset = schema::CreateConv2DGradInputDirect( - *fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), attr->kernelH(), - attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(), - attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), &input_shape, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Conv2DGradInput, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Conv2DGradInput::GetFormat() const { return this->primitive_->value_as_Conv2DGradInput()->format(); } -int Conv2DGradInput::GetGroup() const { return this->primitive_->value_as_Conv2DGradInput()->group(); } -int Conv2DGradInput::GetChannelIn() const { return this->primitive_->value_as_Conv2DGradInput()->channelIn(); } -int Conv2DGradInput::GetChannelOut() const { return this->primitive_->value_as_Conv2DGradInput()->channelOut(); } -int Conv2DGradInput::GetKernelW() const { return this->primitive_->value_as_Conv2DGradInput()->kernelW(); } -int Conv2DGradInput::GetKernelH() const { return this->primitive_->value_as_Conv2DGradInput()->kernelH(); } -int Conv2DGradInput::GetStrideW() const { return 
this->primitive_->value_as_Conv2DGradInput()->strideW(); }
-int Conv2DGradInput::GetStrideH() const { return this->primitive_->value_as_Conv2DGradInput()->strideH(); }
-int Conv2DGradInput::GetPadMode() const { return this->primitive_->value_as_Conv2DGradInput()->padMode(); }
-int Conv2DGradInput::GetPadUp() const { return this->primitive_->value_as_Conv2DGradInput()->padUp(); }
-int Conv2DGradInput::GetPadDown() const { return this->primitive_->value_as_Conv2DGradInput()->padDown(); }
-int Conv2DGradInput::GetPadLeft() const { return this->primitive_->value_as_Conv2DGradInput()->padLeft(); }
-int Conv2DGradInput::GetPadRight() const { return this->primitive_->value_as_Conv2DGradInput()->padRight(); }
-int Conv2DGradInput::GetDilateW() const { return this->primitive_->value_as_Conv2DGradInput()->dilateW(); }
-int Conv2DGradInput::GetDilateH() const { return this->primitive_->value_as_Conv2DGradInput()->dilateH(); }
-std::vector<int> Conv2DGradInput::GetInputShape() const {
-  auto fb_vector = this->primitive_->value_as_Conv2DGradInput()->input_shape();
-  return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-int Conv2DGradInput::GetActivationType() const {
-  return this->primitive_->value_as_Conv2DGradInput()->activationType();
-}
-
-PrimitiveC *Conv2DGradInputCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<Conv2DGradInput>(primitive);
-}
-Registry Conv2DGradInputRegistry(schema::PrimitiveType_Conv2DGradInput, Conv2DGradInputCreator);
-#endif
-
-int Conv2DGradInput::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) {
-  if (inputs.size() < 2) {
-    MS_LOG(ERROR) << "Conv2d Grad Input should have at least two inputs";
-    return RET_ERROR;
-  }
-  if (outputs.size() != 1) {
-    MS_LOG(ERROR) << "Conv2d Grad Input should have one output";
-    return RET_ERROR;
-  }
-
-  auto *in0 = inputs.at(0);
-  MS_ASSERT(in0 != nullptr);
-
-  auto *out = outputs.at(0);
-  MS_ASSERT(out != nullptr);
-  out->set_shape(GetInputShape());
-  out->set_data_type(in0->data_type());
-  out->set_format(in0->format());
-
-  return RET_OK;
-}
-} // namespace lite
-} // namespace mindspore
diff --git a/mindspore/lite/src/ops/conv2d_grad_input.h b/mindspore/lite/src/ops/conv2d_grad_input.h
deleted file mode 100644
index b12c96a51c..0000000000
--- a/mindspore/lite/src/ops/conv2d_grad_input.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_INPUT_H_ -#define MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_INPUT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include <string> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Conv2DGradInput : public PrimitiveC { - public: - Conv2DGradInput() = default; - ~Conv2DGradInput() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Conv2DGradInput, PrimitiveC); - explicit Conv2DGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetGroup(int group); - void SetChannelIn(int channel_in); - void SetChannelOut(int channel_out); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetGroup() const; - int GetChannelIn() const; - int GetChannelOut() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; - std::vector<int> GetInputShape() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_CONV2D_GRAD_INPUT_H_ diff --git a/mindspore/lite/src/ops/cos.cc b/mindspore/lite/src/ops/cos.cc deleted file mode 100644 index 1a02632937..0000000000 --- a/mindspore/lite/src/ops/cos.cc +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/cos.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Cos::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Cos; - } - if (this->primitive_->value.type != schema::PrimitiveType_Cos) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::CosT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Cos::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateCos(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Cos, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *CosCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Cos>(primitive); } -Registry CosRegistry(schema::PrimitiveType_Cos, CosCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/cos.h b/mindspore/lite/src/ops/cos.h deleted file mode 100644 index 70269945d6..0000000000 --- a/mindspore/lite/src/ops/cos.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_COS_H_ -#define MINDSPORE_LITE_SRC_OPS_COS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Cos : public ArithmeticSelf { - public: - Cos() = default; - ~Cos() = default; -#ifdef PRIMITIVE_WRITEABLE - explicit Cos(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_COS_H_ diff --git a/mindspore/lite/src/ops/crop.cc b/mindspore/lite/src/ops/crop.cc deleted file mode 100644 index c568f8832b..0000000000 --- a/mindspore/lite/src/ops/crop.cc +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/crop.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int64_t Crop::GetAxis() const { return this->primitive_->value.AsCrop()->axis; } -std::vector<int64_t> Crop::GetOffsets() const { return this->primitive_->value.AsCrop()->offsets; } - -void Crop::SetAxis(int64_t axis) { this->primitive_->value.AsCrop()->axis = axis; } -void Crop::SetOffsets(const std::vector<int64_t> &offsets) { this->primitive_->value.AsCrop()->offsets = offsets; } - -#else -int Crop::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Crop(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Crop return nullptr"; - return RET_ERROR; - } - std::vector<int64_t> offsets; - if (attr->offsets() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->offsets()->size()); i++) { - offsets.push_back(attr->offsets()->data()[i]); - } - } - auto val_offset = schema::CreateCropDirect(*fbb, attr->axis(), &offsets); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Crop, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int64_t Crop::GetAxis() const { return this->primitive_->value_as_Crop()->axis(); } -std::vector<int64_t> Crop::GetOffsets() const { - auto fb_vector = this->primitive_->value_as_Crop()->offsets(); - return std::vector<int64_t>(fb_vector->begin(), fb_vector->end()); -} - -PrimitiveC *CropCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Crop>(primitive); } -Registry CropRegistry(schema::PrimitiveType_Crop, CropCreator); -#endif - -namespace { -constexpr int kCropOutputNum = 1; -constexpr int kCropInputNum = 2; -} // namespace -int Crop::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - if (outputs.size() != kCropOutputNum || inputs.size() != kCropInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; - } - outputs[0]->set_format(inputs[0]->format()); - outputs[0]->set_data_type(inputs[0]->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - outputs[0]->set_shape(inputs[1]->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/crop.h b/mindspore/lite/src/ops/crop.h deleted file mode 100644 index 002843d677..0000000000 --- a/mindspore/lite/src/ops/crop.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_CROP_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CROP_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Crop : public PrimitiveC { - public: - Crop() = default; - ~Crop() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Crop, PrimitiveC); - explicit Crop(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int64_t axis); - void SetOffsets(const std::vector<int64_t> &offsets); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int64_t GetAxis() const; - std::vector<int64_t> GetOffsets() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CROP_H_ diff --git a/mindspore/lite/src/ops/crop_and_resize.cc b/mindspore/lite/src/ops/crop_and_resize.cc deleted file mode 100644 index f9af64baef..0000000000 --- a/mindspore/lite/src/ops/crop_and_resize.cc +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "src/ops/crop_and_resize.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int CropAndResize::GetMethod() const { return this->primitive_->value.AsCropAndResize()->method; }
-float CropAndResize::GetExtrapolationValue() const {
-  return this->primitive_->value.AsCropAndResize()->extrapolation_value;
-}
-
-void CropAndResize::SetMethod(int method) {
-  this->primitive_->value.AsCropAndResize()->method = (schema::ResizeMethod)method;
-}
-void CropAndResize::SetExtrapolationValue(float value) {
-  this->primitive_->value.AsCropAndResize()->extrapolation_value = value;
-}
-#else
-
-int CropAndResize::GetMethod() const { return this->primitive_->value_as_CropAndResize()->method(); }
-float CropAndResize::GetExtrapolationValue() const {
-  return this->primitive_->value_as_CropAndResize()->extrapolation_value();
-}
-int CropAndResize::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_CropAndResize();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_CropAndResize return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateCropAndResize(*fbb, attr->method(), attr->extrapolation_value());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_CropAndResize, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *CropAndResizeCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<CropAndResize>(primitive);
-}
-Registry CropAndResizeRegistry(schema::PrimitiveType_CropAndResize, CropAndResizeCreator);
-#endif
-
-namespace {
-constexpr int kInputRank = 4;
-} // namespace
-int CropAndResize::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  if (inputs_.size() != 4) {
-    MS_LOG(ERROR) << "Input tensor num should be 4 for crop_and_resize.";
-    return RET_ERROR;
-  }
-  auto input = inputs_.front();
-  if (input == nullptr) {
-    return RET_ERROR;
-  }
-  if (!input->shape().empty() && input->shape().size() != kInputRank) {
-    MS_LOG(ERROR) << "Size of input shape is wrong.";
-    return RET_ERROR;
-  }
-  if (input->format() != schema::Format_NHWC) {
-    MS_LOG(ERROR) << "The crop_and_resize op only supports the NHWC format.";
-    return RET_ERROR;
-  }
-
-  auto output = outputs_.front();
-  if (output == nullptr) {
-    return RET_NULL_PTR;
-  }
-  output->set_data_type(input->data_type());
-  output->set_format(input->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  std::vector<int> output_shape;
-  if (inputs_[1]->data_c() != nullptr) {
-    auto boxes_tensor = inputs_[1];
-    output_shape.push_back(boxes_tensor->shape()[0]);
-  } else {
-    output_shape.push_back(input->Batch());
-  }
-
-  auto shape_tensor = inputs_[3];
-  auto data = reinterpret_cast<int32_t *>(shape_tensor->data_c());
-  if (data == nullptr) {
-    MS_LOG(INFO) << "The data of the 4th input tensor (shape tensor) for the crop_and_resize op is nullptr.";
-    return RET_INFER_INVALID;
-  }
-  output_shape.push_back(data[0]);
-  output_shape.push_back(data[1]);
-  output_shape.push_back(input->Channel());
-  output->set_shape(output_shape);
-  return RET_OK;
-}
-} // namespace lite
-} // namespace mindspore
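InferShape above assembles the output as [N, crop_h, crop_w, C]: N is the box count when the boxes tensor already carries data (otherwise the input batch), H and W come from the fourth input (the crop-size tensor), and C is carried over from the input. A minimal sketch of that assembly under the same assumptions, with hypothetical names:

#include <cstdint>
#include <vector>

// N = number of boxes if known, else the input batch; H/W from the crop-size
// tensor; C from the input. Mirrors the shape construction in InferShape.
std::vector<int> CropAndResizeOutShape(bool boxes_known, int num_boxes, int input_batch,
                                       const int32_t crop_size[2], int input_channel) {
  std::vector<int> out_shape;
  out_shape.push_back(boxes_known ? num_boxes : input_batch);
  out_shape.push_back(crop_size[0]);  // crop height
  out_shape.push_back(crop_size[1]);  // crop width
  out_shape.push_back(input_channel);
  return out_shape;
}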
diff --git a/mindspore/lite/src/ops/crop_and_resize.h b/mindspore/lite/src/ops/crop_and_resize.h
deleted file mode 100644
index 6cbfa98b8b..0000000000
--- a/mindspore/lite/src/ops/crop_and_resize.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_CROP_AND_RESIZE_H_
-#define LITE_MINDSPORE_LITE_C_OPS_CROP_AND_RESIZE_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class CropAndResize : public PrimitiveC {
- public:
-  CropAndResize() = default;
-  ~CropAndResize() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(CropAndResize, PrimitiveC);
-  explicit CropAndResize(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetMethod(int method);
-  void SetExtrapolationValue(float value);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  int GetMethod() const;
-  float GetExtrapolationValue() const;
-};
-} // namespace lite
-} // namespace mindspore
-
-#endif // LITE_MINDSPORE_LITE_C_OPS_CROP_AND_RESIZE_H_
diff --git a/mindspore/lite/src/ops/custom_extract_features.cc b/mindspore/lite/src/ops/custom_extract_features.cc
deleted file mode 100644
index 5054b25fba..0000000000
--- a/mindspore/lite/src/ops/custom_extract_features.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -#include "src/ops/custom_extract_features.h" - -#include "src/common/string_util.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int CustomExtractFeatures::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_OK; } -#else -int CustomExtractFeatures::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateCustomExtractFeatures(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_CustomExtractFeatures, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *CustomExtractFeaturesCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<CustomExtractFeatures>(primitive); -} -Registry CustomExtractFeaturesRegistry(schema::PrimitiveType_CustomExtractFeatures, CustomExtractFeaturesCreator); -#endif - -int CustomExtractFeatures::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.at(0); - auto output0 = outputs_.at(0); - auto output1 = outputs_.at(1); - MS_ASSERT(input != nullptr); - MS_ASSERT(output0 != nullptr); - MS_ASSERT(output1 != nullptr); - - output0->set_data_type(kNumberTypeInt32); - output0->set_format(input->format()); - output1->set_data_type(kNumberTypeFloat32); - output1->set_format(input->format()); - - if (input->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - std::vector<int> shape; - int string_num = lite::GetStringCount(input); - shape.push_back(string_num == 0 ? 1 : string_num); - - output0->set_shape(shape); - output1->set_shape(shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/custom_extract_features.h b/mindspore/lite/src/ops/custom_extract_features.h deleted file mode 100644 index c9718a6b45..0000000000 --- a/mindspore/lite/src/ops/custom_extract_features.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
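CustomExtractFeatures (and CustomNormalize just below) share one shape rule: the output is one-dimensional, with length equal to the number of strings packed in the input tensor, clamped to at least one so an empty input still yields a valid shape. A minimal restatement follows; StringOutputLen is a hypothetical stand-in, and the count is passed in directly, whereas the real code obtains it from lite::GetStringCount on the packed string-tensor buffer.

#include <iostream>
#include <vector>

// Stand-in for the GetStringCount-driven rule above: an input holding n
// strings produces a 1-D output of length max(n, 1).
int StringOutputLen(int string_num) { return string_num == 0 ? 1 : string_num; }

int main() {
  for (int n : std::vector<int>{0, 1, 7}) {
    std::cout << "strings=" << n << " -> output shape {" << StringOutputLen(n) << "}\n";
  }
  return 0;
}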
- */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_CUSTOM_EXTRACT_FEATURES_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CUSTOM_EXTRACT_FEATURES_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class CustomExtractFeatures : public PrimitiveC { - public: - CustomExtractFeatures() = default; - ~CustomExtractFeatures() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(CustomExtractFeatures, PrimitiveC); - explicit CustomExtractFeatures(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CUSTOM_EXTRACT_FEATURES_H_ diff --git a/mindspore/lite/src/ops/custom_normalize.cc b/mindspore/lite/src/ops/custom_normalize.cc deleted file mode 100644 index 6ba50c6a59..0000000000 --- a/mindspore/lite/src/ops/custom_normalize.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "src/ops/custom_normalize.h" - -#include "src/common/string_util.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int CustomNormalize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_OK; } -#else -int CustomNormalize::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateCustomNormalize(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_CustomNormalize, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *CustomNormalizeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<CustomNormalize>(primitive); -} -Registry CustomNormalizeRegistry(schema::PrimitiveType_CustomNormalize, CustomNormalizeCreator); -#endif - -int CustomNormalize::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.at(0); - auto output = outputs_.at(0); - MS_ASSERT(input != nullptr); - MS_ASSERT(output != nullptr); - - output->set_data_type(input->data_type()); - output->set_format(input->format()); - - if (input->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - std::vector<int> shape; - int string_num = lite::GetStringCount(input); - shape.push_back(string_num == 0 ? 
1 : string_num); - - output->set_shape(shape); - return RET_OK; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/custom_normalize.h b/mindspore/lite/src/ops/custom_normalize.h deleted file mode 100644 index 799df336aa..0000000000 --- a/mindspore/lite/src/ops/custom_normalize.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_CUSTOM_NORMALIZE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CUSTOM_NORMALIZE_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class CustomNormalize : public PrimitiveC { - public: - CustomNormalize() = default; - ~CustomNormalize() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(CustomNormalize, PrimitiveC); - explicit CustomNormalize(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CUSTOM_NORMALIZE_H_ diff --git a/mindspore/lite/src/ops/custom_predict.cc b/mindspore/lite/src/ops/custom_predict.cc deleted file mode 100644 index 0afbbfa77d..0000000000 --- a/mindspore/lite/src/ops/custom_predict.cc +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/custom_predict.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int CustomPredict::GetOutputNum() const { return this->primitive_->value.AsCustomPredict()->outputNum; } -float CustomPredict::GetWeightThreshold() const { return this->primitive_->value.AsCustomPredict()->weightThreshold; } - -void CustomPredict::SetOutputNum(int output_num) { this->primitive_->value.AsCustomPredict()->outputNum = output_num; } -void CustomPredict::SetWeightThreshold(float weight_threshold) { - this->primitive_->value.AsCustomPredict()->weightThreshold = weight_threshold; -} -int CustomPredict::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_OK; } -#else -int CustomPredict::GetOutputNum() const { return this->primitive_->value_as_CustomPredict()->outputNum(); } -float CustomPredict::GetWeightThreshold() const { - return this->primitive_->value_as_CustomPredict()->weightThreshold(); -} - -int CustomPredict::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_CustomPredict(); - if (attr == nullptr) { - MS_LOG(ERROR) << "CustomPredict attr is nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateCustomPredict(*fbb, attr->outputNum(), attr->weightThreshold()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_CustomPredict, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *CustomPredictCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<CustomPredict>(primitive); -} -Registry CustomPredictRegistry(schema::PrimitiveType_CustomPredict, CustomPredictCreator); -#endif - -int CustomPredict::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.at(0); - auto output0 = outputs_.at(0); - auto output1 = outputs_.at(1); - MS_ASSERT(input != nullptr); - MS_ASSERT(output0 != nullptr); - MS_ASSERT(output1 != nullptr); - - std::vector<int> shape; - shape.push_back(GetOutputNum()); - - output0->set_shape(shape); - output0->set_data_type(kNumberTypeInt32); - output0->set_format(input->format()); - output1->set_shape(shape); - output1->set_data_type(kNumberTypeFloat32); - output1->set_format(input->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/custom_predict.h b/mindspore/lite/src/ops/custom_predict.h deleted file mode 100644 index 404558829d..0000000000 --- a/mindspore/lite/src/ops/custom_predict.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
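CustomPredict's shape rule is attribute-driven rather than data-driven: both outputs are one-dimensional of length outputNum, the first holding int32 labels and the second float32 weights. A small sketch making that pairing explicit; PredictOutputs and MakePredictOutputs are hypothetical names, and the tensor plumbing is omitted.

#include <cstdint>
#include <iostream>
#include <vector>

// Restated contract: for outputNum = k, output0 is int32[k] (label ids)
// and output1 is float32[k] (their weights).
struct PredictOutputs {
  std::vector<int32_t> labels;
  std::vector<float> weights;
};

PredictOutputs MakePredictOutputs(int output_num) {
  return {std::vector<int32_t>(output_num), std::vector<float>(output_num)};
}

int main() {
  auto outs = MakePredictOutputs(3);
  std::cout << outs.labels.size() << " labels, " << outs.weights.size() << " weights\n";
  return 0;
}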
- */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_CUSTOM_PREDICT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_CUSTOM_PREDICT_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class CustomPredict : public PrimitiveC { - public: - CustomPredict() = default; - ~CustomPredict() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(CustomPredict, PrimitiveC); - explicit CustomPredict(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int GetOutputNum() const; - float GetWeightThreshold() const; - void SetOutputNum(int output_num); - void SetWeightThreshold(float weight_threshold); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int GetOutputNum() const; - float GetWeightThreshold() const; - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_CUSTOM_PREDICT_H_ diff --git a/mindspore/lite/src/ops/deconv2d.cc b/mindspore/lite/src/ops/deconv2d.cc deleted file mode 100644 index 0b36395fd6..0000000000 --- a/mindspore/lite/src/ops/deconv2d.cc +++ /dev/null @@ -1,375 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/deconv2d.h" -#include <memory> -#include <string> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#ifdef PRIMITIVE_WRITEABLE -#include <float.h> -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int DeConv2D::GetFormat() const { return this->primitive_->value.AsDeConv2D()->format; } -int DeConv2D::GetGroup() const { return this->primitive_->value.AsDeConv2D()->group; } -int DeConv2D::GetChannelIn() const { return this->primitive_->value.AsDeConv2D()->channelIn; } -int DeConv2D::GetChannelOut() const { return this->primitive_->value.AsDeConv2D()->channelOut; } -int DeConv2D::GetKernelW() const { return this->primitive_->value.AsDeConv2D()->kernelW; } -int DeConv2D::GetKernelH() const { return this->primitive_->value.AsDeConv2D()->kernelH; } -int DeConv2D::GetStrideW() const { return this->primitive_->value.AsDeConv2D()->strideW; } -int DeConv2D::GetStrideH() const { return this->primitive_->value.AsDeConv2D()->strideH; } -int DeConv2D::GetPadMode() const { return this->primitive_->value.AsDeConv2D()->padMode; } -int DeConv2D::GetPadUp() const { return this->primitive_->value.AsDeConv2D()->padUp; } -int DeConv2D::GetPadDown() const { return this->primitive_->value.AsDeConv2D()->padDown; } -int DeConv2D::GetPadLeft() const { return this->primitive_->value.AsDeConv2D()->padLeft; } -int DeConv2D::GetPadRight() const { return this->primitive_->value.AsDeConv2D()->padRight; } -int DeConv2D::GetDilateW() const { return this->primitive_->value.AsDeConv2D()->dilateW; } -int DeConv2D::GetDilateH() const { return this->primitive_->value.AsDeConv2D()->dilateH; } -int DeConv2D::GetActivationType() const { return this->primitive_->value.AsDeConv2D()->activationType; } -int DeConv2D::GetOutputPaddingW() const { return this->primitive_->value.AsDeConv2D()->outputPaddingW; } -int DeConv2D::GetOutputPaddingH() const { return this->primitive_->value.AsDeConv2D()->outputPaddingH; } - -void DeConv2D::SetFormat(int format) { this->primitive_->value.AsDeConv2D()->format = (schema::Format)format; } -void DeConv2D::SetGroup(int group) { this->primitive_->value.AsDeConv2D()->group = group; } -void DeConv2D::SetChannelIn(int channel_in) { this->primitive_->value.AsDeConv2D()->channelIn = channel_in; } -void DeConv2D::SetChannelOut(int channel_out) { this->primitive_->value.AsDeConv2D()->channelOut = channel_out; } -void DeConv2D::SetKernelW(int kernel_w) { this->primitive_->value.AsDeConv2D()->kernelW = kernel_w; } -void DeConv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsDeConv2D()->kernelH = kernel_h; } -void DeConv2D::SetStrideW(int stride_w) { this->primitive_->value.AsDeConv2D()->strideW = stride_w; } -void DeConv2D::SetStrideH(int stride_h) { this->primitive_->value.AsDeConv2D()->strideH = stride_h; } -void DeConv2D::SetPadMode(int pad_mode) { this->primitive_->value.AsDeConv2D()->padMode = (schema::PadMode)pad_mode; } -void DeConv2D::SetPadUp(int pad_up) { this->primitive_->value.AsDeConv2D()->padUp = pad_up; } -void DeConv2D::SetPadDown(int pad_down) { this->primitive_->value.AsDeConv2D()->padDown = pad_down; } -void DeConv2D::SetPadLeft(int pad_left) { this->primitive_->value.AsDeConv2D()->padLeft = pad_left; } -void DeConv2D::SetPadRight(int pad_right) { this->primitive_->value.AsDeConv2D()->padRight = pad_right; } -void DeConv2D::SetDilateW(int dilate_w) { this->primitive_->value.AsDeConv2D()->dilateW = 
dilate_w; } -void DeConv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsDeConv2D()->dilateH = dilate_h; } -void DeConv2D::SetActivationType(int activation_type) { - this->primitive_->value.AsDeConv2D()->activationType = (schema::ActivationType)activation_type; -} -template <typename T> -void ConvertConvWeight(const ParameterPtr &param_node) { - MS_ASSERT(param_node != nullptr); - auto param = param_node->default_param(); - auto weight = std::dynamic_pointer_cast<ParamValueLite>(param); - MS_ASSERT(weight != nullptr); - - std::unique_ptr<T[]> buf(new (std::nothrow) T[weight->tensor_shape_size()]); - if (buf == nullptr) { - MS_LOG(ERROR) << "new buf failed"; - return; - } - - size_t filter_k = weight->tensor_shape().at(0); - size_t filter_c = weight->tensor_shape().at(1); - size_t filter_h = weight->tensor_shape().at(2); - size_t filter_w = weight->tensor_shape().at(3); - T *p1Buff = nullptr; - T *p2Buff = nullptr; - for (size_t k = 0; k < filter_k; ++k) { - for (size_t c = 0; c < filter_c; ++c) { - for (size_t h = 0; h < filter_h; ++h) { - for (size_t w = 0; w < filter_w; ++w) { - p1Buff = reinterpret_cast<float *>(weight->tensor_addr()) + - ((k * filter_c * filter_h * filter_w) + (c * filter_h * filter_w) + (h * filter_w) + (w)); - p2Buff = - buf.get() + ((c * filter_k * filter_h * filter_w) + (k * filter_h * filter_w) + (h * filter_w) + (w)); - *p2Buff = *p1Buff; - } - } - } - } - - auto ret = ::memcpy_s(weight->tensor_addr(), weight->tensor_shape_size() * sizeof(T), buf.get(), - weight->tensor_shape_size() * sizeof(T)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy_s failed: " << ret; - return; - } - - auto abstract_base = param_node->abstract(); - MS_ASSERT(abstract_base != nullptr); - if (utils::isa<abstract::AbstractTensorPtr>(abstract_base)) { - auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base); - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[0] = filter_c; - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[1] = filter_k; - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[2] = filter_h; - utils::cast<abstract::ShapePtr>(abstract_tensor->BuildShape())->shape()[3] = filter_w; - } - return; -} - -void DeConv2D::PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, - const std::vector<AnfNodePtr> &inputs) { - auto attr = std::make_unique<schema::DeDepthwiseConv2DT>(); - if (attr.get() == nullptr) { - MS_LOG(ERROR) << "Memory allocation failed"; - return; - } - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - attr->format = schema::Format::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation.at(0); - attr->dilateW = dilation.at(1); - - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = kernel_size.at(1); - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride.at(0); - attr->strideW = stride.at(1); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid") { - attr->padMode = schema::PadMode_VALID; 
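The ConvertConvWeight loop above is, underneath the pointer arithmetic, a plain swap of the first two filter axes: a (K, C, H, W) buffer is rewritten as (C, K, H, W), presumably so the multi-group (depthwise) deconv path sees one kernel stack per input channel. The index mapping, isolated into a runnable sketch under the assumption of a contiguous row-major float buffer; SwapKC is a hypothetical helper, not the deleted template.

#include <cstddef>
#include <iostream>
#include <vector>

// Same element mapping as the nested loops above:
// dst[c][k][h][w] = src[k][c][h][w], both buffers contiguous row-major.
std::vector<float> SwapKC(const std::vector<float> &src, size_t K, size_t C, size_t H, size_t W) {
  std::vector<float> dst(src.size());
  for (size_t k = 0; k < K; ++k)
    for (size_t c = 0; c < C; ++c)
      for (size_t h = 0; h < H; ++h)
        for (size_t w = 0; w < W; ++w)
          dst[((c * K + k) * H + h) * W + w] = src[((k * C + c) * H + h) * W + w];
  return dst;
}

int main() {
  // 2x3x1x1 filter where element (k, c) holds 10 * k + c, so the swap shows.
  std::vector<float> src = {0, 1, 2, 10, 11, 12};
  for (float v : SwapKC(src, 2, 3, 1, 1)) std::cout << v << " ";  // 0 10 1 11 2 12
  std::cout << "\n";
  return 0;
}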
- } else if (pad_mode == "same") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - if (prim.GetAttr("activation_name") != nullptr) { - std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - - int channel_mutiplier = 1; - if (prim.GetAttr("channel_mutiplier") != nullptr) { - channel_mutiplier = CastToInt(prim.GetAttr("channel_multiplier")).front(); - } - attr->channelMultiplier = channel_mutiplier; - - MS_ASSERT(inputs.size() == kAnfPopulaterInputNumTwo); - auto input_node = inputs[kAnfPopulaterInputNumOne]; - MS_ASSERT(input_node != nullptr); - if (input_node->isa<Parameter>()) { - auto param_node = input_node->cast<ParameterPtr>(); - ConvertConvWeight<float>(param_node); - } - - primitive->value.type = schema::PrimitiveType_DeDepthwiseConv2D; - primitive->value.value = attr.release(); -} - -void DeConv2D::PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group) { - auto attr = std::make_unique<schema::DeConv2DT>(); - if (attr.get() == nullptr) { - MS_LOG(ERROR) << "Memory allocation failed"; - return; - } - attr->group = group; - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; - } else { - attr->format = schema::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pad_list")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation.at(0); - attr->dilateW = dilation.at(1); - - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = kernel_size.at(1); - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride.at(0); - attr->strideW = stride.at(1); - - attr->channelOut = CastToInt(prim.GetAttr("out_channel")).front(); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid" || pad_mode == "VALID") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "same" || pad_mode == "SAME") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - if (prim.GetAttr("activation_name") != nullptr) { - std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - - primitive->value.type = schema::PrimitiveType_DeConv2D; - primitive->value.value = attr.release(); -} - -int DeConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_DeConv2D; - } - if (this->primitive_->value.type != schema::PrimitiveType_DeConv2D) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - int group = CastToInt(prim.GetAttr("group")).front(); - if (group == 1) { - 
PopulaterDeConv2DSingleGroup(prim, this->primitive_, group); - } else if (group > 1) { - PopulaterConv2DMultiGroup(prim, this->primitive_, group, inputs); - } - PopulaterQuantParam(prim, inputs); - return RET_OK; -} -#else -int DeConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_DeConv2D(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DeConv2D return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreateDeConv2D(*fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), - attr->kernelH(), attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), - attr->padDown(), attr->padLeft(), attr->padRight(), attr->dilateW(), attr->dilateH(), - attr->hasBias(), attr->activationType(), attr->outputPaddingW(), attr->outputPaddingH()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DeConv2D, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int DeConv2D::GetFormat() const { return this->primitive_->value_as_DeConv2D()->format(); } -int DeConv2D::GetGroup() const { return this->primitive_->value_as_DeConv2D()->group(); } -int DeConv2D::GetChannelIn() const { return this->primitive_->value_as_DeConv2D()->channelIn(); } -int DeConv2D::GetChannelOut() const { return this->primitive_->value_as_DeConv2D()->channelOut(); } -int DeConv2D::GetKernelW() const { return this->primitive_->value_as_DeConv2D()->kernelW(); } -int DeConv2D::GetKernelH() const { return this->primitive_->value_as_DeConv2D()->kernelH(); } -int DeConv2D::GetStrideW() const { return this->primitive_->value_as_DeConv2D()->strideW(); } -int DeConv2D::GetStrideH() const { return this->primitive_->value_as_DeConv2D()->strideH(); } -int DeConv2D::GetPadMode() const { return this->primitive_->value_as_DeConv2D()->padMode(); } -int DeConv2D::GetPadUp() const { return this->primitive_->value_as_DeConv2D()->padUp(); } -int DeConv2D::GetPadDown() const { return this->primitive_->value_as_DeConv2D()->padDown(); } -int DeConv2D::GetPadLeft() const { return this->primitive_->value_as_DeConv2D()->padLeft(); } -int DeConv2D::GetPadRight() const { return this->primitive_->value_as_DeConv2D()->padRight(); } -int DeConv2D::GetDilateW() const { return this->primitive_->value_as_DeConv2D()->dilateW(); } -int DeConv2D::GetDilateH() const { return this->primitive_->value_as_DeConv2D()->dilateH(); } -int DeConv2D::GetActivationType() const { return this->primitive_->value_as_DeConv2D()->activationType(); } -int DeConv2D::GetOutputPaddingW() const { return this->primitive_->value_as_DeConv2D()->outputPaddingW(); } -int DeConv2D::GetOutputPaddingH() const { return this->primitive_->value_as_DeConv2D()->outputPaddingH(); } - -PrimitiveC *DeConv2DCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DeConv2D>(primitive); -} -Registry DeConv2DRegistry(schema::PrimitiveType_DeConv2D, DeConv2DCreator); -#endif - -int DeConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - int32_t 
input_h = input->Height(); - int32_t input_w = input->Width(); - - int32_t output_n = input->Batch(); - int32_t output_h = 0; - int32_t output_w = 0; - int32_t output_c = weight->Channel(); - - int kernel_w = GetKernelW(); - int kernel_h = GetKernelH(); - int stride_w = GetStrideW(); - int stride_h = GetStrideH(); - int dilate_w = GetDilateW(); - int dilate_h = GetDilateH(); - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - auto pad_mode = (schema::PadMode)GetPadMode(); - if (pad_mode == schema::PadMode_CAFFE || pad_mode == schema::PadMode_NOTSET) { - output_h = (input_h - 1) * stride_h + ((kernel_h - 1) * dilate_h + 1) - pad_u_ - pad_d_; - output_w = (input_w - 1) * stride_w + ((kernel_w - 1) * dilate_w + 1) - pad_l_ - pad_r_; - } else if (pad_mode == schema::PadMode_SAME_UPPER) { - output_h = input_h * stride_h; - output_w = input_w * stride_w; - } else if (pad_mode == schema::PadMode_VALID) { - output_h = (input_h - 1) * stride_h + kernel_h; - output_w = (input_w - 1) * stride_w + kernel_w; - } else { - MS_LOG(ERROR) << "unsupported pad mode for deconv"; - return RET_ERROR; - } - output_h += GetOutputPaddingH(); - output_w += GetOutputPaddingW(); - std::vector<int> out_shape = {output_n, output_h, output_w, output_c}; - output->set_shape(out_shape); - - if (pad_mode == schema::PadMode_SAME_UPPER) { - pad_u_ = ((input_h - 1) * stride_h + (kernel_h - 1) * dilate_h + 1 - output_h) / 2; - pad_l_ = ((input_w - 1) * stride_w + (kernel_w - 1) * dilate_w + 1 - output_w) / 2; - } else if (pad_mode == schema::PadMode_VALID) { - pad_u_ = 0; - pad_l_ = 0; - } else if (pad_mode == schema::PadMode_CAFFE || pad_mode == schema::PadMode_NOTSET) { - } else { - MS_LOG(ERROR) << "unsupported pad mode for deconv"; - return RET_ERROR; - } - - return 0; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/deconv2d.h b/mindspore/lite/src/ops/deconv2d.h deleted file mode 100644 index 5ffe92b83a..0000000000 --- a/mindspore/lite/src/ops/deconv2d.h +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
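The three pad-mode branches in DeConv2D::InferShape above are the standard transposed-convolution size rules. A hedged standalone check of one axis (DeconvOutLen is a hypothetical helper; output padding is ignored here because the deleted code simply adds it afterwards):

#include <iostream>
#include <stdexcept>
#include <string>

// Transposed-conv output length along one axis, matching the branches above:
// NOTSET/CAFFE subtracts the explicit pads from the dilated extent,
// SAME_UPPER is a pure stride upsample, VALID assumes no padding at all.
int DeconvOutLen(int in, int kernel, int stride, int dilate, int pad_total, const std::string &mode) {
  const int eff_kernel = (kernel - 1) * dilate + 1;  // dilated kernel extent
  if (mode == "NOTSET") return (in - 1) * stride + eff_kernel - pad_total;
  if (mode == "SAME_UPPER") return in * stride;
  if (mode == "VALID") return (in - 1) * stride + kernel;
  throw std::invalid_argument("unsupported pad mode for deconv");
}

int main() {
  // 4 -> 8 with kernel 2, stride 2: (4-1)*2 + 2 = 8 under VALID,
  // and 4 * 2 = 8 under SAME_UPPER, the expected 2x upsample either way.
  std::cout << DeconvOutLen(4, 2, 2, 1, 0, "VALID") << " "
            << DeconvOutLen(4, 2, 2, 1, 0, "SAME_UPPER") << "\n";
  return 0;
}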
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_ -#define LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DeConv2D : public PrimitiveC { - public: - DeConv2D() = default; - ~DeConv2D() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DeConv2D, PrimitiveC); - explicit DeConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetGroup(int group); - void SetChannelIn(int channel_in); - void SetChannelOut(int channel_out); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); - void PopulaterDeConv2DSingleGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group); - void PopulaterConv2DMultiGroup(const Primitive &prim, schema::PrimitiveT *primitive, const int &group, - const std::vector<AnfNodePtr> &inputs); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetGroup() const; - int GetChannelIn() const; - int GetChannelOut() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; - int GetOutputPaddingW() const; - int GetOutputPaddingH() const; - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_DE_CONV2_D_H_ diff --git a/mindspore/lite/src/ops/dedepthwise_conv2d.cc b/mindspore/lite/src/ops/dedepthwise_conv2d.cc deleted file mode 100644 index 5badb82105..0000000000 --- a/mindspore/lite/src/ops/dedepthwise_conv2d.cc +++ /dev/null @@ -1,171 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/dedepthwise_conv2d.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int DeDepthwiseConv2D::GetFormat() const { return this->primitive_->value.AsDeDepthwiseConv2D()->format; } -int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive_->value.AsDeDepthwiseConv2D()->channelIn; } -int DeDepthwiseConv2D::GetChannelMultiplier() const { - return this->primitive_->value.AsDeDepthwiseConv2D()->channelMultiplier; -} -int DeDepthwiseConv2D::GetKernelW() const { return this->primitive_->value.AsDeDepthwiseConv2D()->kernelW; } -int DeDepthwiseConv2D::GetKernelH() const { return this->primitive_->value.AsDeDepthwiseConv2D()->kernelH; } -int DeDepthwiseConv2D::GetStrideW() const { return this->primitive_->value.AsDeDepthwiseConv2D()->strideW; } -int DeDepthwiseConv2D::GetStrideH() const { return this->primitive_->value.AsDeDepthwiseConv2D()->strideH; } -int DeDepthwiseConv2D::GetPadMode() const { return this->primitive_->value.AsDeDepthwiseConv2D()->padMode; } -int DeDepthwiseConv2D::GetPadUp() const { return this->primitive_->value.AsDeDepthwiseConv2D()->padUp; } -int DeDepthwiseConv2D::GetPadDown() const { return this->primitive_->value.AsDeDepthwiseConv2D()->padDown; } -int DeDepthwiseConv2D::GetPadLeft() const { return this->primitive_->value.AsDeDepthwiseConv2D()->padLeft; } -int DeDepthwiseConv2D::GetPadRight() const { return this->primitive_->value.AsDeDepthwiseConv2D()->padRight; } -int DeDepthwiseConv2D::GetDilateW() const { return this->primitive_->value.AsDeDepthwiseConv2D()->dilateW; } -int DeDepthwiseConv2D::GetDilateH() const { return this->primitive_->value.AsDeDepthwiseConv2D()->dilateH; } -int DeDepthwiseConv2D::GetActivationType() const { - return this->primitive_->value.AsDeDepthwiseConv2D()->activationType; -} - -void DeDepthwiseConv2D::SetFormat(int format) { - this->primitive_->value.AsDeDepthwiseConv2D()->format = static_cast<schema::Format>(format); -} -void DeDepthwiseConv2D::SetChannelIn(int channel_in) { - this->primitive_->value.AsDeDepthwiseConv2D()->channelIn = channel_in; -} -void DeDepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) { - this->primitive_->value.AsDeDepthwiseConv2D()->channelMultiplier = channel_multiplier; -} -void DeDepthwiseConv2D::SetKernelW(int kernel_w) { this->primitive_->value.AsDeDepthwiseConv2D()->kernelW = kernel_w; } -void DeDepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsDeDepthwiseConv2D()->kernelH = kernel_h; } -void DeDepthwiseConv2D::SetStrideW(int stride_w) { this->primitive_->value.AsDeDepthwiseConv2D()->strideW = stride_w; } -void DeDepthwiseConv2D::SetStrideH(int stride_h) { this->primitive_->value.AsDeDepthwiseConv2D()->strideH = stride_h; } -void DeDepthwiseConv2D::SetPadMode(int pad_mode) { - this->primitive_->value.AsDeDepthwiseConv2D()->padMode = static_cast<schema::PadMode>(pad_mode); -} -void DeDepthwiseConv2D::SetPadUp(int pad_up) { this->primitive_->value.AsDeDepthwiseConv2D()->padUp = pad_up; } -void DeDepthwiseConv2D::SetPadDown(int pad_down) { this->primitive_->value.AsDeDepthwiseConv2D()->padDown = pad_down; } -void DeDepthwiseConv2D::SetPadLeft(int pad_left) { this->primitive_->value.AsDeDepthwiseConv2D()->padLeft = pad_left; } -void DeDepthwiseConv2D::SetPadRight(int pad_right) { - this->primitive_->value.AsDeDepthwiseConv2D()->padRight = pad_right; -} -void DeDepthwiseConv2D::SetDilateW(int dilate_w) { 
this->primitive_->value.AsDeDepthwiseConv2D()->dilateW = dilate_w; } -void DeDepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsDeDepthwiseConv2D()->dilateH = dilate_h; } -void DeDepthwiseConv2D::SetActivationType(int activation_type) { - this->primitive_->value.AsDeDepthwiseConv2D()->activationType = static_cast<schema::ActivationType>(activation_type); -} - -#else -int DeDepthwiseConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_DeDepthwiseConv2D(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DeDepthwiseConv2D return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateDeDepthwiseConv2D( - *fbb, attr->format(), attr->channelIn(), attr->channelMultiplier(), attr->kernelW(), attr->kernelH(), - attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(), - attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DeDepthwiseConv2D, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int DeDepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DeDepthwiseConv2D()->format(); } -int DeDepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DeDepthwiseConv2D()->channelIn(); } -int DeDepthwiseConv2D::GetChannelMultiplier() const { - return this->primitive_->value_as_DeDepthwiseConv2D()->channelMultiplier(); -} -int DeDepthwiseConv2D::GetKernelW() const { return this->primitive_->value_as_DeDepthwiseConv2D()->kernelW(); } -int DeDepthwiseConv2D::GetKernelH() const { return this->primitive_->value_as_DeDepthwiseConv2D()->kernelH(); } -int DeDepthwiseConv2D::GetStrideW() const { return this->primitive_->value_as_DeDepthwiseConv2D()->strideW(); } -int DeDepthwiseConv2D::GetStrideH() const { return this->primitive_->value_as_DeDepthwiseConv2D()->strideH(); } -int DeDepthwiseConv2D::GetPadMode() const { return this->primitive_->value_as_DeDepthwiseConv2D()->padMode(); } -int DeDepthwiseConv2D::GetPadUp() const { return this->primitive_->value_as_DeDepthwiseConv2D()->padUp(); } -int DeDepthwiseConv2D::GetPadDown() const { return this->primitive_->value_as_DeDepthwiseConv2D()->padDown(); } -int DeDepthwiseConv2D::GetPadLeft() const { return this->primitive_->value_as_DeDepthwiseConv2D()->padLeft(); } -int DeDepthwiseConv2D::GetPadRight() const { return this->primitive_->value_as_DeDepthwiseConv2D()->padRight(); } -int DeDepthwiseConv2D::GetDilateW() const { return this->primitive_->value_as_DeDepthwiseConv2D()->dilateW(); } -int DeDepthwiseConv2D::GetDilateH() const { return this->primitive_->value_as_DeDepthwiseConv2D()->dilateH(); } -int DeDepthwiseConv2D::GetActivationType() const { - return this->primitive_->value_as_DeDepthwiseConv2D()->activationType(); -} - -PrimitiveC *DeDepthwiseConv2DCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DeDepthwiseConv2D>(primitive); -} -Registry DeDepthwiseConv2DRegistry(schema::PrimitiveType_DeDepthwiseConv2D, DeDepthwiseConv2DCreator); -#endif - -int DeDepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (inputs_.size() != kDoubleNum && inputs_.size() != kTripleNum) { - MS_LOG(ERROR) << "inputs number is invalid"; - return 1; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << 
"output number is invalid"; - return 1; - } - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto in_shape = input->shape(); - int input_h = in_shape.at(1); - int input_w = in_shape.at(2); - int input_channel = in_shape.at(3); - int output_w = 0, output_h = 0; - - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - output_h = GetStrideH() * (input_h - 1) + GetKernelH() - pad_u_ - pad_d_; - output_w = GetStrideW() * (input_w - 1) + GetKernelW() - pad_l_ - pad_r_; - if ((output_h + GetPadUp() + GetPadDown() - GetKernelH()) % GetStrideH() != 0) { - output_h += (output_h + GetPadLeft() + GetPadRight() - GetKernelH()) % GetStrideH(); - } - if ((output_w + GetPadLeft() + GetPadRight() - GetKernelW()) % GetStrideW() != 0) { - output_w += (output_w + GetPadLeft() + GetPadRight() - GetKernelW()) % GetStrideW(); - } - std::vector<int> out_shape{input->shape()}; - out_shape.at(1) = output_h; - out_shape.at(2) = output_w; - if (GetChannelMultiplier() * input_channel != weight->shape()[0]) { - MS_LOG(ERROR) << "Conv dedepthwise only support group equals output channel."; - return RET_ERROR; - } - out_shape.at(3) = weight->shape()[0] * weight->shape()[3]; // in_channel * out_channel - - output->set_shape(out_shape); - return 0; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/dedepthwise_conv2d.h b/mindspore/lite/src/ops/dedepthwise_conv2d.h deleted file mode 100644 index 4848661564..0000000000 --- a/mindspore/lite/src/ops/dedepthwise_conv2d.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_DEDEPTHWISE_CONV2D_H_ -#define MINDSPORE_LITE_SRC_OPS_DEDEPTHWISE_CONV2D_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DeDepthwiseConv2D : public PrimitiveC { - public: - DeDepthwiseConv2D() = default; - ~DeDepthwiseConv2D() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DeDepthwiseConv2D, PrimitiveC); - explicit DeDepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetChannelIn(int channel_in); - void SetChannelMultiplier(int channel_multiplier); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetChannelIn() const; - int GetChannelMultiplier() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; - - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_DEDEPTHWISE_CONV2D_H_ diff --git a/mindspore/lite/src/ops/depend.cc b/mindspore/lite/src/ops/depend.cc deleted file mode 100644 index f1a4139e4f..0000000000 --- a/mindspore/lite/src/ops/depend.cc +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/depend.h" -#include <vector> -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Depend::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Depend; - } - if (this->primitive_->value.type != schema::PrimitiveType_Depend) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow)(schema::DependT); - if (attr == nullptr) { - MS_LOG(ERROR) << "attr is nullptr"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -int Depend::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateDepend(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Depend, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *DependCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Depend>(primitive); } -Registry DependRegistry(schema::PrimitiveType_Depend, DependCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/depend.h b/mindspore/lite/src/ops/depend.h deleted file mode 100644 index cc7f797308..0000000000 --- a/mindspore/lite/src/ops/depend.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_DEPEND_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_DEPEND_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Depend : public PrimitiveC { - public: - Depend() = default; - ~Depend() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Depend, PrimitiveC); - explicit Depend(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_DEPEND_H_ diff --git a/mindspore/lite/src/ops/depth_to_space.cc b/mindspore/lite/src/ops/depth_to_space.cc deleted file mode 100644 index 1109e678a5..0000000000 --- a/mindspore/lite/src/ops/depth_to_space.cc +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/depth_to_space.h" -#include "src/common/common.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int DepthToSpace::GetBlockSize() const { return this->primitive_->value.AsDepthToSpace()->blockSize; } -int DepthToSpace::GetFormat() const { return this->primitive_->value.AsDepthToSpace()->format; } - -void DepthToSpace::SetBlockSize(int block_size) { this->primitive_->value.AsDepthToSpace()->blockSize = block_size; } -void DepthToSpace::SetFormat(int format) { this->primitive_->value.AsDepthToSpace()->format = (schema::Format)format; } - -#else -int DepthToSpace::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_DepthToSpace(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DepthToSpace return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateDepthToSpace(*fbb, attr->blockSize(), attr->format()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DepthToSpace, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int DepthToSpace::GetBlockSize() const { return this->primitive_->value_as_DepthToSpace()->blockSize(); } -int DepthToSpace::GetFormat() const { return this->primitive_->value_as_DepthToSpace()->format(); } - -PrimitiveC *DepthToSpaceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DepthToSpace>(primitive); -} -Registry DepthToSpaceRegistry(schema::PrimitiveType_DepthToSpace, DepthToSpaceCreator); - -#endif - -namespace { -constexpr int kDepthToSpaceOutputNum = 1; -constexpr int kDepthToSpaceInputNum = 1; -} // namespace - -int DepthToSpace::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != 
nullptr); - if (outputs.size() != kDepthToSpaceOutputNum || inputs.size() != kDepthToSpaceInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_PARAM_INVALID; - } - - auto input = inputs.at(0); - if (input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "depth_to_space only support NHWC now!"; - return RET_FORMAT_ERR; - } - outputs[0]->set_data_type(input->data_type()); - outputs[0]->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size should == " << kQuadrupleNum; - return RET_PARAM_INVALID; - } - - int32_t block_size = GetBlockSize(); - if (input_shape[NHWC_C] % (block_size * block_size) != 0 || input_shape[NHWC_C] == 0) { - MS_LOG(ERROR) << "input dimension c size " << input_shape[NHWC_C] << " should be multiple of block_size(" - << block_size << ") * block_size)!"; - return RET_PARAM_INVALID; - } - std::vector<int32_t> output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N]; - output_shape[NHWC_H] = input_shape[NHWC_H] * block_size; - output_shape[NHWC_W] = input_shape[NHWC_W] * block_size; - output_shape[NHWC_C] = input_shape[NHWC_C] / (block_size * block_size); - outputs[0]->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/depth_to_space.h b/mindspore/lite/src/ops/depth_to_space.h deleted file mode 100644 index c9066fea37..0000000000 --- a/mindspore/lite/src/ops/depth_to_space.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
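The depth_to_space rule above only rearranges data, so the element count is preserved: C must be a non-zero multiple of blockSize squared, and H and W each grow by blockSize. A standalone restatement with the divisibility check (DepthToSpaceOutShape is a hypothetical helper, assuming NHWC as the op requires):

#include <array>
#include <cstdint>
#include <iostream>
#include <stdexcept>

// NHWC depth_to_space output shape, matching the checks above.
std::array<int32_t, 4> DepthToSpaceOutShape(const std::array<int32_t, 4> &in, int32_t b) {
  const int32_t b2 = b * b;
  if (in[3] == 0 || in[3] % b2 != 0) throw std::invalid_argument("C must be a multiple of block_size^2");
  return {in[0], in[1] * b, in[2] * b, in[3] / b2};
}

int main() {
  // 1x4x4x8 with block_size 2 -> 1x8x8x2; 4*4*8 == 8*8*2 elements either way.
  auto out = DepthToSpaceOutShape({1, 4, 4, 8}, 2);
  std::cout << out[0] << "x" << out[1] << "x" << out[2] << "x" << out[3] << "\n";
  return 0;
}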
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DepthToSpace : public PrimitiveC { - public: - DepthToSpace() = default; - ~DepthToSpace() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DepthToSpace, PrimitiveC); - explicit DepthToSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBlockSize(int block_size); - void SetFormat(int format); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetBlockSize() const; - int GetFormat() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_DEPTH_TO_SPACE_H_ diff --git a/mindspore/lite/src/ops/depthwise_conv2d.cc b/mindspore/lite/src/ops/depthwise_conv2d.cc deleted file mode 100644 index 587a58242b..0000000000 --- a/mindspore/lite/src/ops/depthwise_conv2d.cc +++ /dev/null @@ -1,262 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/depthwise_conv2d.h" - -#include <memory> -#include <string> -#ifdef PRIMITIVE_WRITEABLE -#include "src/param_value_lite.h" -#endif -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int DepthwiseConv2D::GetFormat() const { return this->primitive_->value.AsDepthwiseConv2D()->format; } -int DepthwiseConv2D::GetChannelIn() const { return this->primitive_->value.AsDepthwiseConv2D()->channelIn; } -int DepthwiseConv2D::GetChannelMultiplier() const { - return this->primitive_->value.AsDepthwiseConv2D()->channelMultiplier; -} -int DepthwiseConv2D::GetKernelW() const { return this->primitive_->value.AsDepthwiseConv2D()->kernelW; } -int DepthwiseConv2D::GetKernelH() const { return this->primitive_->value.AsDepthwiseConv2D()->kernelH; } -int DepthwiseConv2D::GetStrideW() const { return this->primitive_->value.AsDepthwiseConv2D()->strideW; } -int DepthwiseConv2D::GetStrideH() const { return this->primitive_->value.AsDepthwiseConv2D()->strideH; } -int DepthwiseConv2D::GetPadMode() const { return this->primitive_->value.AsDepthwiseConv2D()->padMode; } -int DepthwiseConv2D::GetPadUp() const { return this->primitive_->value.AsDepthwiseConv2D()->padUp; } -int DepthwiseConv2D::GetPadDown() const { return this->primitive_->value.AsDepthwiseConv2D()->padDown; } -int DepthwiseConv2D::GetPadLeft() const { return this->primitive_->value.AsDepthwiseConv2D()->padLeft; } -int DepthwiseConv2D::GetPadRight() const { return this->primitive_->value.AsDepthwiseConv2D()->padRight; } -int DepthwiseConv2D::GetDilateW() const { return this->primitive_->value.AsDepthwiseConv2D()->dilateW; } -int DepthwiseConv2D::GetDilateH() const { return this->primitive_->value.AsDepthwiseConv2D()->dilateH; } -int DepthwiseConv2D::GetActivationType() const { return this->primitive_->value.AsDepthwiseConv2D()->activationType; } - -void DepthwiseConv2D::SetFormat(int format) { - this->primitive_->value.AsDepthwiseConv2D()->format = static_cast<schema::Format>(format); -} -void DepthwiseConv2D::SetChannelIn(int channel_in) { - this->primitive_->value.AsDepthwiseConv2D()->channelIn = channel_in; -} -void DepthwiseConv2D::SetChannelMultiplier(int channel_multiplier) { - this->primitive_->value.AsDepthwiseConv2D()->channelMultiplier = channel_multiplier; -} -void DepthwiseConv2D::SetKernelW(int kernel_w) { this->primitive_->value.AsDepthwiseConv2D()->kernelW = kernel_w; } -void DepthwiseConv2D::SetKernelH(int kernel_h) { this->primitive_->value.AsDepthwiseConv2D()->kernelH = kernel_h; } -void DepthwiseConv2D::SetStrideW(int stride_w) { this->primitive_->value.AsDepthwiseConv2D()->strideW = stride_w; } -void DepthwiseConv2D::SetStrideH(int stride_h) { this->primitive_->value.AsDepthwiseConv2D()->strideH = stride_h; } -void DepthwiseConv2D::SetPadMode(int pad_mode) { - this->primitive_->value.AsDepthwiseConv2D()->padMode = static_cast<schema::PadMode>(pad_mode); -} -void DepthwiseConv2D::SetPadUp(int pad_up) { this->primitive_->value.AsDepthwiseConv2D()->padUp = pad_up; } -void DepthwiseConv2D::SetPadDown(int pad_down) { this->primitive_->value.AsDepthwiseConv2D()->padDown = pad_down; } -void DepthwiseConv2D::SetPadLeft(int pad_left) { this->primitive_->value.AsDepthwiseConv2D()->padLeft = pad_left; } -void DepthwiseConv2D::SetPadRight(int pad_right) { this->primitive_->value.AsDepthwiseConv2D()->padRight = pad_right; } -void DepthwiseConv2D::SetDilateW(int dilate_w) { this->primitive_->value.AsDepthwiseConv2D()->dilateW = 
dilate_w; } -void DepthwiseConv2D::SetDilateH(int dilate_h) { this->primitive_->value.AsDepthwiseConv2D()->dilateH = dilate_h; } -void DepthwiseConv2D::SetActivationType(int activation_type) { - this->primitive_->value.AsDepthwiseConv2D()->activationType = static_cast<schema::ActivationType>(activation_type); -} - -int DepthwiseConv2D::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - this->primitive_ = new (schema::PrimitiveT); - auto attr = std::make_unique<schema::DepthwiseConv2DT>(); - - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - attr->format = schema::Format::Format_NUM_OF_FORMAT; - } - auto pad_list = CastToInt(prim.GetAttr("pads")); - attr->padUp = pad_list.at(0); - attr->padDown = pad_list.at(1); - attr->padLeft = pad_list.at(2); - attr->padRight = pad_list.at(3); - - auto dilation = CastToInt(prim.GetAttr("dilation")); - attr->dilateH = dilation.at(0); - attr->dilateW = dilation.at(1); - - if (utils::isa<ValueSequeue>(prim.GetAttr("kernel_size"))) { - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")); - attr->kernelH = kernel_size.at(0); - attr->kernelW = kernel_size.at(1); - } else { - auto kernel_size = CastToInt(prim.GetAttr("kernel_size")).front(); - attr->kernelH = kernel_size; - attr->kernelW = kernel_size; - } - - auto stride = CastToInt(prim.GetAttr("stride")); - attr->strideH = stride.at(2); - attr->strideW = stride.at(3); - - auto pad_mode = GetValue<std::string>(prim.GetAttr("pad_mode")); - if (pad_mode == "valid") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "same") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - if (prim.GetAttr("activation_name") != nullptr) { - std::string activate_name = GetValue<std::string>(prim.GetAttr("activation_name")); - attr->activationType = kActivationTypeMap[activate_name]; - } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; - } - auto channel_multiplier = CastToInt(prim.GetAttr("channel_multiplier")).front(); - attr->channelMultiplier = channel_multiplier; - - MS_ASSERT(inputs.size() == kAnfPopulaterInputNumTwo); - auto inputNode = inputs.at(kAnfPopulaterInputNumOne); - MS_ASSERT(inputNode != nullptr); - if (inputNode->isa<Parameter>()) { - auto paramNode = inputNode->cast<ParameterPtr>(); - auto abstractBase = paramNode->abstract(); - MS_ASSERT(abstractBase != nullptr); - if (utils::isa<abstract::AbstractTensorPtr>(abstractBase)) { - auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase); - MS_ASSERT(abstractTensor != nullptr); - if (utils::isa<abstract::ShapePtr>(abstractTensor->BuildShape())) { - auto dims = utils::cast<abstract::ShapePtr>(abstractTensor->BuildShape())->shape(); - attr->channelIn = dims.at(kAnfPopulaterInputNumOne); - } - } - } - - this->primitive_->value.type = schema::PrimitiveType_DepthwiseConv2D; - this->primitive_->value.value = attr.release(); - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else -int DepthwiseConv2D::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_DepthwiseConv2D(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DepthwiseConv2D return nullptr"; - return RET_ERROR; - } - auto val_offset = 
schema::CreateDepthwiseConv2D( - *fbb, attr->format(), attr->channelIn(), attr->channelMultiplier(), attr->kernelW(), attr->kernelH(), - attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(), - attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DepthwiseConv2D, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int DepthwiseConv2D::GetFormat() const { return this->primitive_->value_as_DepthwiseConv2D()->format(); } -int DepthwiseConv2D::GetChannelIn() const { return this->primitive_->value_as_DepthwiseConv2D()->channelIn(); } -int DepthwiseConv2D::GetChannelMultiplier() const { - return this->primitive_->value_as_DepthwiseConv2D()->channelMultiplier(); -} -int DepthwiseConv2D::GetKernelW() const { return this->primitive_->value_as_DepthwiseConv2D()->kernelW(); } -int DepthwiseConv2D::GetKernelH() const { return this->primitive_->value_as_DepthwiseConv2D()->kernelH(); } -int DepthwiseConv2D::GetStrideW() const { return this->primitive_->value_as_DepthwiseConv2D()->strideW(); } -int DepthwiseConv2D::GetStrideH() const { return this->primitive_->value_as_DepthwiseConv2D()->strideH(); } -int DepthwiseConv2D::GetPadMode() const { return this->primitive_->value_as_DepthwiseConv2D()->padMode(); } -int DepthwiseConv2D::GetPadUp() const { return this->primitive_->value_as_DepthwiseConv2D()->padUp(); } -int DepthwiseConv2D::GetPadDown() const { return this->primitive_->value_as_DepthwiseConv2D()->padDown(); } -int DepthwiseConv2D::GetPadLeft() const { return this->primitive_->value_as_DepthwiseConv2D()->padLeft(); } -int DepthwiseConv2D::GetPadRight() const { return this->primitive_->value_as_DepthwiseConv2D()->padRight(); } -int DepthwiseConv2D::GetDilateW() const { return this->primitive_->value_as_DepthwiseConv2D()->dilateW(); } -int DepthwiseConv2D::GetDilateH() const { return this->primitive_->value_as_DepthwiseConv2D()->dilateH(); } -int DepthwiseConv2D::GetActivationType() const { - return this->primitive_->value_as_DepthwiseConv2D()->activationType(); -} - -PrimitiveC *DepthWiseConv2DCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DepthwiseConv2D>(primitive); -} -Registry DepthWiseConv2DRegistry(schema::PrimitiveType_DepthwiseConv2D, DepthWiseConv2DCreator); - -#endif - -int DepthwiseConv2D::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (inputs_.size() != kDoubleNum && inputs_.size() != kTripleNum) { - MS_LOG(ERROR) << "inputs number is invalid"; - return 1; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output number is invalid"; - return 1; - } - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight = inputs_.at(1); - MS_ASSERT(weight != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto in_shape = input->shape(); - int input_h = in_shape.at(1); - int input_w = in_shape.at(2); - int input_channel = in_shape.at(3); - int output_w = 0, output_h = 0; - input_channel_ = input_channel; - - if (GetPadMode() == schema::PadMode_SAME_UPPER) { - output_h = std::ceil(static_cast<float>(input_h) / 
static_cast<float>(GetStrideH()));
-    output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW()));
-    auto pad_h_all = ((output_h - 1) * GetStrideH() + (GetKernelH() - 1) * GetDilateH() + 1 - input_h);
-    auto pad_w_all = ((output_w - 1) * GetStrideW() + (GetKernelW() - 1) * GetDilateW() + 1 - input_w);
-    if (pad_h_all > 0) {
-      pad_u_ = pad_h_all / 2;
-      pad_d_ = pad_h_all - pad_u_;
-    }
-    if (pad_w_all > 0) {
-      pad_l_ = pad_w_all / 2;
-      pad_r_ = pad_w_all - pad_l_;
-    }
-  } else {
-    output_h = std::ceil((static_cast<float>(input_h) + pad_u_ + pad_d_ -
-                          (static_cast<float>(GetKernelH()) - 1) * static_cast<float>(GetDilateH())) /
-                         static_cast<float>(GetStrideH()));
-    output_w = std::ceil((static_cast<float>(input_w) + pad_l_ + pad_r_ -
-                          (static_cast<float>(GetKernelW()) - 1) * static_cast<float>(GetDilateW())) /
-                         static_cast<float>(GetStrideW()));
-  }
-  std::vector<int> out_shape{input->shape()};
-  out_shape.at(1) = output_h;
-  out_shape.at(2) = output_w;
-  if (GetChannelMultiplier() * input_channel != weight->shape().at(0)) {
-    MS_LOG(ERROR) << "Conv depthwise only support group equals output channel.";
-    return 1;
-  }
-  out_shape.at(3) = weight->shape().at(0) * weight->shape().at(3);  // in_channel * out_channel
-
-  output->set_shape(out_shape);
-  return 0;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/depthwise_conv2d.h b/mindspore/lite/src/ops/depthwise_conv2d.h
deleted file mode 100644
index 7243914a2a..0000000000
--- a/mindspore/lite/src/ops/depthwise_conv2d.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
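
Under PadMode_SAME_UPPER, the deleted InferShape above first pins the output extent to ceil(input / stride) and then back-solves the total padding, giving the odd pixel to the trailing edge; the else branch is the ordinary explicit-pad formula. A compact per-axis sketch of the SAME_UPPER arithmetic (hypothetical free function, not part of this patch):

    #include <cmath>
    #include <utility>

    // Returns {output_size, {pad_before, pad_after}} for one spatial axis.
    std::pair<int, std::pair<int, int>> SameUpper(int input, int kernel, int stride, int dilation) {
      int output = static_cast<int>(std::ceil(static_cast<float>(input) / stride));
      int pad_all = (output - 1) * stride + (kernel - 1) * dilation + 1 - input;
      if (pad_all < 0) pad_all = 0;
      int before = pad_all / 2;      // smaller half first
      int after = pad_all - before;  // the extra pixel lands after (the "UPPER" split)
      return {output, {before, after}};
    }
    // e.g. input = 5, kernel = 3, stride = 2, dilation = 1 -> output 3, pads {1, 1}
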
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_DEPTHWISE_CONV2D_H_ -#define MINDSPORE_LITE_SRC_OPS_DEPTHWISE_CONV2D_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DepthwiseConv2D : public PrimitiveC { - public: - DepthwiseConv2D() = default; - ~DepthwiseConv2D() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DepthwiseConv2D, PrimitiveC); - explicit DepthwiseConv2D(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetFormat(int format); - void SetChannelIn(int channel_in); - void SetChannelMultiplier(int channel_multiplier); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetChannelIn() const; - int GetChannelMultiplier() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - int GetActivationType() const; - - int PadUp() const { return this->pad_u_; } - int PadDown() const { return this->pad_d_; } - int PadLeft() const { return this->pad_l_; } - int PadRight() const { return this->pad_r_; } - int GetInputChannel() const { return this->input_channel_; } - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; - int input_channel_ = 0; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_DEPTHWISE_CONV2D_H_ diff --git a/mindspore/lite/src/ops/dequant.cc b/mindspore/lite/src/ops/dequant.cc deleted file mode 100644 index 13de810376..0000000000 --- a/mindspore/lite/src/ops/dequant.cc +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
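
The header above follows the convention every op in this patch shared: a single accessor surface, with mutating setters compiled only under PRIMITIVE_WRITEABLE and a flatbuffer re-serializer compiled otherwise. A stripped-down sketch of that compile-time split, using stub types in place of the real schema classes:

    // Stand-ins for schema::PrimitiveT (mutable) and schema::Primitive (read-only view).
    struct PrimitiveT { int bar = 0; };
    struct Primitive {
      int bar_value = 0;
      int bar() const { return bar_value; }
    };

    class Foo {
     public:
    #ifdef PRIMITIVE_WRITEABLE
      explicit Foo(PrimitiveT *p) : t_(p) {}
      void SetBar(int bar) { t_->bar = bar; }    // writeable build mutates in place
      int GetBar() const { return t_->bar; }
     private:
      PrimitiveT *t_ = nullptr;
    #else
      explicit Foo(const Primitive *p) : p_(p) {}
      int GetBar() const { return p_->bar(); }   // read-only build reads the serialized view
     private:
      const Primitive *p_ = nullptr;
    #endif
    };
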
- */ -#include "src/ops/dequant.h" -#include <vector> -#include <memory> - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Dequant::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_OnnxInt8Dequantize; - } - if (this->primitive_->value.type != schema::PrimitiveType_OnnxInt8Dequantize) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow)(schema::OnnxInt8DequantizeT); - if (attr == nullptr) { - MS_LOG(ERROR) << "attr is nullptr"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/dequant.h b/mindspore/lite/src/ops/dequant.h deleted file mode 100644 index 046055abbd..0000000000 --- a/mindspore/lite/src/ops/dequant.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_DEQUANT_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_DEQUANT_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Dequant : public PrimitiveC { - public: - Dequant() = default; - ~Dequant() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Dequant, PrimitiveC); - explicit Dequant(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_DEQUANT_H_ diff --git a/mindspore/lite/src/ops/detection_post_process.cc b/mindspore/lite/src/ops/detection_post_process.cc deleted file mode 100644 index dc608ef40e..0000000000 --- a/mindspore/lite/src/ops/detection_post_process.cc +++ /dev/null @@ -1,208 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
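
Dequant::UnPackAttr above uses the lazy-initialization flow repeated throughout these files: allocate the primitive container if absent, verify its type tag, then allocate the value payload if absent. A standalone sketch of that flow with stand-in types (not the real schema):

    #include <iostream>
    #include <memory>

    struct Payload { int value = 0; };  // stand-in for schema::OnnxInt8DequantizeT
    struct Container {                  // stand-in for schema::PrimitiveT
      int type = 0;
      std::unique_ptr<Payload> payload;
    };

    bool UnpackInto(std::unique_ptr<Container> &c, int expected_type) {
      if (c == nullptr) {
        c = std::make_unique<Container>();
        c->type = expected_type;
      }
      if (c->type != expected_type) {
        std::cerr << "primitive type is error:" << c->type << std::endl;
        return false;  // RET_ERROR in the original
      }
      if (c->payload == nullptr) {
        c->payload = std::make_unique<Payload>();
      }
      return true;  // RET_OK
    }
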
- */ - -#include "src/ops/detection_post_process.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int DetectionPostProcess::GetFormat() const { return this->primitive_->value.AsDetectionPostProcess()->format; } -int DetectionPostProcess::GetInputSize() const { return this->primitive_->value.AsDetectionPostProcess()->inputSize; } -float DetectionPostProcess::GetHScale() const { return this->primitive_->value.AsDetectionPostProcess()->hScale; } -float DetectionPostProcess::GetWScale() const { return this->primitive_->value.AsDetectionPostProcess()->wScale; } -float DetectionPostProcess::GetXScale() const { return this->primitive_->value.AsDetectionPostProcess()->xScale; } -float DetectionPostProcess::GetYScale() const { return this->primitive_->value.AsDetectionPostProcess()->yScale; } -float DetectionPostProcess::GetNmsIouThreshold() const { - return this->primitive_->value.AsDetectionPostProcess()->NmsIouThreshold; -} -float DetectionPostProcess::GetNmsScoreThreshold() const { - return this->primitive_->value.AsDetectionPostProcess()->NmsScoreThreshold; -} -int64_t DetectionPostProcess::GetMaxDetections() const { - return this->primitive_->value.AsDetectionPostProcess()->MaxDetections; -} -int64_t DetectionPostProcess::GetDetectionsPerClass() const { - return this->primitive_->value.AsDetectionPostProcess()->DetectionsPerClass; -} -int64_t DetectionPostProcess::GetMaxClassesPerDetection() const { - return this->primitive_->value.AsDetectionPostProcess()->MaxClassesPerDetection; -} -int64_t DetectionPostProcess::GetNumClasses() const { - return this->primitive_->value.AsDetectionPostProcess()->NumClasses; -} -bool DetectionPostProcess::GetUseRegularNms() const { - return this->primitive_->value.AsDetectionPostProcess()->UseRegularNms; -} -void DetectionPostProcess::SetFormat(int format) { - this->primitive_->value.AsDetectionPostProcess()->format = (schema::Format)format; -} -void DetectionPostProcess::SetInputSize(int input_size) { - this->primitive_->value.AsDetectionPostProcess()->inputSize = input_size; -} -void DetectionPostProcess::SetHScale(float h_scale) { - this->primitive_->value.AsDetectionPostProcess()->hScale = h_scale; -} -void DetectionPostProcess::SetWScale(float w_scale) { - this->primitive_->value.AsDetectionPostProcess()->wScale = w_scale; -} -void DetectionPostProcess::SetXScale(float x_scale) { - this->primitive_->value.AsDetectionPostProcess()->xScale = x_scale; -} -void DetectionPostProcess::SetYScale(float y_scale) { - this->primitive_->value.AsDetectionPostProcess()->yScale = y_scale; -} -void DetectionPostProcess::SetNmsIouThreshold(float nms_iou_threshold) { - this->primitive_->value.AsDetectionPostProcess()->NmsIouThreshold = nms_iou_threshold; -} -void DetectionPostProcess::SetNmsScoreThreshold(float nms_score_threshold) { - this->primitive_->value.AsDetectionPostProcess()->NmsScoreThreshold = nms_score_threshold; -} -void DetectionPostProcess::SetMaxDetections(int64_t max_detections) { - this->primitive_->value.AsDetectionPostProcess()->MaxDetections = max_detections; -} -void DetectionPostProcess::SetDetectionsPerClass(int64_t detections_per_class) { - this->primitive_->value.AsDetectionPostProcess()->DetectionsPerClass = detections_per_class; -} -void DetectionPostProcess::SetMaxClassesPerDetection(int64_t max_classes_per_detection) { - this->primitive_->value.AsDetectionPostProcess()->MaxClassesPerDetection = max_classes_per_detection; -} -void 
DetectionPostProcess::SetNumClasses(int64_t num_classes) { - this->primitive_->value.AsDetectionPostProcess()->NumClasses = num_classes; -} -void DetectionPostProcess::SetUseRegularNms(bool use_regular_nms) { - this->primitive_->value.AsDetectionPostProcess()->UseRegularNms = use_regular_nms; -} -void DetectionPostProcess::SetOutQuantized(bool out_quantized) { - this->primitive_->value.AsDetectionPostProcess()->OutQuantized = out_quantized; -} - -#else -int DetectionPostProcess::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_DetectionPostProcess(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DetectionPostProcess return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateDetectionPostProcess( - *fbb, attr->format(), attr->inputSize(), attr->hScale(), attr->wScale(), attr->xScale(), attr->yScale(), - attr->NmsIouThreshold(), attr->NmsScoreThreshold(), attr->MaxDetections(), attr->DetectionsPerClass(), - attr->MaxClassesPerDetection(), attr->NumClasses(), attr->UseRegularNms(), attr->OutQuantized()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DetectionPostProcess, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int DetectionPostProcess::GetFormat() const { return this->primitive_->value_as_DetectionPostProcess()->format(); } -int DetectionPostProcess::GetInputSize() const { - return this->primitive_->value_as_DetectionPostProcess()->inputSize(); -} -float DetectionPostProcess::GetHScale() const { return this->primitive_->value_as_DetectionPostProcess()->hScale(); } -float DetectionPostProcess::GetWScale() const { return this->primitive_->value_as_DetectionPostProcess()->wScale(); } -float DetectionPostProcess::GetXScale() const { return this->primitive_->value_as_DetectionPostProcess()->xScale(); } -float DetectionPostProcess::GetYScale() const { return this->primitive_->value_as_DetectionPostProcess()->yScale(); } -float DetectionPostProcess::GetNmsIouThreshold() const { - return this->primitive_->value_as_DetectionPostProcess()->NmsIouThreshold(); -} -float DetectionPostProcess::GetNmsScoreThreshold() const { - return this->primitive_->value_as_DetectionPostProcess()->NmsScoreThreshold(); -} -int64_t DetectionPostProcess::GetMaxDetections() const { - return this->primitive_->value_as_DetectionPostProcess()->MaxDetections(); -} -int64_t DetectionPostProcess::GetDetectionsPerClass() const { - return this->primitive_->value_as_DetectionPostProcess()->DetectionsPerClass(); -} -int64_t DetectionPostProcess::GetMaxClassesPerDetection() const { - return this->primitive_->value_as_DetectionPostProcess()->MaxClassesPerDetection(); -} -int64_t DetectionPostProcess::GetNumClasses() const { - return this->primitive_->value_as_DetectionPostProcess()->NumClasses(); -} -bool DetectionPostProcess::GetUseRegularNms() const { - return this->primitive_->value_as_DetectionPostProcess()->UseRegularNms(); -} -bool DetectionPostProcess::GetOutQuantized() const { - return this->primitive_->value_as_DetectionPostProcess()->OutQuantized(); -} - -PrimitiveC *DetectionPostProcessCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DetectionPostProcess>(primitive); -} -Registry DetectionPostProcessRegistry(schema::PrimitiveType_DetectionPostProcess, DetectionPostProcessCreator); -#endif -namespace { -constexpr int kDetectionPostProcessOutputNum = 4; -constexpr int 
kDetectionPostProcessInputNum = 3;
-}  // namespace
-int DetectionPostProcess::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
-  if (outputs_.size() != kDetectionPostProcessOutputNum || inputs_.size() != kDetectionPostProcessInputNum) {
-    MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs_.size() << ",input size: " << inputs_.size();
-    return RET_PARAM_INVALID;
-  }
-  auto boxes = inputs_.at(0);
-  MS_ASSERT(boxes != nullptr);
-  auto scores = inputs_.at(1);
-  MS_ASSERT(scores != nullptr);
-  auto anchors = inputs_.at(2);
-  MS_ASSERT(anchors != nullptr);
-
-  const auto input_box_shape = boxes->shape();
-  const auto input_scores_shape = scores->shape();
-  const auto input_anchors_shape = anchors->shape();
-  MS_ASSERT(input_scores_shape[2] >= GetNumClasses());
-  MS_ASSERT(input_scores_shape[2] - GetNumClasses() <= 1);
-  MS_ASSERT(input_box_shape[1] == input_scores_shape[1]);
-  MS_ASSERT(input_box_shape[1] == input_anchors_shape[0]);
-
-  auto detected_boxes = outputs_.at(0);
-  MS_ASSERT(detected_boxes != nullptr);
-  auto detected_classes = outputs_.at(1);
-  MS_ASSERT(detected_classes != nullptr);
-  auto detected_scores = outputs_.at(2);
-  MS_ASSERT(detected_scores != nullptr);
-  auto num_det = outputs_.at(3);
-  MS_ASSERT(num_det != nullptr);
-
-  detected_boxes->set_format(boxes->format());
-  detected_boxes->set_data_type(kNumberTypeFloat32);
-  detected_classes->set_format(boxes->format());
-  detected_classes->set_data_type(kNumberTypeFloat32);
-  detected_scores->set_format(boxes->format());
-  detected_scores->set_data_type(kNumberTypeFloat32);
-  num_det->set_format(boxes->format());
-  num_det->set_data_type(kNumberTypeFloat32);
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  const auto max_detections = GetMaxDetections();
-  const auto max_classes_per_detection = GetMaxClassesPerDetection();
-  const auto num_detected_boxes = static_cast<int>(max_detections * max_classes_per_detection);
-  const std::vector<int> box_shape{1, num_detected_boxes, 4};
-  const std::vector<int> class_shape{1, num_detected_boxes};
-  const std::vector<int> num_shape{1};
-  detected_boxes->set_shape(box_shape);
-  detected_classes->set_shape(class_shape);
-  detected_scores->set_shape(class_shape);
-  num_det->set_shape(num_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/detection_post_process.h b/mindspore/lite/src/ops/detection_post_process.h
deleted file mode 100644
index d93d5807a2..0000000000
--- a/mindspore/lite/src/ops/detection_post_process.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
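
All four output shapes above derive from just two attributes: with N = MaxDetections * MaxClassesPerDetection, detected boxes come out as {1, N, 4}, classes and scores as {1, N}, and the valid-detection count as {1}. A standalone sketch of that rule (hypothetical struct and function names):

    #include <cstdint>
    #include <vector>

    struct DetectionOutShapes {
      std::vector<int> boxes, classes, scores, num;
    };

    // Static shape rule matching the deleted InferShape above.
    DetectionOutShapes DetectionPostProcessShapes(int64_t max_detections, int64_t max_classes_per_detection) {
      const int n = static_cast<int>(max_detections * max_classes_per_detection);
      return {{1, n, 4}, {1, n}, {1, n}, {1}};
    }
    // e.g. max_detections = 10, max_classes_per_detection = 1 -> boxes {1, 10, 4}
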
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_ -#define LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DetectionPostProcess : public PrimitiveC { - public: - DetectionPostProcess() = default; - ~DetectionPostProcess() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DetectionPostProcess, PrimitiveC); - explicit DetectionPostProcess(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetInputSize(int input_size); - void SetHScale(float h_scale); - void SetWScale(float w_scale); - void SetXScale(float x_scale); - void SetYScale(float y_scale); - void SetNmsIouThreshold(float nms_iou_threshold); - void SetNmsScoreThreshold(float nms_score_threshold); - void SetMaxDetections(int64_t max_detections); - void SetDetectionsPerClass(int64_t detections_per_class); - void SetMaxClassesPerDetection(int64_t max_classes_per_detection); - void SetNumClasses(int64_t num_classes); - void SetUseRegularNms(bool use_regular_nms); - void SetOutQuantized(bool out_quantized); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetInputSize() const; - float GetHScale() const; - float GetWScale() const; - float GetXScale() const; - float GetYScale() const; - float GetNmsIouThreshold() const; - float GetNmsScoreThreshold() const; - int64_t GetMaxDetections() const; - int64_t GetDetectionsPerClass() const; - int64_t GetMaxClassesPerDetection() const; - int64_t GetNumClasses() const; - bool GetUseRegularNms() const; - bool GetOutQuantized() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_DETECTION_POST_PROCESS_H_ diff --git a/mindspore/lite/src/ops/div.cc b/mindspore/lite/src/ops/div.cc deleted file mode 100644 index f345e01d30..0000000000 --- a/mindspore/lite/src/ops/div.cc +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/div.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Div::GetActivationType() const { return this->primitive_->value.AsDiv()->activationType; } - -void Div::SetActivationType(int activation_type) { - this->primitive_->value.AsDiv()->activationType = (schema::ActivationType)activation_type; -} - -int Div::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Div; - } - if (this->primitive_->value.type != schema::PrimitiveType_Div) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::DivT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - - return RET_OK; -} - -#else -int Div::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Div(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Div return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateDiv(*fbb, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Div, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Div::GetActivationType() const { return this->primitive_->value_as_Div()->activationType(); } - -PrimitiveC *DivCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Div>(primitive); } -Registry DivRegistry(schema::PrimitiveType_Div, DivCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/div.h b/mindspore/lite/src/ops/div.h deleted file mode 100644 index c23e7ab5c4..0000000000 --- a/mindspore/lite/src/ops/div.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
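
DivCreator and DivRegistry above rely on the static-registration idiom used by all the non-writeable builds in this patch: a global object whose constructor inserts a factory into a type-keyed table, so merely linking the translation unit registers the op. A self-contained sketch of the idiom (stand-in creator type and numeric type id):

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>

    using Creator = std::function<std::string()>;

    std::map<int, Creator> &Registry() {
      static std::map<int, Creator> table;  // function-local static avoids init-order issues
      return table;
    }

    struct Register {
      Register(int type, Creator creator) { Registry()[type] = std::move(creator); }
    };

    std::string DivCreator() { return "Div"; }
    static Register div_registry(42, DivCreator);  // 42: stand-in for PrimitiveType_Div

    int main() { std::cout << Registry()[42]() << std::endl; }
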
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_DIV_H_ -#define LITE_MINDSPORE_LITE_C_OPS_DIV_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Div : public Arithmetic { - public: - Div() = default; - ~Div() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Div, Arithmetic); - explicit Div(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - void SetActivationType(int activation_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_DIV_H_ diff --git a/mindspore/lite/src/ops/dropout.cc b/mindspore/lite/src/ops/dropout.cc deleted file mode 100644 index a34bdeaa97..0000000000 --- a/mindspore/lite/src/ops/dropout.cc +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/dropout.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float Dropout::GetRatio() const { return this->primitive_->value.AsDropout()->ratio; } - -void Dropout::SetRatio(float ratio) { this->primitive_->value.AsDropout()->ratio = ratio; } - -int Dropout::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Dropout; - } - if (this->primitive_->value.type != schema::PrimitiveType_Dropout) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::DropoutT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("keep_prob") != nullptr) { - attr->ratio = GetValue<float>(prim.GetAttr("keep_prob")); - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Dropout::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Dropout(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Dropout return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateDropout(*fbb, attr->ratio()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Dropout, 
val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float Dropout::GetRatio() const { return this->primitive_->value_as_Dropout()->ratio(); } - -PrimitiveC *DropoutCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Dropout>(primitive); } -Registry DropoutRegistry(schema::PrimitiveType_Dropout, DropoutCreator); -#endif -int Dropout::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output0 = outputs_.front(); - MS_ASSERT(output0 != nullptr); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output0->set_shape(input->shape()); - output0->set_data_type(input->data_type()); - output0->set_format(input->format()); - if (outputs_.size() > 1) { - auto output1 = outputs_[1]; - MS_ASSERT(output1 != nullptr); - output1->set_shape(input->shape()); - output1->set_data_type(input->data_type()); - output1->set_format(input->format()); - } - return RET_OK; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/dropout.h b/mindspore/lite/src/ops/dropout.h deleted file mode 100644 index 21310974b6..0000000000 --- a/mindspore/lite/src/ops/dropout.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_DROPOUT_H_ -#define MINDSPORE_LITE_SRC_OPS_DROPOUT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Dropout : public PrimitiveC { - public: - Dropout() = default; - ~Dropout() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Dropout, PrimitiveC); - explicit Dropout(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetRatio(float ratio); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetRatio() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; - -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_DROPOUT_H_ diff --git a/mindspore/lite/src/ops/dropout_grad.cc b/mindspore/lite/src/ops/dropout_grad.cc deleted file mode 100644 index 443a5571e9..0000000000 --- a/mindspore/lite/src/ops/dropout_grad.cc +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
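
Dropout::InferShape above is pure identity propagation: each output (there may be one or two) mirrors the input's shape, data type, and format. A tiny sketch with a stub tensor type:

    #include <vector>

    struct TensorStub {  // stand-in for lite::Tensor
      std::vector<int> shape;
      int data_type = 0;
      int format = 0;
    };

    // Copy shape/type/format from the input to every produced output.
    void PassThrough(const TensorStub &input, std::vector<TensorStub *> *outputs) {
      for (TensorStub *out : *outputs) {
        out->shape = input.shape;
        out->data_type = input.data_type;
        out->format = input.format;
      }
    }
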
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/dropout_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float DropoutGrad::GetRatio() const { return this->primitive_->value.AsDropout()->ratio; } - -void DropoutGrad::SetRatio(float ratio) { this->primitive_->value.AsDropout()->ratio = ratio; } - -int DropoutGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_DropoutGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_DropoutGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::DropoutGradT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("keep_prob") != nullptr) { - attr->ratio = GetValue<float>(prim.GetAttr("keep_prob")); - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int DropoutGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_DropoutGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_DropoutGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateDropoutGrad(*fbb, attr->ratio()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_DropoutGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float DropoutGrad::GetRatio() const { return this->primitive_->value_as_DropoutGrad()->ratio(); } - -PrimitiveC *DropoutGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<DropoutGrad>(primitive); -} -Registry DropoutGradRegistry(schema::PrimitiveType_DropoutGrad, DropoutGradCreator); - -#endif -int DropoutGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - MS_ASSERT(inputs_.size() == 2); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/dropout_grad.h b/mindspore/lite/src/ops/dropout_grad.h deleted file mode 100644 index c0d0d11c29..0000000000 --- a/mindspore/lite/src/ops/dropout_grad.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies 
Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_DROPOUT_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_DROPOUT_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class DropoutGrad : public PrimitiveC { - public: -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(DropoutGrad, PrimitiveC); - DropoutGrad() = default; - explicit DropoutGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetRatio(float ratio); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - DropoutGrad() = default; - - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetRatio() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_DROPOUT_GRAD_H_ diff --git a/mindspore/lite/src/ops/eltwise.cc b/mindspore/lite/src/ops/eltwise.cc deleted file mode 100644 index 0bec8276c4..0000000000 --- a/mindspore/lite/src/ops/eltwise.cc +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/eltwise.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Eltwise::GetMode() const { return this->primitive_->value.AsEltwise()->mode; } - -void Eltwise::SetMode(int mode) { this->primitive_->value.AsEltwise()->mode = (schema::EltwiseMode)mode; } - -#else -int Eltwise::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Eltwise(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Eltwise return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateEltwise(*fbb, attr->mode()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Eltwise, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Eltwise::GetMode() const { return this->primitive_->value_as_Eltwise()->mode(); } - -PrimitiveC *EltwiseCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Eltwise>(primitive); } -Registry EltwiseRegistry(schema::PrimitiveType_Eltwise, EltwiseCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/eltwise.h b/mindspore/lite/src/ops/eltwise.h deleted file mode 100644 index 1f6222144c..0000000000 --- a/mindspore/lite/src/ops/eltwise.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Eltwise : public Arithmetic { - public: - Eltwise() = default; - ~Eltwise() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Eltwise, Arithmetic); - explicit Eltwise(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - void SetMode(int mode); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetMode() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ELTWISE_H_ diff --git a/mindspore/lite/src/ops/elu.cc b/mindspore/lite/src/ops/elu.cc deleted file mode 100644 index 506f9f381f..0000000000 --- a/mindspore/lite/src/ops/elu.cc +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/elu.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float Elu::GetAlpha() const { return this->primitive_->value.AsElu()->alpha; } - -void Elu::SetAlpha(float alpha) { this->primitive_->value.AsElu()->alpha = alpha; } - -int Elu::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Elu; - } - if (this->primitive_->value.type != schema::PrimitiveType_Elu) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - auto attr = std::make_unique<schema::EluT>(); - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - return RET_OK; -} -#else -int Elu::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Elu(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Elu return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateElu(*fbb, attr->alpha()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Elu, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float Elu::GetAlpha() const { return this->primitive_->value_as_Elu()->alpha(); } - -PrimitiveC *EluCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Elu>(primitive); } -Registry EluRegistry(schema::PrimitiveType_Elu, EluCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/elu.h b/mindspore/lite/src/ops/elu.h deleted file mode 100644 index 9b025e69bd..0000000000 --- a/mindspore/lite/src/ops/elu.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
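
The deleted elu.cc above only carries the op's alpha attribute; the activation that attribute parameterizes is the usual ELU, f(x) = x for x > 0 and alpha * (exp(x) - 1) otherwise. For reference:

    #include <cmath>

    // std::expm1 computes exp(x) - 1 without cancellation for small x.
    float Elu(float x, float alpha) { return x > 0.0f ? x : alpha * std::expm1(x); }
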
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_ELU_H_
-#define LITE_MINDSPORE_LITE_C_OPS_ELU_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Elu : public PrimitiveC {
- public:
-  Elu() = default;
-  ~Elu() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Elu, PrimitiveC);
-  explicit Elu(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetAlpha(float alpha);
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  float GetAlpha() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_ELU_H_
diff --git a/mindspore/lite/src/ops/embedding_lookup.cc b/mindspore/lite/src/ops/embedding_lookup.cc
deleted file mode 100644
index a0a3ee7a06..0000000000
--- a/mindspore/lite/src/ops/embedding_lookup.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/embedding_lookup.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value.AsEmbeddingLookup()->maxNorm; }
-
-void EmbeddingLookup::SetMaxNorm(float max_norm) { this->primitive_->value.AsEmbeddingLookup()->maxNorm = max_norm; }
-
-#else
-int EmbeddingLookup::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-
-  auto attr = primitive->value_as_EmbeddingLookup();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_EmbeddingLookup return nullptr";
-    return RET_ERROR;
-  }
-
-  auto val_offset = schema::CreateEmbeddingLookup(*fbb, attr->maxNorm());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_EmbeddingLookup, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-float EmbeddingLookup::GetMaxNorm() const { return this->primitive_->value_as_EmbeddingLookup()->maxNorm(); }
-
-PrimitiveC *EmbeddingLookupCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<EmbeddingLookup>(primitive);
-}
-Registry EmbeddingLookupRegistry(schema::PrimitiveType_EmbeddingLookup, EmbeddingLookupCreator);
-#endif
-
-int EmbeddingLookup::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  if (inputs_.size() < kDoubleNum) {
-    MS_LOG(ERROR) << "Embedding Lookup should have at least two inputs";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  if (outputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "Embedding Lookup should have one outputs";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  auto params_ = inputs_.front();
-  MS_ASSERT(params_ != nullptr);
-  auto ids = inputs_.back();
-  MS_ASSERT(ids != nullptr);
-  auto output = outputs_.front();
-  MS_ASSERT(output != nullptr);
-  output->set_format(params_->format());
-  output->set_data_type(params_->data_type());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  auto embedding_shape = params_->shape();
-  embedding_shape.erase(embedding_shape.begin());
-  std::vector<int> output_shape(ids->shape());
-  for (size_t i = 0; i < embedding_shape.size(); ++i) {
-    output_shape.push_back(embedding_shape.at(i));
-  }
-  for (size_t i = 1; i < inputs_.size() - 1; ++i) {
-    auto embedding_shape_t = inputs_.at(i)->shape();
-    embedding_shape_t.erase(embedding_shape_t.begin());
-    if (embedding_shape_t != embedding_shape) {
-      MS_LOG(ERROR) << "The embedded layers should have the same shape";
-      return RET_INPUT_TENSOR_ERROR;
-    }
-  }
-  output->set_shape(output_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/embedding_lookup.h b/mindspore/lite/src/ops/embedding_lookup.h
deleted file mode 100644
index 01898bb7fb..0000000000
--- a/mindspore/lite/src/ops/embedding_lookup.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_
-#define LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class EmbeddingLookup : public PrimitiveC {
- public:
-  EmbeddingLookup() = default;
-  ~EmbeddingLookup() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(EmbeddingLookup, PrimitiveC);
-  explicit EmbeddingLookup(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetMaxNorm(float max_norm);
-
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  float GetMaxNorm() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_EMBEDDING_LOOKUP_H_
diff --git a/mindspore/lite/src/ops/equal.cc b/mindspore/lite/src/ops/equal.cc
deleted file mode 100644
index ef2ebaeee6..0000000000
--- a/mindspore/lite/src/ops/equal.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
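The EmbeddingLookup::InferShape removed above computes the output shape as the ids shape followed by the embedding row shape (the params shape minus its first axis), and requires every additional embedding input to share that row shape. A standalone sketch of the shape rule, using a hypothetical helper over plain int vectors rather than the real lite::Tensor API:

```cpp
#include <cassert>
#include <vector>

// Shape rule from the deleted EmbeddingLookup::InferShape: the lookup output
// is the ids shape with the embedding row dims (params shape minus dim 0)
// appended. Hypothetical free function, not part of the MindSpore Lite API.
std::vector<int> EmbeddingLookupOutShape(const std::vector<int> &params_shape,
                                         const std::vector<int> &ids_shape) {
  assert(!params_shape.empty());
  std::vector<int> out(ids_shape);  // one embedding row per id
  out.insert(out.end(), params_shape.begin() + 1, params_shape.end());
  return out;
}

int main() {
  // params [1000, 128], ids [4, 32] -> output [4, 32, 128]
  auto shape = EmbeddingLookupOutShape({1000, 128}, {4, 32});
  assert((shape == std::vector<int>{4, 32, 128}));
  return 0;
}
```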
- */ - -#include "src/ops/equal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Equal::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Equal; - } - if (this->primitive_->value.type != schema::PrimitiveType_Equal) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::EqualT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Equal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Equal, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *EqualCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Equal>(primitive); } -Registry EqualRegistry(schema::PrimitiveType_Equal, EqualCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/equal.h b/mindspore/lite/src/ops/equal.h deleted file mode 100644 index 1dc8d3ab75..0000000000 --- a/mindspore/lite/src/ops/equal.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
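EqualCreator and EqualRegistry above are one instance of the creator/registry idiom repeated in every non-writeable build in this patch: a global Registry object registers a factory at static-initialization time, keyed by the schema primitive type. A minimal, self-contained sketch of that idiom, with stand-in types and an arbitrary type id (the real API lives in src/ops/ops_register.h):

```cpp
#include <functional>
#include <iostream>
#include <map>

// Simplified stand-ins for the real types; illustration only.
struct PrimitiveC {};
using Creator = std::function<PrimitiveC *()>;

// Function-local static so static registries can run safely before main().
std::map<int, Creator> &CreatorTable() {
  static std::map<int, Creator> table;
  return table;
}

// Mirrors the Registry object used above: constructing it registers.
struct Registry {
  Registry(int type, Creator creator) { CreatorTable()[type] = creator; }
};

PrimitiveC *EqualCreator() { return new PrimitiveC; }
static Registry g_equal_registry(/* e.g. PrimitiveType_Equal */ 42, EqualCreator);

int main() {
  PrimitiveC *p = CreatorTable().at(42)();  // look up the creator and build
  std::cout << "creators registered: " << CreatorTable().size() << '\n';
  delete p;
  return 0;
}
```

Because the Registry constructor runs during static initialization, merely linking an op's translation unit makes it constructible from its schema type, which is why deleting these files also deletes the registrations.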
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class Equal : public ArithmeticCompare { - public: - Equal() = default; - ~Equal() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Equal, ArithmeticCompare); - explicit Equal(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_EQUAL_H_ diff --git a/mindspore/lite/src/ops/erf.h b/mindspore/lite/src/ops/erf.h deleted file mode 100644 index a8c9c56038..0000000000 --- a/mindspore/lite/src/ops/erf.h +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "src/ops/primitive_c.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ERF_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ERF_H_ - -namespace mindspore { -namespace lite { -class Erf : public PrimitiveC { - public: - MS_DECLARE_PARENT(Erf, PrimitiveC); - Erf() = default; - ~Erf() = default; - explicit Erf(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_ERF_H_ diff --git a/mindspore/lite/src/ops/exp.cc b/mindspore/lite/src/ops/exp.cc deleted file mode 100644 index e51a67f05e..0000000000 --- a/mindspore/lite/src/ops/exp.cc +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/exp.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -void Exp::SetBase(float base) { this->primitive_->value.AsExp()->base = base; } -void Exp::SetScale(float scale) { this->primitive_->value.AsExp()->scale = scale; } -void Exp::SetShift(float shift) { this->primitive_->value.AsExp()->shift = shift; } - -float Exp::GetBase() const { return this->primitive_->value.AsExp()->base; } -float Exp::GetScale() const { return this->primitive_->value.AsExp()->scale; } -float Exp::GetShift() const { return this->primitive_->value.AsExp()->shift; } - -int Exp::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Exp; - } - if (this->primitive_->value.type != schema::PrimitiveType_Exp) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::ExpT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -int Exp::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Exp(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Exp return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateExp(*fbb, attr->base(), attr->scale(), attr->shift()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Exp, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -float Exp::GetBase() const { return this->primitive_->value_as_Exp()->base(); } -float Exp::GetScale() const { return this->primitive_->value_as_Exp()->scale(); } -float Exp::GetShift() const { return this->primitive_->value_as_Exp()->shift(); } - -PrimitiveC *ExpCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Exp>(primitive); } -Registry ExpRegistry(schema::PrimitiveType_Exp, ExpCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/exp.h b/mindspore/lite/src/ops/exp.h deleted file mode 100644 index 681326efea..0000000000 --- a/mindspore/lite/src/ops/exp.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_EXP_H_ -#define LITE_MINDSPORE_LITE_C_OPS_EXP_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Exp : public PrimitiveC { - public: - Exp() = default; - ~Exp() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Exp, PrimitiveC); - explicit Exp(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBase(float base); - void SetShift(float shift); - void SetScale(float scale); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetBase() const; - float GetShift() const; - float GetScale() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_EXP_H_ diff --git a/mindspore/lite/src/ops/expand_dims.cc b/mindspore/lite/src/ops/expand_dims.cc deleted file mode 100644 index 4ca40f682b..0000000000 --- a/mindspore/lite/src/ops/expand_dims.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/expand_dims.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ExpandDims::GetDim() const { return this->primitive_->value.AsExpandDims()->dim; } - -void ExpandDims::SetDim(int dim) { this->primitive_->value.AsExpandDims()->dim = dim; } - -int ExpandDims::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_ExpandDims; - } - if (this->primitive_->value.type != schema::PrimitiveType_ExpandDims) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ExpandDimsT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - // use axis instead of dim - if (inputs.at(1)->isa<ValueNode>()) { - auto axis_tensor = inputs.at(1)->cast<ValueNodePtr>(); - int axis = CastToInt(axis_tensor->value()).front(); - attr->dim = axis; - } else { - MS_LOG(ERROR) << "input axis is not value node."; - delete this->primitive_; - delete attr; - this->primitive_ = nullptr; - attr = nullptr; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else -int ExpandDims::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { 
- MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_ExpandDims(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ExpandDims return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateExpandDims(*fbb, attr->dim()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ExpandDims, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int ExpandDims::GetDim() const { return this->primitive_->value_as_ExpandDims()->dim(); } - -PrimitiveC *ExpandDimsCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ExpandDims>(primitive); -} -Registry ExpandDimsRegistry(schema::PrimitiveType_ExpandDims, ExpandDimsCreator); -#endif - -int ExpandDims::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output size is invalid"; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - int dim = GetDim(); - if (dim < 0) { - dim += input->shape().size() + 1; - } - if (dim > static_cast<int>(input->shape().size())) { - MS_LOG(ERROR) << "attribute dim out of range"; - return RET_INPUT_TENSOR_ERROR; - } - auto out_shape = input->shape(); - out_shape.insert(out_shape.begin() + dim, 1, 1); - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/expand_dims.h b/mindspore/lite/src/ops/expand_dims.h deleted file mode 100644 index bb580b8411..0000000000 --- a/mindspore/lite/src/ops/expand_dims.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
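The ExpandDims::InferShape removed above normalizes a negative dim against the output rank (input rank + 1) before inserting the new unit axis, and rejects a dim past the input rank. A compact sketch of that axis arithmetic with a hypothetical helper (the lower-bound check after adjustment is added defensively here; the original only checks the upper bound):

```cpp
#include <cassert>
#include <vector>

// Axis handling from the deleted ExpandDims::InferShape: a negative dim
// counts from the end of the *output* rank (rank + 1).
bool ExpandDimsOutShape(std::vector<int> shape, int dim, std::vector<int> *out) {
  int rank = static_cast<int>(shape.size());
  if (dim < 0) dim += rank + 1;             // e.g. dim = -1 appends a 1
  if (dim < 0 || dim > rank) return false;  // out of range
  shape.insert(shape.begin() + dim, 1);
  *out = shape;
  return true;
}

int main() {
  std::vector<int> out;
  assert(ExpandDimsOutShape({2, 3}, -1, &out) && (out == std::vector<int>{2, 3, 1}));
  assert(ExpandDimsOutShape({2, 3}, 0, &out) && (out == std::vector<int>{1, 2, 3}));
  return 0;
}
```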
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_ -#define LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ExpandDims : public PrimitiveC { - public: - ExpandDims() = default; - ~ExpandDims() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ExpandDims, PrimitiveC); - explicit ExpandDims(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetDim(int dim); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetDim() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_EXPAND_DIMS_H_ diff --git a/mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc deleted file mode 100644 index 0f09dc4de2..0000000000 --- a/mindspore/lite/src/ops/fake_quant_with_min_max_vars.cc +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/fake_quant_with_min_max_vars.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool FakeQuantWithMinMaxVars::GetNarrowRange() const { - return this->primitive_->value.AsFakeQuantWithMinMaxVars()->narrowRange; -} -int FakeQuantWithMinMaxVars::GetNumBits() const { return this->primitive_->value.AsFakeQuantWithMinMaxVars()->numBits; } - -void FakeQuantWithMinMaxVars::SetNarrowRange(bool narrow_range) { - this->primitive_->value.AsFakeQuantWithMinMaxVars()->narrowRange = narrow_range; -} -void FakeQuantWithMinMaxVars::SetNumBits(int num_bits) { - this->primitive_->value.AsFakeQuantWithMinMaxVars()->numBits = num_bits; -} - -#else -int FakeQuantWithMinMaxVars::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_FakeQuantWithMinMaxVars(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_FakeQuantWithMinMaxVars return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateFakeQuantWithMinMaxVars(*fbb, attr->narrowRange(), attr->numBits()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FakeQuantWithMinMaxVars, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -bool FakeQuantWithMinMaxVars::GetNarrowRange() const { - return this->primitive_->value_as_FakeQuantWithMinMaxVars()->narrowRange(); -} -int FakeQuantWithMinMaxVars::GetNumBits() const { - return this->primitive_->value_as_FakeQuantWithMinMaxVars()->numBits(); -} - -PrimitiveC *FakeQuantWithMinMaxVarsCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<FakeQuantWithMinMaxVars>(primitive); -} -Registry FakeQuantWithMinMaxVarsRegistry(schema::PrimitiveType_FakeQuantWithMinMaxVars, FakeQuantWithMinMaxVarsCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/fake_quant_with_min_max_vars.h b/mindspore/lite/src/ops/fake_quant_with_min_max_vars.h deleted file mode 100644 index 7b9e6dd1c5..0000000000 --- a/mindspore/lite/src/ops/fake_quant_with_min_max_vars.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class FakeQuantWithMinMaxVars : public PrimitiveC { - public: - FakeQuantWithMinMaxVars() = default; - ~FakeQuantWithMinMaxVars() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FakeQuantWithMinMaxVars, PrimitiveC); - explicit FakeQuantWithMinMaxVars(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetNarrowRange(bool narrow_range); - void SetNumBits(int num_bits); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - bool GetNarrowRange() const; - int GetNumBits() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FAKE_QUANT_WITH_MIN_MAX_VARS_H_ diff --git a/mindspore/lite/src/ops/fft_imag.cc b/mindspore/lite/src/ops/fft_imag.cc deleted file mode 100644 index 73f9b9b60f..0000000000 --- a/mindspore/lite/src/ops/fft_imag.cc +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/fft_imag.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int FftImag::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FftImag, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *FftImagCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<FftImag>(primitive); } -Registry FftImagRegistry(schema::PrimitiveType_FftImag, FftImagCreator); -#endif -int FftImag::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(TypeId::kNumberTypeFloat32); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - input_shape.pop_back(); - outputs_.front()->set_shape(input_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/fft_imag.h b/mindspore/lite/src/ops/fft_imag.h deleted file mode 100644 index c804630b10..0000000000 --- a/mindspore/lite/src/ops/fft_imag.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FFT_IMAG_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FFT_IMAG_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class FftImag : public PrimitiveC { - public: - FftImag() = default; - ~FftImag() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FftImag, PrimitiveC); - explicit FftImag(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FFT_IMAG_H_ diff --git a/mindspore/lite/src/ops/fft_real.cc b/mindspore/lite/src/ops/fft_real.cc deleted file mode 100644 index 5d65ce0f34..0000000000 --- a/mindspore/lite/src/ops/fft_real.cc +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/fft_real.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int FftReal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FftReal, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *FftRealCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<FftReal>(primitive); } -Registry FftRealRegistry(schema::PrimitiveType_FftReal, FftRealCreator); -#endif -int FftReal::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(TypeId::kNumberTypeFloat32); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - input_shape.pop_back(); - outputs_.front()->set_shape(input_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/fft_real.h b/mindspore/lite/src/ops/fft_real.h deleted file mode 100644 index f61493956e..0000000000 --- a/mindspore/lite/src/ops/fft_real.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FFT_REAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FFT_REAL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class FftReal : public PrimitiveC { - public: - FftReal() = default; - ~FftReal() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FftReal, PrimitiveC); - explicit FftReal(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FFT_REAL_H_ diff --git a/mindspore/lite/src/ops/fill.cc b/mindspore/lite/src/ops/fill.cc deleted file mode 100644 index b322bc1ac6..0000000000 --- a/mindspore/lite/src/ops/fill.cc +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
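FftReal and FftImag, both deleted above, share one shape rule in InferShape: drop the input's trailing axis (the packed real/imaginary pair) and force a float32 output. Sketched with a hypothetical helper over plain int vectors:

```cpp
#include <cassert>
#include <vector>

// Shared rule from the deleted FftReal/FftImag InferShape: the output drops
// the trailing axis; the output dtype is always kNumberTypeFloat32.
std::vector<int> FftPartOutShape(std::vector<int> in_shape) {
  assert(!in_shape.empty());
  in_shape.pop_back();  // [..., 2] -> [...]
  return in_shape;
}

int main() {
  assert((FftPartOutShape({1, 400, 2}) == std::vector<int>{1, 400}));
  return 0;
}
```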
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/fill.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Fill::GetDims() const { return this->primitive_->value.AsFill()->dims; } - -void Fill::SetDims(const std::vector<int> &dims) { this->primitive_->value.AsFill()->dims = dims; } - -#else -int Fill::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Fill(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Fill return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> dims; - if (attr->dims() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->dims()->size()); i++) { - dims.push_back(attr->dims()->data()[i]); - } - } - auto val_offset = schema::CreateFillDirect(*fbb, &dims); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Fill, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -std::vector<int> Fill::GetDims() const { - auto fb_vector = this->primitive_->value_as_Fill()->dims(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -PrimitiveC *FillCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Fill>(primitive); } -Registry FillRegistry(schema::PrimitiveType_Fill, FillCreator); -#endif - -int Fill::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - MS_LOG(ERROR) << "Fill input or output is null!"; - return RET_ERROR; - } - if ((inputs_.size() != kSingleNum && inputs_.size() != kDoubleNum) || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> output_shape; - auto param_dims = GetDims(); - for (size_t i = 0; i < param_dims.size(); i++) { - output_shape.push_back(param_dims.at(i)); - } - - if (inputs_.size() == kDoubleNum) { - auto input_dims = inputs_.at(1); - MS_ASSERT(input_dims != nullptr); - if (input_dims->data_c() == nullptr) { - return RET_INFER_INVALID; - } - int *dims_data = reinterpret_cast<int *>(input_dims->data_c()); - output_shape = std::vector<int>{dims_data, dims_data + input_dims->ElementsNum()}; - } - - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/fill.h b/mindspore/lite/src/ops/fill.h deleted file mode 100644 index 5af4037c3c..0000000000 --- a/mindspore/lite/src/ops/fill.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the 
License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FILL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FILL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Fill : public PrimitiveC { - public: - Fill() = default; - ~Fill() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Fill, PrimitiveC); - explicit Fill(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetDims(const std::vector<int> &dims); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetDims() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FILL_H_ diff --git a/mindspore/lite/src/ops/flatten.cc b/mindspore/lite/src/ops/flatten.cc deleted file mode 100644 index 06227a12d7..0000000000 --- a/mindspore/lite/src/ops/flatten.cc +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
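Fill::InferShape, removed above, takes the output shape from the dims attribute but lets an optional second input tensor override it with its int32 payload (and returns RET_INFER_INVALID while that tensor has no data yet). A simplified sketch of the selection logic, with a hypothetical signature since the real code works on lite::Tensor *:

```cpp
#include <cassert>
#include <vector>

// Shape selection from the deleted Fill::InferShape: a materialized second
// input wins over the "dims" attribute.
std::vector<int> FillOutShape(const std::vector<int> &attr_dims,
                              const int *dims_data, int dims_len) {
  if (dims_data != nullptr) {
    return std::vector<int>(dims_data, dims_data + dims_len);  // input wins
  }
  return attr_dims;  // fall back to the attribute
}

int main() {
  const int dims[] = {2, 3, 4};
  assert((FillOutShape({1, 1}, dims, 3) == std::vector<int>{2, 3, 4}));
  assert((FillOutShape({1, 1}, nullptr, 0) == std::vector<int>{1, 1}));
  return 0;
}
```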
- */ - -#include "src/ops/flatten.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -int Flatten::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - MS_LOG(ERROR) << "Flatten input or output is null!"; - return RET_ERROR; - } - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - auto input_shape = input->shape(); - std::vector<int> output_shape(2); - output_shape.at(0) = input_shape.at(0); - output_shape.at(1) = 1; - for (size_t i = 1; i < input_shape.size(); i++) { - output_shape.at(1) *= input_shape.at(i); - } - output->set_shape(output_shape); - return RET_OK; -} -#ifdef PRIMITIVE_WRITEABLE -int Flatten::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Flatten; - } - if (this->primitive_->value.type != schema::PrimitiveType_Flatten) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::FlattenT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Flatten::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateFlatten(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Flatten, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *FlattenCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Flatten>(primitive); } -Registry FlattenRegistry(schema::PrimitiveType_Flatten, FlattenCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/flatten.h b/mindspore/lite/src/ops/flatten.h deleted file mode 100644 index 04b5d97550..0000000000 --- a/mindspore/lite/src/ops/flatten.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
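Flatten::InferShape, deleted above, keeps dim 0 and folds all remaining dims into a single axis. A sketch of the rule with a hypothetical helper:

```cpp
#include <cassert>
#include <vector>

// Rule from the deleted Flatten::InferShape: [N, d1, d2, ...] -> [N, d1*d2*...].
std::vector<int> FlattenOutShape(const std::vector<int> &in_shape) {
  std::vector<int> out(2);
  out[0] = in_shape.at(0);
  out[1] = 1;
  for (size_t i = 1; i < in_shape.size(); ++i) out[1] *= in_shape.at(i);
  return out;
}

int main() {
  assert((FlattenOutShape({8, 3, 224, 224}) == std::vector<int>{8, 150528}));
  return 0;
}
```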
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_
-#define LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Flatten : public PrimitiveC {
- public:
-  Flatten() = default;
-  ~Flatten() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Flatten, PrimitiveC);
-  explicit Flatten(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_FLATTEN_H_
diff --git a/mindspore/lite/src/ops/flatten_grad.cc b/mindspore/lite/src/ops/flatten_grad.cc
deleted file mode 100644
index f0e52562f3..0000000000
--- a/mindspore/lite/src/ops/flatten_grad.cc
+++ /dev/null
@@ -1,97 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/flatten_grad.h"
-#include <memory>
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-int FlattenGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  auto input = inputs_.front();
-  auto output = outputs_.front();
-  if (input == nullptr || output == nullptr) {
-    MS_LOG(ERROR) << "FlattenGrad input or output is null!";
-    return RET_ERROR;
-  }
-  if (inputs_.size() != kDoubleNum || outputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size();
-    return RET_INPUT_TENSOR_ERROR;
-  }
-
-  output->set_data_type(input->data_type());
-  output->set_format(input->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  auto output_size = inputs_.at(1)->shape().at(0);
-  std::vector<int> output_shape(output_size);
-  for (int i = 0; i < output_size; i++) {
-    output_shape.at(i) = static_cast<int *>(inputs_.at(1)->data_c())[i];
-  }
-  output->set_shape(output_shape);
-  return RET_OK;
-}
-
-#ifdef PRIMITIVE_WRITEABLE
-int FlattenGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_FlattenGrad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_FlattenGrad) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::FlattenGradT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "primitive value is nullptr";
-      return RET_ERROR;
-    }
-  }
-  return RET_OK;
-}
-#else
-int FlattenGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto val_offset = schema::CreateFlattenGrad(*fbb);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FlattenGrad, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-PrimitiveC *FlattenGradCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<FlattenGrad>(primitive);
-}
-Registry FlattenGradRegistry(schema::PrimitiveType_FlattenGrad, FlattenGradCreator);
-#endif
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/flatten_grad.h b/mindspore/lite/src/ops/flatten_grad.h
deleted file mode 100644
index 59fb1823e0..0000000000
--- a/mindspore/lite/src/ops/flatten_grad.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_FlattenGrad_GRAD_H_
-#define LITE_MINDSPORE_LITE_C_OPS_FlattenGrad_GRAD_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class FlattenGrad : public PrimitiveC {
- public:
-  FlattenGrad() = default;
-  ~FlattenGrad() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(FlattenGrad, PrimitiveC);
-  explicit FlattenGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_FlattenGrad_H_
diff --git a/mindspore/lite/src/ops/floor.cc b/mindspore/lite/src/ops/floor.cc
deleted file mode 100644
index 80e4bc1122..0000000000
--- a/mindspore/lite/src/ops/floor.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "src/ops/floor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Floor::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Floor; - } - if (this->primitive_->value.type != schema::PrimitiveType_Floor) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::FloorT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -int Floor::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateFloor(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Floor, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *FloorCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Floor>(primitive); } -Registry FloorRegistry(schema::PrimitiveType_Floor, FloorCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/floor.h b/mindspore/lite/src/ops/floor.h deleted file mode 100644 index 54a1ad566f..0000000000 --- a/mindspore/lite/src/ops/floor.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Floor : public ArithmeticSelf { - public: - Floor() = default; - ~Floor() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Floor, ArithmeticSelf); - explicit Floor(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_H_ diff --git a/mindspore/lite/src/ops/floor_div.cc b/mindspore/lite/src/ops/floor_div.cc deleted file mode 100644 index c52a6e84c9..0000000000 --- a/mindspore/lite/src/ops/floor_div.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/floor_div.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int FloorDiv::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_FloorDiv; - } - if (this->primitive_->value.type != schema::PrimitiveType_FloorDiv) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::FloorDivT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int FloorDiv::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateFloorDiv(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FloorDiv, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *FloorDivCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<FloorDiv>(primitive); -} -Registry FloorDivRegistry(schema::PrimitiveType_FloorDiv, FloorDivCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/floor_div.h b/mindspore/lite/src/ops/floor_div.h deleted file mode 100644 index f8515d4ab2..0000000000 --- a/mindspore/lite/src/ops/floor_div.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_ -#define MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class FloorDiv : public Arithmetic { - public: - FloorDiv() = default; - ~FloorDiv() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FloorDiv, Arithmetic); - explicit FloorDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_FLOOR_DIV_H_ diff --git a/mindspore/lite/src/ops/floor_mod.cc b/mindspore/lite/src/ops/floor_mod.cc deleted file mode 100644 index d84de0d21d..0000000000 --- a/mindspore/lite/src/ops/floor_mod.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/floor_mod.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE - -int FloorMod::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateFloorMod(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FloorMod, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *FloorModCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<FloorMod>(primitive); -} -Registry FloorModRegistry(schema::PrimitiveType_FloorMod, FloorModCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/floor_mod.h b/mindspore/lite/src/ops/floor_mod.h deleted file mode 100644 index ecd4a44f16..0000000000 --- a/mindspore/lite/src/ops/floor_mod.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class FloorMod : public Arithmetic { - public: - FloorMod() = default; - ~FloorMod() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FloorMod, Arithmetic); - explicit FloorMod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ diff --git a/mindspore/lite/src/ops/full_connection.cc b/mindspore/lite/src/ops/full_connection.cc deleted file mode 100644 index 7ec366c870..0000000000 --- a/mindspore/lite/src/ops/full_connection.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/full_connection.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool FullConnection::GetHasBias() const { return this->primitive_->value.AsFullConnection()->hasBias; } -int FullConnection::GetAxis() const { return this->primitive_->value.AsFullConnection()->axis; } -bool FullConnection::GetUseAxis() const { return this->primitive_->value.AsFullConnection()->useAxis; } -int FullConnection::GetActivationType() const { return this->primitive_->value.AsFullConnection()->activationType; } - -void FullConnection::SetHasBias(bool has_bias) { this->primitive_->value.AsFullConnection()->hasBias = has_bias; } -void FullConnection::SetAxis(int axis) { this->primitive_->value.AsFullConnection()->axis = axis; } -void FullConnection::SetUseAxis(bool use_axis) { this->primitive_->value.AsFullConnection()->useAxis = use_axis; } -void FullConnection::SetActivationType(int activationType) { - this->primitive_->value.AsFullConnection()->activationType = static_cast<schema::ActivationType>(activationType); -} -#else -int FullConnection::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_FullConnection(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_FullConnection return nullptr"; - return RET_ERROR; - } - - auto val_offset = - schema::CreateFullConnection(*fbb, attr->hasBias(), attr->axis(), attr->useAxis(), attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FullConnection, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -bool FullConnection::GetHasBias() const { return this->primitive_->value_as_FullConnection()->hasBias(); } -int FullConnection::GetAxis() const { return this->primitive_->value_as_FullConnection()->axis(); } -bool FullConnection::GetUseAxis() const { return this->primitive_->value_as_FullConnection()->useAxis(); } -int FullConnection::GetActivationType() const { return this->primitive_->value_as_FullConnection()->activationType(); } - -PrimitiveC *FullConnectionCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<FullConnection>(primitive); -} -Registry FullConnectionRegistry(schema::PrimitiveType_FullConnection, FullConnectionCreator); -#endif - -int FullConnection::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input0 = inputs_.front(); - MS_ASSERT(input0 != nullptr); - auto input1 = inputs_.at(1); - MS_ASSERT(input1 != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if ((GetHasBias() && inputs_.size() != kTripleNum) || (!GetHasBias() && inputs_.size() != kDoubleNum)) { - MS_LOG(ERROR) << "Input tensors num error"; - return RET_INPUT_TENSOR_ERROR; - } - if (GetUseAxis() && (GetAxis() < 1 || GetAxis() > static_cast<int>(input0->shape().size()))) { - MS_LOG(ERROR) << "FullConnection axis invalid"; - return RET_ERROR; - } - int new_k = 1; - if (GetUseAxis()) { - for (size_t i = GetAxis(); i < input0->shape().size(); ++i) { - new_k *= input0->shape().at(i); - } - if (new_k != input1->shape().at(1)) { - MS_LOG(ERROR) << "Input1 size invalid"; - return RET_INPUT_TENSOR_ERROR; - } - } else { - new_k = input1->shape().at(1); - } - if (GetHasBias()) { - if 
(inputs_.at(2)->shape().at(0) != input1->shape().at(0)) { - MS_LOG(ERROR) << "bias size invalid"; - return RET_INPUT_TENSOR_ERROR; - } - } - std::vector<int> out_shape{inputs_.at(0)->shape()}; - if (GetUseAxis()) { - out_shape.resize(GetAxis() + 1); - out_shape.at(GetAxis()) = input1->shape().at(0); - } else { - int total = 1; - for (size_t i = 0; i < input0->shape().size(); ++i) { - total *= input0->shape().at(i); - } - out_shape.resize(2); - auto batch_size = total / new_k; - out_shape.at(0) = batch_size; - out_shape.at(1) = input1->shape().at(0); - } - output->set_shape(out_shape); - output->set_data_type(input0->data_type()); - output->set_format(input0->format()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/full_connection.h b/mindspore/lite/src/ops/full_connection.h deleted file mode 100644 index 53e3ddd524..0000000000 --- a/mindspore/lite/src/ops/full_connection.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_ -#define LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class FullConnection : public PrimitiveC { - public: - FullConnection() = default; - ~FullConnection() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FullConnection, PrimitiveC); - explicit FullConnection(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetHasBias(bool has_bias); - void SetAxis(int axis); - void SetUseAxis(bool use_axis); - void SetActivationType(int activationType); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool GetHasBias() const; - int GetAxis() const; - bool GetUseAxis() const; - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FULL_CONNECTION_H_ diff --git a/mindspore/lite/src/ops/fused_batchnorm.cc b/mindspore/lite/src/ops/fused_batchnorm.cc deleted file mode 100644 index f1c79306a5..0000000000 --- a/mindspore/lite/src/ops/fused_batchnorm.cc +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
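[Editor's illustration] The `FullConnection::InferShape` removed above reduces to a small shape rule once the validity checks pass. A standalone restatement of that rule, under the convention visible in the original code that the weight is `[out_channels, k]` (the helper name and plain `int` vectors are illustrative, not MindSpore API):

```cpp
#include <cassert>
#include <functional>
#include <numeric>
#include <vector>

// out_channels = weight[0]; k = weight[1] must equal the product of the folded
// input dims (the original code validates this and returns an error otherwise).
std::vector<int> FcOutShape(const std::vector<int> &in, const std::vector<int> &weight,
                            bool use_axis, int axis) {
  if (use_axis) {
    std::vector<int> out(in);
    out.resize(axis + 1);   // keep dims [0, axis), as the original resize does
    out[axis] = weight[0];  // the axis dim becomes out_channels
    return out;
  }
  // Without an axis, the whole input is flattened to [batch, k].
  int total = std::accumulate(in.begin(), in.end(), 1, std::multiplies<int>());
  return {total / weight[1], weight[0]};
}

int main() {
  // [2, 4, 8] input against a [16, 32] weight, no axis: 2*4*8 / 32 = 2 -> [2, 16]
  assert(FcOutShape({2, 4, 8}, {16, 32}, false, 0) == (std::vector<int>{2, 16}));
  // Same input with use_axis and axis = 1: keep dim 0, replace dim 1 -> [2, 16]
  assert(FcOutShape({2, 4, 8}, {16, 32}, true, 1) == (std::vector<int>{2, 16}));
  return 0;
}
```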
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/fused_batchnorm.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float FusedBatchNorm::GetEpsilon() const { return this->primitive_->value.AsFusedBatchNorm()->epsilon; } -float FusedBatchNorm::GetMomentum() const { return this->primitive_->value.AsFusedBatchNorm()->momentum; } -int FusedBatchNorm::GetSpatial() const { return this->primitive_->value.AsFusedBatchNorm()->spatial; } - -void FusedBatchNorm::SetEpsilon(float epsilon) { this->primitive_->value.AsFusedBatchNorm()->epsilon = epsilon; } -void FusedBatchNorm::SetMomentum(float momentum) { this->primitive_->value.AsFusedBatchNorm()->momentum = momentum; } -void FusedBatchNorm::SetSpatial(int spatial) { this->primitive_->value.AsFusedBatchNorm()->spatial = spatial; } - -int FusedBatchNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_FusedBatchNorm; - } - if (this->primitive_->value.type != schema::PrimitiveType_FusedBatchNorm) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::FusedBatchNormT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr value failed"; - return RET_ERROR; - } - attr->epsilon = GetValue<float>(prim.GetAttr("epsilon")); - attr->momentum = GetValue<float>(prim.GetAttr("momentum")); - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else -int FusedBatchNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_FusedBatchNorm(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_FusedBatchNorm return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateFusedBatchNorm(*fbb, attr->epsilon(), attr->momentum(), attr->spatial()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_FusedBatchNorm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float FusedBatchNorm::GetEpsilon() const { return this->primitive_->value_as_FusedBatchNorm()->epsilon(); } -float FusedBatchNorm::GetMomentum() const { return this->primitive_->value_as_FusedBatchNorm()->momentum(); } -int FusedBatchNorm::GetSpatial() const { return this->primitive_->value_as_FusedBatchNorm()->spatial(); } - -PrimitiveC *FusedBatchNormCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<FusedBatchNorm>(primitive); -} -Registry FusedBatchNormRegistry(schema::PrimitiveType_FusedBatchNorm, FusedBatchNormCreator); -#endif - -int FusedBatchNorm::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - for (size_t i = 0; i < inputs_.size(); i++) { - if (outputs_.size() <= i) { - break; - } - outputs_.at(i)->set_shape(inputs_.at(i)->shape()); - outputs_.at(i)->set_data_type(inputs_.at(i)->data_type()); - outputs_.at(i)->set_format(inputs_.at(i)->format()); - } - if (outputs_.size() > 5) { - 
outputs_.at(5)->set_data_type(inputs_.at(0)->data_type()); - outputs_.at(5)->set_format(inputs_.at(0)->format()); - outputs_.at(5)->set_shape({1}); - } - return 0; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/fused_batchnorm.h b/mindspore/lite/src/ops/fused_batchnorm.h deleted file mode 100644 index 3da196b580..0000000000 --- a/mindspore/lite/src/ops/fused_batchnorm.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_FUSED_BATCHNORM_H_ -#define MINDSPORE_LITE_SRC_OPS_FUSED_BATCHNORM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class FusedBatchNorm : public PrimitiveC { - public: - FusedBatchNorm() = default; - ~FusedBatchNorm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(FusedBatchNorm, PrimitiveC); - explicit FusedBatchNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetEpsilon(float epsilon); - void SetMomentum(float momentum); - void SetSpatial(int spatial); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetEpsilon() const; - float GetMomentum() const; - int GetSpatial() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_FUSED_BATCHNORM_H_ diff --git a/mindspore/lite/src/ops/gather.cc b/mindspore/lite/src/ops/gather.cc deleted file mode 100644 index 9a7c6a7645..0000000000 --- a/mindspore/lite/src/ops/gather.cc +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/gather.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Gather::GetAxis() const { return this->primitive_->value.AsGather()->axis; } -int Gather::GetBatchDims() const { return this->primitive_->value.AsGather()->batchDims; } - -void Gather::SetAxis(int axis) { this->primitive_->value.AsGather()->axis = axis; } -void Gather::SetBatchDims(int batch_dims) { this->primitive_->value.AsGather()->batchDims = batch_dims; } -int Gather::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Gather; - } - if (this->primitive_->value.type != schema::PrimitiveType_Gather) { - MS_LOG(ERROR) << "Gather primitive value type : " << schema::EnumNamePrimitiveType(primitive_->value.type) - << "is not equal" << schema::EnumNamePrimitiveType(schema::PrimitiveType_Gather); - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto gather_attr = new (std::nothrow) schema::GatherT(); - if (gather_attr == nullptr) { - MS_LOG(ERROR) << "new primitive value.value error"; - delete this->primitive_; - delete gather_attr; - this->primitive_ = nullptr; - gather_attr = nullptr; - return RET_ERROR; - } - if (inputs.at(2)->isa<ValueNode>()) { - ValueNodePtr axis_tensor = inputs.at(2)->cast<ValueNodePtr>(); - int axis = CastToInt(axis_tensor->value()).front(); - gather_attr->axis = axis; - } else { - MS_LOG(ERROR) << "input axis is not value node."; - delete this->primitive_; - delete gather_attr; - this->primitive_ = nullptr; - gather_attr = nullptr; - return RET_ERROR; - } - gather_attr->batchDims = 0; - this->primitive_->value.value = gather_attr; - } - return RET_OK; -} -#else -int Gather::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Gather(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Gather return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateGather(*fbb, attr->axis(), attr->batchDims()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Gather, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int Gather::GetAxis() const { return this->primitive_->value_as_Gather()->axis(); } -int Gather::GetBatchDims() const { return this->primitive_->value_as_Gather()->batchDims(); } - -PrimitiveC *GatherCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Gather>(primitive); } -Registry GatherRegistry(schema::PrimitiveType_Gather, GatherCreator); -#endif - -int Gather::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() < kDoubleNum) { - MS_LOG(DEBUG) << "Gather should be at least two inputs"; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "Gather should have one outputs"; - return RET_INPUT_TENSOR_ERROR; - } - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto indices = inputs_.at(1); - MS_ASSERT(input != 
nullptr); - auto output = outputs_.front(); - MS_ASSERT(input != nullptr); - output->set_data_type(input->data_type()); - if (this->quant_type() == schema::QuantType_WeightQuant) { - output->set_data_type(kNumberTypeFloat32); - } - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - int axis = GetAxis(); - int batch_dims = GetBatchDims(); - if (axis < 0) { - axis += input->shape().size(); - } - auto indices_shape = indices->shape(); - int indices_rank = indices_shape.size(); - if (batch_dims != 0) { - MS_LOG(ERROR) << "batchDims " << batch_dims << " != 0, which is not support"; - return RET_ERROR; - } - auto in_shape = input->shape(); - int in_rank = in_shape.size(); - if (in_rank < axis + 1) { - MS_LOG(ERROR) << "input[0]'s rank is less than axis + 1"; - return RET_ERROR; - } - std::vector<int> out_shape{in_shape}; - out_shape.erase(out_shape.begin() + axis); - for (int i = indices_rank - 1; i >= 0; --i) { - out_shape.insert(out_shape.begin() + axis, indices_shape.at(i)); - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/gather.h b/mindspore/lite/src/ops/gather.h deleted file mode 100644 index f7dbc2adce..0000000000 --- a/mindspore/lite/src/ops/gather.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_GATHER_H_ -#define LITE_MINDSPORE_LITE_C_OPS_GATHER_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Gather : public PrimitiveC { - public: - Gather() = default; - ~Gather() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Gather, PrimitiveC); - explicit Gather(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - void SetBatchDims(int batch_dims); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; - int GetBatchDims() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_H_ diff --git a/mindspore/lite/src/ops/gather_nd.cc b/mindspore/lite/src/ops/gather_nd.cc deleted file mode 100644 index f420e606f8..0000000000 --- a/mindspore/lite/src/ops/gather_nd.cc +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
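[Editor's illustration] The `Gather::InferShape` just removed computes the output shape by splicing the indices shape in place of the gathered axis. A compact equivalent of that computation (hypothetical helper, shapes as `int` vectors):

```cpp
#include <cassert>
#include <vector>

std::vector<int> GatherOutShape(std::vector<int> in, const std::vector<int> &indices, int axis) {
  if (axis < 0) axis += static_cast<int>(in.size());  // same normalization as the original
  in.erase(in.begin() + axis);                        // drop the gathered dim
  in.insert(in.begin() + axis, indices.begin(), indices.end());  // splice indices dims in
  return in;
}

int main() {
  // Gathering a [3, 5] block of indices along axis 1 of a [2, 10, 7] tensor
  // yields [2, 3, 5, 7]; the original's reverse-order insert loop is equivalent.
  assert(GatherOutShape({2, 10, 7}, {3, 5}, 1) == (std::vector<int>{2, 3, 5, 7}));
  return 0;
}
```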
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/gather_nd.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int GatherNd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_GatherNd; - } - if (this->primitive_->value.type != schema::PrimitiveType_GatherNd) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::GatherNdT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("batchDims") != nullptr) { - attr->batchDims = static_cast<int32_t>(GetValue<int64_t>(prim.GetAttr("batchDims"))); - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int GatherNd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_GatherNd(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_GatherNd return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateGatherNd(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_GatherNd, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *GatherNdCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<GatherNd>(primitive); -} -Registry GatherNdRegistry(schema::PrimitiveType_GatherNd, GatherNdCreator); -#endif - -int GatherNd::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "GatherNd should have two inputs"; - return RET_INPUT_TENSOR_ERROR; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "GatherNd should have one outputs"; - return RET_INPUT_TENSOR_ERROR; - } - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto indices = inputs_.at(1); - MS_ASSERT(indices != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto in_shape = input->shape(); - int in_rank = in_shape.size(); - auto indices_shape = indices->shape(); - int indices_rank = indices_shape.size(); - if (indices_shape.at(indices_rank - 1) > in_rank) { - MS_LOG(ERROR) << "Input of indices data is error!"; - return RET_ERROR; - } - std::vector<int> out_shape; - int i = 0; - for (i = 0; i < indices_rank - 1; ++i) { - 
out_shape.emplace_back(indices_shape.at(i)); - } - for (i = indices_shape.at(indices_rank - 1); i < in_rank; ++i) { - out_shape.emplace_back(in_shape.at(i)); - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/gather_nd.h b/mindspore/lite/src/ops/gather_nd.h deleted file mode 100644 index 7733050c53..0000000000 --- a/mindspore/lite/src/ops/gather_nd.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_ -#define LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class GatherNd : public PrimitiveC { - public: - GatherNd() = default; - ~GatherNd() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(GatherNd, PrimitiveC); - explicit GatherNd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_GATHER_ND_H_ diff --git a/mindspore/lite/src/ops/gelu.cc b/mindspore/lite/src/ops/gelu.cc deleted file mode 100644 index 234f8e7454..0000000000 --- a/mindspore/lite/src/ops/gelu.cc +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
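[Editor's illustration] The two loops at the end of `GatherNd::InferShape` above encode the usual rule `out = indices.shape[:-1] ++ in.shape[indices.shape[-1]:]`. A standalone restatement (hypothetical helper):

```cpp
#include <cassert>
#include <vector>

std::vector<int> GatherNdOutShape(const std::vector<int> &in, const std::vector<int> &indices) {
  int consumed = indices.back();  // leading input dims each index tuple addresses
  std::vector<int> out(indices.begin(), indices.end() - 1);
  out.insert(out.end(), in.begin() + consumed, in.end());
  return out;
}

int main() {
  // [4, 2] indices into a [10, 10, 7] tensor: 4 index pairs, each selecting a
  // [7] slice of the trailing dim -> output [4, 7].
  assert(GatherNdOutShape({10, 10, 7}, {4, 2}) == (std::vector<int>{4, 7}));
  return 0;
}
```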
- */ - -#include "src/ops/gelu.h" -#include <memory> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int GeLU::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_GeLU; - } - if (this->primitive_->value.type != schema::PrimitiveType_GeLU) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::GeLUT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/gelu.h b/mindspore/lite/src/ops/gelu.h deleted file mode 100644 index d2fc914a75..0000000000 --- a/mindspore/lite/src/ops/gelu.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_GELU_H_ -#define LITE_MINDSPORE_LITE_C_OPS_GELU_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class GeLU : public PrimitiveC { - public: - GeLU() = default; - ~GeLU() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(GeLU, PrimitiveC); - explicit GeLU(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_GELU_H_ diff --git a/mindspore/lite/src/ops/greater.cc b/mindspore/lite/src/ops/greater.cc deleted file mode 100644 index a90926b5be..0000000000 --- a/mindspore/lite/src/ops/greater.cc +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
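[Editor's illustration] `GeLU::UnPackAttr` above is the minimal instance of the lazy-initialization pattern that every PRIMITIVE_WRITEABLE `UnPackAttr` in this patch follows. A sketch of that control flow with stand-in types (nothing below is MindSpore API):

```cpp
#include <iostream>
#include <new>

enum class PrimType { kNone, kGeLU, kGreater };  // stand-in for schema::PrimitiveType

struct PrimitiveT {      // stand-in for schema::PrimitiveT
  PrimType type = PrimType::kNone;
  int *value = nullptr;  // stands in for the op-specific attribute table
};

// Create the wrapper on first use, pin its type tag, reject a conflicting tag,
// then allocate the attribute payload if it is still missing.
bool UnPackAttr(PrimitiveT *&prim, PrimType expected) {
  if (prim == nullptr) {
    prim = new (std::nothrow) PrimitiveT;
    if (prim == nullptr) return false;  // "new primitiveT failed"
    prim->type = expected;
  }
  if (prim->type != expected) return false;  // "Primitive type is error"
  if (prim->value == nullptr) {
    prim->value = new (std::nothrow) int(0);
    if (prim->value == nullptr) return false;  // "new primitiveT value failed"
  }
  return true;
}

int main() {
  PrimitiveT *p = nullptr;
  std::cout << UnPackAttr(p, PrimType::kGeLU) << "\n";     // 1: created and tagged
  std::cout << UnPackAttr(p, PrimType::kGreater) << "\n";  // 0: tag conflict
  delete p->value;
  delete p;
  return 0;
}
```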
- */ - -#include "src/ops/greater.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Greater::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Greater; - } - if (this->primitive_->value.type != schema::PrimitiveType_Greater) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::GreaterT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Greater::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateGreater(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Greater, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *GreaterCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Greater>(primitive); } -Registry GreaterRegistry(schema::PrimitiveType_Greater, GreaterCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/greater.h b/mindspore/lite/src/ops/greater.h deleted file mode 100644 index ae7ef82b51..0000000000 --- a/mindspore/lite/src/ops/greater.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_GREATER_H_ -#define LITE_MINDSPORE_LITE_C_OPS_GREATER_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class Greater : public ArithmeticCompare { - public: - Greater() = default; - ~Greater() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Greater, ArithmeticCompare); - explicit Greater(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_H_ diff --git a/mindspore/lite/src/ops/greater_equal.cc b/mindspore/lite/src/ops/greater_equal.cc deleted file mode 100644 index e7dd799802..0000000000 --- a/mindspore/lite/src/ops/greater_equal.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/greater_equal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int GreaterEqual::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateGreaterEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_GreaterEqual, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *GreaterEqualCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<GreaterEqual>(primitive); -} -Registry GreaterEqualRegistry(schema::PrimitiveType_GreaterEqual, GreaterEqualCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/greater_equal.h b/mindspore/lite/src/ops/greater_equal.h deleted file mode 100644 index f8df62e2fa..0000000000 --- a/mindspore/lite/src/ops/greater_equal.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class GreaterEqual : public ArithmeticCompare { - public: - GreaterEqual() = default; - ~GreaterEqual() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(GreaterEqual, ArithmeticCompare); - explicit GreaterEqual(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_GREATER_EQUAL_H_ diff --git a/mindspore/lite/src/ops/group_conv2d_grad_input.cc b/mindspore/lite/src/ops/group_conv2d_grad_input.cc deleted file mode 100644 index 7858392340..0000000000 --- a/mindspore/lite/src/ops/group_conv2d_grad_input.cc +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/group_conv2d_grad_input.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int GroupConv2DGradInput::GetFormat() const { return this->primitive_->value.AsGroupConv2DGradInput()->format; } -int GroupConv2DGradInput::GetGroup() const { return this->primitive_->value.AsGroupConv2DGradInput()->group; } -int GroupConv2DGradInput::GetChannelIn() const { return this->primitive_->value.AsGroupConv2DGradInput()->channelIn; } -int GroupConv2DGradInput::GetChannelOut() const { return this->primitive_->value.AsGroupConv2DGradInput()->channelOut; } -int GroupConv2DGradInput::GetKernelW() const { return this->primitive_->value.AsGroupConv2DGradInput()->kernelW; } -int GroupConv2DGradInput::GetKernelH() const { return this->primitive_->value.AsGroupConv2DGradInput()->kernelH; } -int GroupConv2DGradInput::GetStrideW() const { return this->primitive_->value.AsGroupConv2DGradInput()->strideW; } -int GroupConv2DGradInput::GetStrideH() const { return this->primitive_->value.AsGroupConv2DGradInput()->strideH; } -int GroupConv2DGradInput::GetPadMode() const { return this->primitive_->value.AsGroupConv2DGradInput()->padMode; } -int GroupConv2DGradInput::GetPadUp() const { return this->primitive_->value.AsGroupConv2DGradInput()->padUp; } -int GroupConv2DGradInput::GetPadDown() const { return this->primitive_->value.AsGroupConv2DGradInput()->padDown; } -int GroupConv2DGradInput::GetPadLeft() const { return this->primitive_->value.AsGroupConv2DGradInput()->padLeft; } -int GroupConv2DGradInput::GetPadRight() const { return this->primitive_->value.AsGroupConv2DGradInput()->padRight; } -int GroupConv2DGradInput::GetDilateW() const { return this->primitive_->value.AsGroupConv2DGradInput()->dilateW; } -int GroupConv2DGradInput::GetDilateH() const { return 
this->primitive_->value.AsGroupConv2DGradInput()->dilateH; } -std::vector<int> GroupConv2DGradInput::GetInputShape() const { - return this->primitive_->value.AsGroupConv2DGradInput()->input_shape; -} -int GroupConv2DGradInput::GetActivationType() const { - return this->primitive_->value.AsGroupConv2DGradInput()->activationType; -} - -void GroupConv2DGradInput::SetFormat(int format) { - this->primitive_->value.AsGroupConv2DGradInput()->format = (schema::Format)format; -} -void GroupConv2DGradInput::SetGroup(int group) { this->primitive_->value.AsGroupConv2DGradInput()->group = group; } -void GroupConv2DGradInput::SetChannelIn(int channel_in) { - this->primitive_->value.AsGroupConv2DGradInput()->channelIn = channel_in; -} -void GroupConv2DGradInput::SetChannelOut(int channel_out) { - this->primitive_->value.AsGroupConv2DGradInput()->channelOut = channel_out; -} -void GroupConv2DGradInput::SetKernelW(int kernel_w) { - this->primitive_->value.AsGroupConv2DGradInput()->kernelW = kernel_w; -} -void GroupConv2DGradInput::SetKernelH(int kernel_h) { - this->primitive_->value.AsGroupConv2DGradInput()->kernelH = kernel_h; -} -void GroupConv2DGradInput::SetStrideW(int stride_w) { - this->primitive_->value.AsGroupConv2DGradInput()->strideW = stride_w; -} -void GroupConv2DGradInput::SetStrideH(int stride_h) { - this->primitive_->value.AsGroupConv2DGradInput()->strideH = stride_h; -} -void GroupConv2DGradInput::SetPadMode(int pad_mode) { - this->primitive_->value.AsGroupConv2DGradInput()->padMode = (schema::PadMode)pad_mode; -} -void GroupConv2DGradInput::SetPadUp(int pad_up) { this->primitive_->value.AsGroupConv2DGradInput()->padUp = pad_up; } -void GroupConv2DGradInput::SetPadDown(int pad_down) { - this->primitive_->value.AsGroupConv2DGradInput()->padDown = pad_down; -} -void GroupConv2DGradInput::SetPadLeft(int pad_left) { - this->primitive_->value.AsGroupConv2DGradInput()->padLeft = pad_left; -} -void GroupConv2DGradInput::SetPadRight(int pad_right) { - this->primitive_->value.AsGroupConv2DGradInput()->padRight = pad_right; -} -void GroupConv2DGradInput::SetDilateW(int dilate_w) { - this->primitive_->value.AsGroupConv2DGradInput()->dilateW = dilate_w; -} -void GroupConv2DGradInput::SetDilateH(int dilate_h) { - this->primitive_->value.AsGroupConv2DGradInput()->dilateH = dilate_h; -} -void GroupConv2DGradInput::SetActivationType(int activation_type) { - this->primitive_->value.AsGroupConv2DGradInput()->activationType = (schema::ActivationType)activation_type; -} -#else -int GroupConv2DGradInput::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_GroupConv2DGradInput(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_GroupConv2DGradInput return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> input_shape; - if (attr->input_shape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->input_shape()->size()); i++) { - input_shape.push_back(attr->input_shape()->data()[i]); - } - } - auto val_offset = schema::CreateGroupConv2DGradInputDirect( - *fbb, attr->format(), attr->group(), attr->channelIn(), attr->channelOut(), attr->kernelW(), attr->kernelH(), - attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), attr->padDown(), attr->padLeft(), - attr->padRight(), attr->dilateW(), attr->dilateH(), attr->hasBias(), &input_shape, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, 
schema::PrimitiveType_GroupConv2DGradInput, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -int GroupConv2DGradInput::GetFormat() const { return this->primitive_->value_as_GroupConv2DGradInput()->format(); } -int GroupConv2DGradInput::GetGroup() const { return this->primitive_->value_as_GroupConv2DGradInput()->group(); } -int GroupConv2DGradInput::GetChannelIn() const { - return this->primitive_->value_as_GroupConv2DGradInput()->channelIn(); -} -int GroupConv2DGradInput::GetChannelOut() const { - return this->primitive_->value_as_GroupConv2DGradInput()->channelOut(); -} -int GroupConv2DGradInput::GetKernelW() const { return this->primitive_->value_as_GroupConv2DGradInput()->kernelW(); } -int GroupConv2DGradInput::GetKernelH() const { return this->primitive_->value_as_GroupConv2DGradInput()->kernelH(); } -int GroupConv2DGradInput::GetStrideW() const { return this->primitive_->value_as_GroupConv2DGradInput()->strideW(); } -int GroupConv2DGradInput::GetStrideH() const { return this->primitive_->value_as_GroupConv2DGradInput()->strideH(); } -int GroupConv2DGradInput::GetPadMode() const { return this->primitive_->value_as_GroupConv2DGradInput()->padMode(); } -int GroupConv2DGradInput::GetPadUp() const { return this->primitive_->value_as_GroupConv2DGradInput()->padUp(); } -int GroupConv2DGradInput::GetPadDown() const { return this->primitive_->value_as_GroupConv2DGradInput()->padDown(); } -int GroupConv2DGradInput::GetPadLeft() const { return this->primitive_->value_as_GroupConv2DGradInput()->padLeft(); } -int GroupConv2DGradInput::GetPadRight() const { return this->primitive_->value_as_GroupConv2DGradInput()->padRight(); } -int GroupConv2DGradInput::GetDilateW() const { return this->primitive_->value_as_GroupConv2DGradInput()->dilateW(); } -int GroupConv2DGradInput::GetDilateH() const { return this->primitive_->value_as_GroupConv2DGradInput()->dilateH(); } -std::vector<int> GroupConv2DGradInput::GetInputShape() const { - auto fb_vector = this->primitive_->value_as_GroupConv2DGradInput()->input_shape(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int GroupConv2DGradInput::GetActivationType() const { - return this->primitive_->value_as_GroupConv2DGradInput()->activationType(); -} -PrimitiveC *GroupConv2DGradInputCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<GroupConv2DGradInput>(primitive); -} -Registry GroupConv2DGradInputRegistry(schema::PrimitiveType_GroupConv2DGradInput, GroupConv2DGradInputCreator); - -#endif - -int GroupConv2DGradInput::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - if (inputs.size() < 2) { - MS_LOG(ERROR) << "Conv2d Grad input should be at least two input"; - return RET_ERROR; - } - if (1 != outputs.size()) { - MS_LOG(ERROR) << "Conv2d Grad output should have one output"; - return RET_ERROR; - } - - auto *in0 = inputs.at(0); - - MS_ASSERT(in0 != nullptr); - - auto *out = outputs.at(0); - MS_ASSERT(out != nullptr); - out->set_shape(GetInputShape()); - - out->set_data_type(in0->data_type()); - out->set_format(in0->format()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/group_conv2d_grad_input.h b/mindspore/lite/src/ops/group_conv2d_grad_input.h deleted file mode 100644 index 8581abdfcb..0000000000 --- a/mindspore/lite/src/ops/group_conv2d_grad_input.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * 
you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_GROUP_CONV2D_GRAD_INPUT_H_ -#define MINDSPORE_LITE_SRC_OPS_GROUP_CONV2D_GRAD_INPUT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include <string> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class GroupConv2DGradInput : public PrimitiveC { - public: - GroupConv2DGradInput() = default; - ~GroupConv2DGradInput() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(GroupConv2DGradInput, PrimitiveC); - explicit GroupConv2DGradInput(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetGroup(int group); - void SetChannelIn(int channel_in); - void SetChannelOut(int channel_out); - void SetKernelW(int kernel_w); - void SetKernelH(int kernel_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetDilateW(int dilate_w); - void SetDilateH(int dilate_h); - void SetActivationType(int activation_type); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetGroup() const; - int GetChannelIn() const; - int GetChannelOut() const; - int GetKernelW() const; - int GetKernelH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetDilateW() const; - int GetDilateH() const; - std::vector<int> GetInputShape() const; - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_GROUP_CONV2D_GRAD_INPUT_H_ diff --git a/mindspore/lite/src/ops/gru.cc b/mindspore/lite/src/ops/gru.cc deleted file mode 100644 index 40ae70335d..0000000000 --- a/mindspore/lite/src/ops/gru.cc +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/gru.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool Gru::GetBidirection() const { return this->primitive_->value.AsGru()->bidirection; } - -void Gru::SetBidirection(bool bidirection) { this->primitive_->value.AsGru()->bidirection = bidirection; } - -#else - -bool Gru::GetBidirection() const { return this->primitive_->value_as_Gru()->bidirection(); } -int Gru::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Gru(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Gru return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateGru(*fbb, attr->bidirection()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Gru, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *GruCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Gru>(primitive); } -Registry GruRegistry(schema::PrimitiveType_Gru, GruCreator); -#endif - -const int kGruInputNum = 5; -const int kGruInputWithSeqLenNum = 6; -const int kGruOutputNum = 2; -int Gru::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if ((inputs_.size() != kGruInputNum && inputs_.size() != kGruInputWithSeqLenNum) || - outputs_.size() != kGruOutputNum) { - MS_LOG(ERROR) << "OpGru inputs or outputs size error."; - return RET_INPUT_TENSOR_ERROR; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight_gate = inputs_.at(1); - MS_ASSERT(weight_gate != nullptr); - auto weight_recurrence = inputs_.at(2); - MS_ASSERT(weight_recurrence != nullptr); - auto bias = inputs_.at(3); - MS_ASSERT(bias != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - for (int i = 0; i < kGruOutputNum; i++) { - outputs_.at(i)->set_data_type(input->data_type()); - outputs_.at(i)->set_format(input->format()); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - auto in_shape = input->shape(); // seq_len, batch, input_size - auto w_gate_shape = weight_gate->shape(); // num_direction, hidden_size * 3, input_size - auto w_recu_shape = weight_recurrence->shape(); // num_direction, hidden_size * 3, hidden_size - auto bias_shape = bias->shape(); // num_direction, hidden_size * 6 - if (in_shape.size() != 3 || w_gate_shape.size() != 3 || w_recu_shape.size() != 3) { - MS_LOG(ERROR) << "OpGru input dims should be 3."; - return RET_ERROR; - } - if (w_gate_shape[1] != w_recu_shape[1] || w_recu_shape[1] * 2 != bias_shape[1]) { - MS_LOG(ERROR) << "OpGru w_gate, w_recu and bias hidden size not match."; - return RET_ERROR; - } - if (inputs_.size() == kGruInputWithSeqLenNum) { - auto seq_len_shape = inputs_.at(5)->shape(); - if (seq_len_shape[0] > 1) { - MS_LOG(WARNING) << "OpGru with batch_size > 1 only support all same sequence_len now."; - return RET_ERROR; - } - if (seq_len_shape.size() != 1 && seq_len_shape[0] != in_shape[1]) { - MS_LOG(ERROR) << "OpGru sequence_len shape[0] and batch_size not match."; - return RET_ERROR; - } - } - - int hidden_size = w_gate_shape[1] / 3; - // set output - std::vector<int> out_shape(in_shape); - out_shape[2] = hidden_size; - if (GetBidirection()) { - out_shape.insert(out_shape.begin() + 1, 2); - } else { - out_shape.insert(out_shape.begin() + 1, 1); - } - 
output->set_shape(out_shape); - // set hidden state - std::vector<int> state_shape(in_shape); - state_shape[0] = GetBidirection() ? 2 : 1; - state_shape[2] = hidden_size; - outputs_[1]->set_shape(state_shape); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/gru.h b/mindspore/lite/src/ops/gru.h deleted file mode 100644 index 84ca28fb9b..0000000000 --- a/mindspore/lite/src/ops/gru.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_SRC_OPS_GRU_H_ -#define MINDSPORE_LITE_SRC_OPS_GRU_H_ -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -/* - * gru with linear_before_reset = 0 - */ -class Gru : public PrimitiveC { - public: - Gru() = default; - ~Gru() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Gru, PrimitiveC); - explicit Gru(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBidirection(bool bidirection); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool GetBidirection() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_GRU_H_ diff --git a/mindspore/lite/src/ops/hashtable_lookup.cc b/mindspore/lite/src/ops/hashtable_lookup.cc deleted file mode 100644 index d479bdddb0..0000000000 --- a/mindspore/lite/src/ops/hashtable_lookup.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
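[Editor's illustration] The `Gru::InferShape` removed above derives both output shapes from the input shape and the gate weight. A condensed restatement (hypothetical helper; input is `[seq_len, batch, input_size]`, and per the original's `w_gate_shape[1] / 3`, the gate weight's second dim is `3 * hidden`):

```cpp
#include <cassert>
#include <utility>
#include <vector>

std::pair<std::vector<int>, std::vector<int>> GruOutShapes(const std::vector<int> &in,
                                                           int gate_rows,  // w_gate.shape[1] == 3 * hidden
                                                           bool bidirectional) {
  int hidden = gate_rows / 3;
  int num_dir = bidirectional ? 2 : 1;
  std::vector<int> out = {in[0], num_dir, in[1], hidden};  // [seq_len, num_dir, batch, hidden]
  std::vector<int> state = {num_dir, in[1], hidden};       // final hidden state
  return {out, state};
}

int main() {
  auto shapes = GruOutShapes({20, 8, 64}, /*gate_rows=*/3 * 128, /*bidirectional=*/false);
  assert(shapes.first == (std::vector<int>{20, 1, 8, 128}));
  assert(shapes.second == (std::vector<int>{1, 8, 128}));
  return 0;
}
```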
- */ -#include "src/ops/hashtable_lookup.h" - -#include "src/common/string_util.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int HashtableLookup::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_OK; } -#else -int HashtableLookup::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateHashtableLookup(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_HashtableLookup, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *HashtableLookupCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<HashtableLookup>(primitive); -} -Registry HashtableLookupRegistry(schema::PrimitiveType_HashtableLookup, HashtableLookupCreator); -#endif - -int HashtableLookup::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.at(0); - auto values = inputs_.at(2); - auto output = outputs_.at(0); - auto hits = outputs_.at(1); - MS_ASSERT(input != nullptr); - MS_ASSERT(values != nullptr); - MS_ASSERT(output != nullptr); - MS_ASSERT(hits != nullptr); - - std::vector<int> hits_shape; - hits_shape.push_back(input->DimensionSize(0)); - - output->set_data_type(values->data_type()); - output->set_format(input->format()); - hits->set_shape(hits_shape); - hits->set_data_type(kNumberTypeUInt8); - hits->set_format(input->format()); - - if (input->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/hashtable_lookup.h b/mindspore/lite/src/ops/hashtable_lookup.h deleted file mode 100644 index fd8fa86be3..0000000000 --- a/mindspore/lite/src/ops/hashtable_lookup.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_HASHTABLE_LOOKUP_H_ -#define LITE_MINDSPORE_LITE_C_OPS_HASHTABLE_LOOKUP_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class HashtableLookup : public PrimitiveC { - public: - HashtableLookup() = default; - ~HashtableLookup() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(HashtableLookup, PrimitiveC); - explicit HashtableLookup(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_HASHTABLE_LOOKUP_H_ diff --git a/mindspore/lite/src/ops/identity.h b/mindspore/lite/src/ops/identity.h deleted file mode 100644 index a66091e1b7..0000000000 --- a/mindspore/lite/src/ops/identity.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/primitive_c.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_IDENTITY_H_ -#define LITE_MINDSPORE_LITE_C_OPS_IDENTITY_H_ - -namespace mindspore { -namespace lite { -class Identity : public PrimitiveC { - public: - MS_DECLARE_PARENT(Identity, PrimitiveC); - Identity() = default; - ~Identity() = default; - explicit Identity(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_IDENTITY_H_ diff --git a/mindspore/lite/src/ops/if.h b/mindspore/lite/src/ops/if.h deleted file mode 100644 index 6c5f0a730f..0000000000 --- a/mindspore/lite/src/ops/if.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/primitive_c.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_IF_H_ -#define LITE_MINDSPORE_LITE_C_OPS_IF_H_ - -namespace mindspore { -namespace lite { -class If : public PrimitiveC { - public: - MS_DECLARE_PARENT(If, PrimitiveC); - If() = default; - ~If() = default; - explicit If(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_IF_H_ diff --git a/mindspore/lite/src/ops/instance_norm.cc b/mindspore/lite/src/ops/instance_norm.cc deleted file mode 100644 index 62d8cfc65e..0000000000 --- a/mindspore/lite/src/ops/instance_norm.cc +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/instance_norm.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float InstanceNorm::GetEpsilon() const { return this->primitive_->value.AsInstanceNorm()->epsilon; } - -void InstanceNorm::SetEpsilon(float epsilon) { this->primitive_->value.AsInstanceNorm()->epsilon = epsilon; } - -int InstanceNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_InstanceNorm; - } - if (this->primitive_->value.type != schema::PrimitiveType_InstanceNorm) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::InstanceNormT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new InstanceNormT failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - attr->epsilon = GetValue<float>(prim.GetAttr("epsilon")); - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else -int InstanceNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateInstanceNorm(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_InstanceNorm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -float InstanceNorm::GetEpsilon() const { return this->primitive_->value_as_InstanceNorm()->epsilon(); } - -PrimitiveC *InstanceNormCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<InstanceNorm>(primitive); -} -Registry InstanceNormRegistry(schema::PrimitiveType_InstanceNorm, InstanceNormCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/instance_norm.h b/mindspore/lite/src/ops/instance_norm.h deleted file mode 100644 
index 7f74fc0da2..0000000000 --- a/mindspore/lite/src/ops/instance_norm.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_INSTANE_NORM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_INSTANE_NORM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class InstanceNorm : public PrimitiveC { - public: - InstanceNorm() = default; - ~InstanceNorm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(InstanceNorm, PrimitiveC); - explicit InstanceNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetEpsilon(float epsilon); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetEpsilon() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_INSTANE_NORM_H_ diff --git a/mindspore/lite/src/ops/invert_permutation.cc b/mindspore/lite/src/ops/invert_permutation.cc deleted file mode 100644 index 3d7479988d..0000000000 --- a/mindspore/lite/src/ops/invert_permutation.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/invert_permutation.h" -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -#ifdef PRIMITIVE_WRITEABLE -#else -int InvertPermutation::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateSize(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_InvertPermutation, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *InvertPermutationCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<InvertPermutation>(primitive); -} -Registry InvertPermutationRegistry(schema::PrimitiveType_InvertPermutation, InvertPermutationCreator); -#endif - -int InvertPermutation::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (input->data_type() != kNumberTypeInt32) { - MS_LOG(ERROR) << "InvertPermutation does not support input of data type: " << input->data_type(); - return RET_ERROR; - } - if (input->shape().size() != 1) { - MS_LOG(ERROR) << "InvertPermutation input must be one-dimensional."; - return RET_ERROR; - } - output->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/invert_permutation.h b/mindspore/lite/src/ops/invert_permutation.h deleted file mode 100644 index a79f4814a4..0000000000 --- a/mindspore/lite/src/ops/invert_permutation.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_INVERTPERMUTATION_H_ -#define LITE_MINDSPORE_LITE_C_OPS_INVERTPERMUTATION_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class InvertPermutation : public PrimitiveC { - public: - InvertPermutation() = default; - ~InvertPermutation() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(InvertPermutation, PrimitiveC); - explicit InvertPermutation(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_INVERTPERMUTATION_H_ diff --git a/mindspore/lite/src/ops/is_finite.h b/mindspore/lite/src/ops/is_finite.h deleted file mode 100644 index 9d18ebc757..0000000000 --- a/mindspore/lite/src/ops/is_finite.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/primitive_c.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_IS_FINITE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_IS_FINITE_H_ - -namespace mindspore { -namespace lite { -class IsFinite : public PrimitiveC { - public: - MS_DECLARE_PARENT(IsFinite, PrimitiveC); - IsFinite() = default; - ~IsFinite() = default; - explicit IsFinite(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_IS_FINITE_H_ diff --git a/mindspore/lite/src/ops/l2_norm.cc b/mindspore/lite/src/ops/l2_norm.cc deleted file mode 100644 index 3edaec93e6..0000000000 --- a/mindspore/lite/src/ops/l2_norm.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/l2_norm.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> L2Norm::GetAxis() const { return this->primitive_->value.AsL2Norm()->axis; } -float L2Norm::GetEpsilon() const { return this->primitive_->value.AsL2Norm()->epsilon; } -int L2Norm::GetActivationType() const { return this->primitive_->value.AsL2Norm()->activationType; } - -void L2Norm::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsL2Norm()->axis = axis; } -void L2Norm::SetEpsilon(float epsilon) { this->primitive_->value.AsL2Norm()->epsilon = epsilon; } -void L2Norm::SetActivationType(int activationType) { - this->primitive_->value.AsL2Norm()->activationType = (schema::ActivationType)activationType; -} - -#else -int L2Norm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_L2Norm(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_L2Norm return nullptr"; - return RET_ERROR; - } - - std::vector<int32_t> axis; - if (attr->axis() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) { - axis.push_back(attr->axis()->data()[i]); - } - } - auto val_offset = schema::CreateL2NormDirect(*fbb, &axis, attr->epsilon()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_L2Norm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -std::vector<int> L2Norm::GetAxis() const { - auto fb_vector = this->primitive_->value_as_L2Norm()->axis(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -float L2Norm::GetEpsilon() const { return this->primitive_->value_as_L2Norm()->epsilon(); } -int L2Norm::GetActivationType() const { return this->primitive_->value_as_L2Norm()->activationType(); } - -PrimitiveC *L2NormCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<L2Norm>(primitive); } -Registry L2NormRegistry(schema::PrimitiveType_L2Norm, L2NormCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/l2_norm.h b/mindspore/lite/src/ops/l2_norm.h deleted file mode 100644 index e4e0aefb25..0000000000 --- a/mindspore/lite/src/ops/l2_norm.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class L2Norm : public PrimitiveC { - public: - L2Norm() = default; - ~L2Norm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(L2Norm, PrimitiveC); - explicit L2Norm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(const std::vector<int> &axis); - void SetEpsilon(float epsilon); - void SetActivationType(int activationType); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - std::vector<int> GetAxis() const; - float GetEpsilon() const; - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_L2_NORM_H_ diff --git a/mindspore/lite/src/ops/layer_norm.cc b/mindspore/lite/src/ops/layer_norm.cc deleted file mode 100644 index 1a531d73df..0000000000 --- a/mindspore/lite/src/ops/layer_norm.cc +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "src/ops/layer_norm.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float LayerNorm::GetEpsilon() const { return this->primitive_->value.AsLayerNorm()->epsilon; } -int LayerNorm::GetBeginNormAxis() const { return this->primitive_->value.AsLayerNorm()->begin_norm_axis; } -int LayerNorm::GetBeginParamsAxis() const { return this->primitive_->value.AsLayerNorm()->begin_params_axis; } - -void LayerNorm::SetEpsilon(float epsilon) { this->primitive_->value.AsLayerNorm()->epsilon = epsilon; } -void LayerNorm::SetBeginNormAxis(int axis) { this->primitive_->value.AsLayerNorm()->begin_norm_axis = axis; } -void LayerNorm::SetBeginParamsAxis(int axis) { this->primitive_->value.AsLayerNorm()->begin_params_axis = axis; } - -int LayerNorm::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_LayerNorm; - } - if (this->primitive_->value.type != schema::PrimitiveType_LayerNorm) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto layer_norm_attr = new (std::nothrow) schema::LayerNormT(); - if (layer_norm_attr == nullptr) { - MS_LOG(ERROR) << "new primitive value.value error"; - return RET_ERROR; - } - auto value_attr = prim.GetAttr("epsilon"); - if (value_attr != nullptr) { - layer_norm_attr->epsilon = GetValue<float>(value_attr); - } else { - layer_norm_attr->epsilon = 1e-7; 
- } - auto norm_axis_attr = prim.GetAttr("begin_norm_axis"); - if (norm_axis_attr != nullptr) { - layer_norm_attr->begin_norm_axis = GetValue<float>(norm_axis_attr); - } else { - layer_norm_attr->begin_norm_axis = -1; - } - auto params_axis_attr = prim.GetAttr("begin_params_axis"); - if (params_axis_attr != nullptr) { - layer_norm_attr->begin_params_axis = GetValue<float>(params_axis_attr); - } else { - layer_norm_attr->begin_params_axis = -1; - } - this->primitive_->value.value = layer_norm_attr; - } - return RET_OK; -} -#else -int LayerNorm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_LayerNorm(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_LayerNorm return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateLayerNorm(*fbb, attr->begin_norm_axis(), attr->begin_params_axis(), attr->epsilon()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LayerNorm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -float LayerNorm::GetEpsilon() const { return this->primitive_->value_as_LayerNorm()->epsilon(); } -int LayerNorm::GetBeginNormAxis() const { return this->primitive_->value_as_LayerNorm()->begin_norm_axis(); } -int LayerNorm::GetBeginParamsAxis() const { return this->primitive_->value_as_LayerNorm()->begin_params_axis(); } - -PrimitiveC *LayerNormCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LayerNorm>(primitive); -} -Registry LayerNormRegistry(schema::PrimitiveType_LayerNorm, LayerNormCreator); -#endif -int LayerNorm::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (outputs_.size() != kSingleNum || (inputs_.size() != kSingleNum && inputs_.size() != kTripleNum)) { - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs_.size() << ",input size: " << inputs_.size(); - return RET_PARAM_INVALID; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.at(0); - MS_ASSERT(output != nullptr); - output->set_format(input->format()); - output->set_data_type(input->data_type()); - - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - for (size_t i = GetBeginNormAxis(); i < input_shape.size(); i++) { - normlized_shape_.push_back(input_shape[i]); - } - output->set_shape(input_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/layer_norm.h b/mindspore/lite/src/ops/layer_norm.h deleted file mode 100644 index 6307c8bb8d..0000000000 --- a/mindspore/lite/src/ops/layer_norm.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_SRC_OPS_LAYER_NORM_H_ -#define MINDSPORE_LITE_SRC_OPS_LAYER_NORM_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class LayerNorm : public PrimitiveC { - public: - LayerNorm() = default; - ~LayerNorm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LayerNorm, PrimitiveC); - explicit LayerNorm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetEpsilon(float epsilon); - void SetBeginNormAxis(int axis); - void SetBeginParamsAxis(int axis); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetEpsilon() const; - int GetBeginNormAxis() const; - int GetBeginParamsAxis() const; - std::vector<int> GetNormlizedShape() const { return normlized_shape_; } - - protected: - std::vector<int> normlized_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_LAYER_NORM_H_ diff --git a/mindspore/lite/src/ops/leaky_relu.cc b/mindspore/lite/src/ops/leaky_relu.cc deleted file mode 100644 index e39858c8e1..0000000000 --- a/mindspore/lite/src/ops/leaky_relu.cc +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/leaky_relu.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float LeakyReLU::GetNegativeSlope() const { return this->primitive_->value.AsLeakyReLU()->negativeSlope; } - -void LeakyReLU::SetNegativeSlope(float negative_slope) { - this->primitive_->value.AsLeakyReLU()->negativeSlope = negative_slope; -} - -#else - -float LeakyReLU::GetNegativeSlope() const { return this->primitive_->value_as_LeakyReLU()->negativeSlope(); } - -int LeakyReLU::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_LeakyReLU(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_LeakyReLU return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateLeakyReLU(*fbb, attr->negativeSlope()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LeakyReLU, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *LeakyReLUCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LeakyReLU>(primitive); -} -Registry LeakyReLURegistry(schema::PrimitiveType_LeakyReLU, LeakyReLUCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/leaky_relu.h b/mindspore/lite/src/ops/leaky_relu.h deleted file mode 100644 index 64922f1afa..0000000000 --- a/mindspore/lite/src/ops/leaky_relu.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class LeakyReLU : public PrimitiveC { - public: - LeakyReLU() = default; - ~LeakyReLU() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LeakyReLU, PrimitiveC); - explicit LeakyReLU(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetNegativeSlope(float negative_slope); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetNegativeSlope() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LEAKY_RE_L_U_H_ diff --git a/mindspore/lite/src/ops/less.cc b/mindspore/lite/src/ops/less.cc deleted file mode 100644 index fe4d82ee76..0000000000 --- a/mindspore/lite/src/ops/less.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/less.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int Less::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateLess(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Less, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LessCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Less>(primitive); } -Registry LessRegistry(schema::PrimitiveType_Less, LessCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/less.h b/mindspore/lite/src/ops/less.h deleted file mode 100644 index 2967cfd70a..0000000000 --- a/mindspore/lite/src/ops/less.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LESS_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LESS_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class Less : public ArithmeticCompare { - public: - Less() = default; - ~Less() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Less, ArithmeticCompare); - explicit Less(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LESS_H_ diff --git a/mindspore/lite/src/ops/less_equal.cc b/mindspore/lite/src/ops/less_equal.cc deleted file mode 100644 index 89a88fc6c7..0000000000 --- a/mindspore/lite/src/ops/less_equal.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/less_equal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int LessEqual::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLessEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LessEqual, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *LessEqualCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LessEqual>(primitive); -} -Registry LessEqualRegistry(schema::PrimitiveType_LessEqual, LessEqualCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/less_equal.h b/mindspore/lite/src/ops/less_equal.h deleted file mode 100644 index ade4d12c4c..0000000000 --- a/mindspore/lite/src/ops/less_equal.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class LessEqual : public ArithmeticCompare { - public: - LessEqual() = default; - ~LessEqual() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LessEqual, ArithmeticCompare); - explicit LessEqual(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LESS_EQUAL_H_ diff --git a/mindspore/lite/src/ops/lin_space.cc b/mindspore/lite/src/ops/lin_space.cc deleted file mode 100644 index 5a1ac650fb..0000000000 --- a/mindspore/lite/src/ops/lin_space.cc +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/lin_space.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int LinSpace::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLinSpace(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LinSpace, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *LinSpaceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LinSpace>(primitive); -} -Registry LinSpaceRegistry(schema::PrimitiveType_LinSpace, LinSpaceCreator); -#endif -int LinSpace::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - auto input = inputs.front(); - MS_ASSERT(input != nullptr); - auto output = outputs.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - auto num = inputs.at(2)->data_c(); - if (num == nullptr) { - return RET_INFER_INVALID; - } - output->set_shape({reinterpret_cast<int *>(num)[0]}); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/lin_space.h b/mindspore/lite/src/ops/lin_space.h deleted file mode 100644 index d22f959922..0000000000 --- a/mindspore/lite/src/ops/lin_space.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LIN_SPACE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LIN_SPACE_H_ - -namespace mindspore { -namespace lite { -class LinSpace : public PrimitiveC { - public: - LinSpace() = default; - ~LinSpace() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LinSpace, PrimitiveC); - explicit LinSpace(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) override; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_LIN_SPACE_H_ diff --git a/mindspore/lite/src/ops/local_response_normalization.cc b/mindspore/lite/src/ops/local_response_normalization.cc deleted file mode 100644 index d3df71d4a6..0000000000 --- a/mindspore/lite/src/ops/local_response_normalization.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/local_response_normalization.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int LocalResponseNormalization::GetDepthRadius() const { - return this->primitive_->value.AsLocalResponseNormalization()->depth_radius; -} -float LocalResponseNormalization::GetBias() const { - return this->primitive_->value.AsLocalResponseNormalization()->bias; -} -float LocalResponseNormalization::GetAlpha() const { - return this->primitive_->value.AsLocalResponseNormalization()->alpha; -} -float LocalResponseNormalization::GetBeta() const { - return this->primitive_->value.AsLocalResponseNormalization()->beta; -} - -void LocalResponseNormalization::SetDepthRadius(int depth_radius) { - this->primitive_->value.AsLocalResponseNormalization()->depth_radius = depth_radius; -} -void LocalResponseNormalization::SetBias(float bias) { - this->primitive_->value.AsLocalResponseNormalization()->bias = bias; -} -void LocalResponseNormalization::SetAlpha(float alpha) { - this->primitive_->value.AsLocalResponseNormalization()->alpha = alpha; -} -void LocalResponseNormalization::SetBeta(float beta) { - this->primitive_->value.AsLocalResponseNormalization()->beta = beta; -} - -#else - -int LocalResponseNormalization::GetDepthRadius() const { - return this->primitive_->value_as_LocalResponseNormalization()->depth_radius(); -} -float LocalResponseNormalization::GetBias() const { - return this->primitive_->value_as_LocalResponseNormalization()->bias(); -} -float LocalResponseNormalization::GetAlpha() const { - return this->primitive_->value_as_LocalResponseNormalization()->alpha(); -} -float LocalResponseNormalization::GetBeta() const { - return this->primitive_->value_as_LocalResponseNormalization()->beta(); -} - -int LocalResponseNormalization::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_LocalResponseNormalization(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_LocalResponseNormalization return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreateLocalResponseNormalization(*fbb, attr->depth_radius(), attr->bias(), attr->alpha(), attr->beta()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LocalResponseNormalization, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LocalResponseNormalizationCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LocalResponseNormalization>(primitive); -} -Registry LocalResponseNormalizationRegistry(schema::PrimitiveType_LocalResponseNormalization, - LocalResponseNormalizationCreator); - -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/local_response_normalization.h b/mindspore/lite/src/ops/local_response_normalization.h deleted file mode 100644 index 972a38c4b0..0000000000 --- a/mindspore/lite/src/ops/local_response_normalization.h +++ /dev/null @@ -1,50 +0,0 
@@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class LocalResponseNormalization : public PrimitiveC { - public: - LocalResponseNormalization() = default; - ~LocalResponseNormalization() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LocalResponseNormalization, PrimitiveC); - explicit LocalResponseNormalization(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetDepthRadius(int depth_radius); - void SetBias(float bias); - void SetAlpha(float alpha); - void SetBeta(float beta); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetDepthRadius() const; - float GetBias() const; - float GetAlpha() const; - float GetBeta() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LOCAL_RESPONSE_NORMALIZATION_H_ diff --git a/mindspore/lite/src/ops/log.cc b/mindspore/lite/src/ops/log.cc deleted file mode 100644 index 73b57e2a25..0000000000 --- a/mindspore/lite/src/ops/log.cc +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/log.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Log::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Log; - } - if (this->primitive_->value.type != schema::PrimitiveType_Log) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - auto attr = std::make_unique<schema::LogT>(); - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - return RET_OK; -} -#else -int Log::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLog(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Log, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *LogCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Log>(primitive); } -Registry LogRegistry(schema::PrimitiveType_Log, LogCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/log.h b/mindspore/lite/src/ops/log.h deleted file mode 100644 index f742e087e8..0000000000 --- a/mindspore/lite/src/ops/log.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOG_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOG_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Log : public ArithmeticSelf { - public: - Log() = default; - ~Log() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Log, ArithmeticSelf); - explicit Log(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LOG_H_ diff --git a/mindspore/lite/src/ops/log_grad.cc b/mindspore/lite/src/ops/log_grad.cc deleted file mode 100644 index 4bb8be9d67..0000000000 --- a/mindspore/lite/src/ops/log_grad.cc +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/log_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int LogGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(primitive != nullptr); - MS_ASSERT(fbb != nullptr); - auto attr = primitive->value_as_LogGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_LogGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateLogGrad(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LogGradCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<LogGrad>(primitive); } -Registry LogGradRegistry(schema::PrimitiveType_LogGrad, LogGradCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/log_grad.h b/mindspore/lite/src/ops/log_grad.h deleted file mode 100644 index 6c88dd5a1a..0000000000 --- a/mindspore/lite/src/ops/log_grad.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class LogGrad : public PrimitiveC { - public: - LogGrad() = default; - ~LogGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LogGrad, PrimitiveC); - explicit LogGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_LOG_GRAD_H_ diff --git a/mindspore/lite/src/ops/logical_and.cc b/mindspore/lite/src/ops/logical_and.cc deleted file mode 100644 index 461d87e7b3..0000000000 --- a/mindspore/lite/src/ops/logical_and.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/logical_and.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int LogicalAnd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLogicalAnd(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogicalAnd, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LogicalAndCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LogicalAnd>(primitive); -} -Registry LogicalAndRegistry(schema::PrimitiveType_LogicalAnd, LogicalAndCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/logical_and.h b/mindspore/lite/src/ops/logical_and.h deleted file mode 100644 index 765a7cb5d9..0000000000 --- a/mindspore/lite/src/ops/logical_and.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOGICAL_AND_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_AND_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class LogicalAnd : public Arithmetic { - public: - LogicalAnd() = default; - ~LogicalAnd() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LogicalAnd, Arithmetic); - explicit LogicalAnd(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_AND_H_ diff --git a/mindspore/lite/src/ops/logical_not.cc b/mindspore/lite/src/ops/logical_not.cc deleted file mode 100644 index 5eeae1915d..0000000000 --- a/mindspore/lite/src/ops/logical_not.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/logical_not.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int LogicalNot::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLogicalNot(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogicalNot, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *LogicalNotCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LogicalNot>(primitive); -} -Registry LogicalNotRegistry(schema::PrimitiveType_LogicalNot, LogicalNotCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/logical_not.h b/mindspore/lite/src/ops/logical_not.h deleted file mode 100644 index 53b511c104..0000000000 --- a/mindspore/lite/src/ops/logical_not.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOGICAL_NOT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_NOT_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class LogicalNot : public ArithmeticSelf { - public: - LogicalNot() = default; - ~LogicalNot() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LogicalNot, ArithmeticSelf); - explicit LogicalNot(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_NOT_H_ diff --git a/mindspore/lite/src/ops/logical_or.cc b/mindspore/lite/src/ops/logical_or.cc deleted file mode 100644 index 142d22b986..0000000000 --- a/mindspore/lite/src/ops/logical_or.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/logical_or.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int LogicalOr::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateLogicalOr(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LogicalOr, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LogicalOrCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<LogicalOr>(primitive); -} -Registry LogicalOrRegistry(schema::PrimitiveType_LogicalOr, LogicalOrCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/logical_or.h b/mindspore/lite/src/ops/logical_or.h deleted file mode 100644 index 5c342410bf..0000000000 --- a/mindspore/lite/src/ops/logical_or.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOGICAL_OR_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOGICAL_OR_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class LogicalOr : public Arithmetic { - public: - LogicalOr() = default; - ~LogicalOr() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(LogicalOr, Arithmetic); - explicit LogicalOr(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LOGICAL_OR_H_ diff --git a/mindspore/lite/src/ops/lrn.cc b/mindspore/lite/src/ops/lrn.cc deleted file mode 100644 index 851e070c10..0000000000 --- a/mindspore/lite/src/ops/lrn.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/lrn.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float Lrn::GetAlpha() const { return this->primitive_->value.AsLrn()->alpha; } -float Lrn::GetBeta() const { return this->primitive_->value.AsLrn()->beta; } -float Lrn::GetBias() const { return this->primitive_->value.AsLrn()->bias; } -int Lrn::GetSize() const { return this->primitive_->value.AsLrn()->size; } - -void Lrn::SetAlpha(float alpha) { this->primitive_->value.AsLrn()->alpha = alpha; } -void Lrn::SetBeta(float beta) { this->primitive_->value.AsLrn()->beta = beta; } -void Lrn::SetBias(float bias) { this->primitive_->value.AsLrn()->bias = bias; } -void Lrn::SetSize(int size) { this->primitive_->value.AsLrn()->size = size; } - -#else - -float Lrn::GetAlpha() const { return this->primitive_->value_as_Lrn()->alpha(); } -float Lrn::GetBeta() const { return this->primitive_->value_as_Lrn()->beta(); } -float Lrn::GetBias() const { return this->primitive_->value_as_Lrn()->bias(); } -int Lrn::GetSize() const { return this->primitive_->value_as_Lrn()->size(); } - -int Lrn::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Lrn(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Lrn return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateLrn(*fbb, attr->alpha(), attr->beta(), attr->bias(), attr->size()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Lrn, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LrnCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Lrn>(primitive); } -Registry LrnRegistry(schema::PrimitiveType_Lrn, LrnCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/lrn.h b/mindspore/lite/src/ops/lrn.h deleted file mode 100644 index fac65bd8ef..0000000000 --- a/mindspore/lite/src/ops/lrn.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_LRN_H_
-#define LITE_MINDSPORE_LITE_C_OPS_LRN_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Lrn : public PrimitiveC {
- public:
-  Lrn() = default;
-  ~Lrn() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Lrn, PrimitiveC);
-  explicit Lrn(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetAlpha(float alpha);
-  void SetBeta(float beta);
-  void SetBias(float bias);
-  void SetSize(int size);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  float GetAlpha() const;
-  float GetBeta() const;
-  float GetBias() const;
-  int GetSize() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_LRN_H_
diff --git a/mindspore/lite/src/ops/lsh_projection.cc b/mindspore/lite/src/ops/lsh_projection.cc
deleted file mode 100644
index 893fde7870..0000000000
--- a/mindspore/lite/src/ops/lsh_projection.cc
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "src/ops/lsh_projection.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int LshProjection::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_OK; }
-int LshProjection::GetLshType() const { return this->primitive_->value.AsLshProjection()->type; }
-#else
-int LshProjection::GetLshType() const { return this->primitive_->value_as_LshProjection()->type(); }
-
-int LshProjection::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_LshProjection();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "LshProjection attr is nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateLshProjection(*fbb, attr->type());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_LshProjection, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *LshProjectionCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<LshProjection>(primitive);
-}
-Registry LshProjectionRegistry(schema::PrimitiveType_LshProjection, LshProjectionCreator);
-
-#endif
-
-int LshProjection::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  if (inputs_.size() != kDoubleNum && inputs_.size() != kTripleNum) {
-    MS_LOG(ERROR) << "LshProjection expects 2 or 3 inputs, but got " << inputs_.size();
-    return RET_ERROR;
-  }
-  if (outputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "LshProjection expects 1 output, but got " << outputs_.size();
-    return RET_ERROR;
-  }
-
-  auto in_hash = inputs_.at(0);
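-  // Input contract (checked by the asserts that follow): input 0 is a 2-D
-  // hash-seed tensor whose second dimension (bits per hash) may not exceed 32,
-  // input 1 holds the rows to be projected, and the optional input 2 is a 1-D
-  // weight tensor with exactly one entry per row of input 1.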
-  MS_ASSERT(in_hash->shape().size() == 2);
-  MS_ASSERT(in_hash->DimensionSize(1) <= 32);
-  MS_ASSERT(inputs_.at(1)->shape().size() >= 1);
-
-  if (inputs_.size() == kTripleNum) {
-    MS_ASSERT(inputs_.at(2)->shape().size() == 1);
-    MS_ASSERT(inputs_.at(2)->DimensionSize(0) == inputs_.at(1)->DimensionSize(0));
-  }
-
-  auto out_tensor = outputs_.front();
-  out_tensor->set_data_type(kNumberTypeInt32);
-  out_tensor->set_format(schema::Format::Format_NHWC);
-
-  std::vector<int> out_shape;
-  switch (GetLshType()) {
-    case schema::LshProjectionType_SPARSE:
-      // one int32 index per hash function
-      out_shape.push_back(in_hash->DimensionSize(0));
-      break;
-    case schema::LshProjectionType_DENSE:
-      // one entry per hash bit
-      out_shape.push_back(in_hash->DimensionSize(0) * in_hash->DimensionSize(1));
-      break;
-    default:
-      return RET_ERROR;
-  }
-  out_tensor->set_shape(out_shape);
-  return RET_OK;
-}
-
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/lsh_projection.h b/mindspore/lite/src/ops/lsh_projection.h
deleted file mode 100644
index 8888d4d73f..0000000000
--- a/mindspore/lite/src/ops/lsh_projection.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef LITE_MINDSPORE_LITE_C_OPS_LSH_PROJECTION_H_
-#define LITE_MINDSPORE_LITE_C_OPS_LSH_PROJECTION_H_
-
-#include <vector>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class LshProjection : public PrimitiveC {
- public:
-  LshProjection() = default;
-  ~LshProjection() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(LshProjection, PrimitiveC);
-  explicit LshProjection(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) override;
-  int GetLshType() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_LSH_PROJECTION_H_
diff --git a/mindspore/lite/src/ops/lstm.cc b/mindspore/lite/src/ops/lstm.cc
deleted file mode 100644
index 7d1a398784..0000000000
--- a/mindspore/lite/src/ops/lstm.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "src/ops/lstm.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool Lstm::GetBidirection() const { return this->primitive_->value.AsLstm()->bidirection; } - -float Lstm::GetSmooth() const { return this->primitive_->value.AsLstm()->smooth; } - -void Lstm::SetBidirection(bool bidirection) { this->primitive_->value.AsLstm()->bidirection = bidirection; } - -void Lstm::SetSmooth(float smooth) { this->primitive_->value.AsLstm()->smooth = smooth; } - -#else - -bool Lstm::GetBidirection() const { return this->primitive_->value_as_Lstm()->bidirection(); } -float Lstm::GetSmooth() const { return this->primitive_->value_as_Lstm()->smooth(); } -int Lstm::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Lstm(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Lstm return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateLstm(*fbb, attr->bidirection(), attr->smooth()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Lstm, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *LstmCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Lstm>(primitive); } -Registry LstmRegistry(schema::PrimitiveType_Lstm, LstmCreator); - -#endif - -const int kLstmInputNum = 6; -const int kLstmOutputNum = 3; -int Lstm::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() != kLstmInputNum || outputs_.size() != kLstmOutputNum) { - MS_LOG(ERROR) << "OpLstm inputs or outputs size error."; - return RET_INPUT_TENSOR_ERROR; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto weight_i = inputs_.at(1); - MS_ASSERT(weight_i != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - for (int i = 0; i < kLstmOutputNum; i++) { - outputs_.at(i)->set_data_type(input->data_type()); - outputs_.at(i)->set_format(input->format()); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> in_shape = input->shape(); - std::vector<int> w_shape = weight_i->shape(); // layer, hidden_size * 4, input_size - if (in_shape.size() != 3 || w_shape.size() != 3) { - MS_LOG(ERROR) << "OpLstm input dims should be 3."; - return RET_ERROR; - } - - int hidden_size = w_shape[1] / 4; - // set output - std::vector<int> out_shape(in_shape); - out_shape[2] = hidden_size; - if (GetBidirection()) { - out_shape.insert(out_shape.begin() + 1, 2); - } else { - out_shape.insert(out_shape.begin() + 1, 1); - } - output->set_shape(out_shape); - // set hidden state, cell state - std::vector<int> state_shape(in_shape); - state_shape[0] = GetBidirection() ? 2 : 1; - state_shape[2] = hidden_size; - outputs_[1]->set_shape(state_shape); - outputs_[2]->set_shape(state_shape); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/lstm.h b/mindspore/lite/src/ops/lstm.h deleted file mode 100644 index fd58a99a46..0000000000 --- a/mindspore/lite/src/ops/lstm.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_LSTM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LSTM_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Lstm : public PrimitiveC { - public: - Lstm() = default; - ~Lstm() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Lstm, PrimitiveC); - explicit Lstm(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBidirection(bool bidirection); - void SetSmooth(float smooth); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool GetBidirection() const; - float GetSmooth() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_LSTM_H_ diff --git a/mindspore/lite/src/ops/make_tuple.cc b/mindspore/lite/src/ops/make_tuple.cc deleted file mode 100644 index 63528302d5..0000000000 --- a/mindspore/lite/src/ops/make_tuple.cc +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/make_tuple.h" -#include <vector> -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int MakeTuple::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_MakeTuple; - } - if (this->primitive_->value.type != schema::PrimitiveType_MakeTuple) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MakeTupleT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int MakeTuple::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateMakeTuple(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_MakeTuple, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *MakeTupleCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<MakeTuple>(primitive); -} -Registry MakeTupleRegistry(schema::PrimitiveType_MakeTuple, MakeTupleCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/make_tuple.h b/mindspore/lite/src/ops/make_tuple.h deleted file mode 100644 index 5a7611af48..0000000000 --- a/mindspore/lite/src/ops/make_tuple.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_MAKE_TUPLE_H_ -#define MINDSPORE_LITE_SRC_OPS_MAKE_TUPLE_H_ -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class MakeTuple : public PrimitiveC { - public: - MakeTuple() = default; - ~MakeTuple() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(MakeTuple, PrimitiveC); - explicit MakeTuple(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_MAKE_TUPLE_H_ diff --git a/mindspore/lite/src/ops/matmul.cc b/mindspore/lite/src/ops/matmul.cc deleted file mode 100644 index 9e50dc222e..0000000000 --- a/mindspore/lite/src/ops/matmul.cc +++ /dev/null @@ -1,152 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/matmul.h" -#include <memory> -#include <utility> -#ifdef PRIMITIVE_WRITEABLE -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -bool MatMul::GetTransposeA() const { return this->primitive_->value.AsMatMul()->transposeA; } -bool MatMul::GetTransposeB() const { return this->primitive_->value.AsMatMul()->transposeB; } - -void MatMul::SetTransposeA(bool transpose_a) { this->primitive_->value.AsMatMul()->transposeA = transpose_a; } -void MatMul::SetTransposeB(bool transpose_b) { this->primitive_->value.AsMatMul()->transposeB = transpose_b; } - -int MatMul::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_MatMul; - } - if (this->primitive_->value.type != schema::PrimitiveType_MatMul) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MatMulT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->transposeA = GetValue<bool>(prim.GetAttr("transpose_a")); - attr->transposeB = GetValue<bool>(prim.GetAttr("transpose_b")); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else - -bool MatMul::GetTransposeA() const { return this->primitive_->value_as_MatMul()->transposeA(); } -bool MatMul::GetTransposeB() const { return 
this->primitive_->value_as_MatMul()->transposeB(); }
-
-int MatMul::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_MatMul();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_MatMul return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateMatMul(*fbb, attr->broadcast(), attr->transposeA(), attr->transposeB());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_MatMul, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *MatMulCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<MatMul>(primitive); }
-Registry MatMulRegistry(schema::PrimitiveType_MatMul, MatMulCreator);
-#endif
-
-int MatMul::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  auto input0 = inputs_.front();
-  MS_ASSERT(input0 != nullptr);
-  auto input1 = inputs_.at(1);
-  MS_ASSERT(input1 != nullptr);
-  auto output = outputs_.front();
-  MS_ASSERT(output != nullptr);
-
-  output->set_data_type(input0->data_type());
-  output->set_format(input0->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  std::vector<int> a_shape = input0->shape();
-  std::vector<int> b_shape = input1->shape();
-
-  // collapse a trailing 1x1 spatial pair so a 4-D (N, C, 1, 1) input is treated as a matrix
-  if (a_shape.size() == 4 && a_shape[2] == 1 && a_shape[3] == 1) {
-    a_shape.resize(2);
-    input0->set_shape(a_shape);
-  }
-
-  // promote 1-D operands to matrices and remember to squeeze the result afterwards
-  bool del_start = false;
-  bool del_end = false;
-  if (a_shape.size() == 1) {
-    a_shape.insert(a_shape.begin(), 1);
-    input0->set_shape(a_shape);
-    del_start = true;
-  }
-  if (b_shape.size() == 1) {
-    b_shape.push_back(1);
-    input1->set_shape(b_shape);
-    del_end = true;
-  }
-  // all leading (batch) dimensions of the two operands must match
-  for (size_t i = 0; i < (a_shape.size() - 2) && i < (b_shape.size() - 2); ++i) {
-    if (a_shape.at(a_shape.size() - 3 - i) != b_shape.at(b_shape.size() - 3 - i)) {
-      MS_LOG(ERROR) << "Op MatMul's batch dimensions must be equal";
-      return RET_INPUT_TENSOR_ERROR;
-    }
-  }
-
-  // the transpose flags exchange the last two dimensions of the corresponding operand
-  if (GetTransposeA()) {
-    std::swap(a_shape[a_shape.size() - 1], a_shape[a_shape.size() - 2]);
-  }
-  if (GetTransposeB()) {
-    std::swap(b_shape[b_shape.size() - 1], b_shape[b_shape.size() - 2]);
-  }
-  std::vector<int> c_shape(a_shape);
-  c_shape[c_shape.size() - 1] = b_shape[b_shape.size() - 1];
-  if (del_start) {
-    c_shape.erase(c_shape.begin());
-  }
-  if (del_end) {
-    c_shape.pop_back();
-  }
-  output->set_shape(c_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/matmul.h b/mindspore/lite/src/ops/matmul.h
deleted file mode 100644
index 9c2d1b650a..0000000000
--- a/mindspore/lite/src/ops/matmul.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_MAT_MUL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_MAT_MUL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class MatMul : public PrimitiveC { - public: - MatMul() = default; - ~MatMul() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(MatMul, PrimitiveC); - explicit MatMul(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetTransposeA(bool transpose_a); - void SetTransposeB(bool transpose_b); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool GetTransposeA() const; - bool GetTransposeB() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_MAT_MUL_H_ diff --git a/mindspore/lite/src/ops/maximum.cc b/mindspore/lite/src/ops/maximum.cc deleted file mode 100644 index 1d17d3e655..0000000000 --- a/mindspore/lite/src/ops/maximum.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "include/errorcode.h" -#include "src/ops/maximum.h" -#include "src/common/log_adapter.h" -#ifdef PRIMITIVE_WRITEABLE -#include <float.h> -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Maximum::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Maximum; - } - if (this->primitive_->value.type != schema::PrimitiveType_Maximum) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MaximumT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Maximum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateMaximum(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Maximum, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *MaximumCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Maximum>(primitive); } -Registry MaximumRegistry(schema::PrimitiveType_Maximum, MaximumCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/maximum.h b/mindspore/lite/src/ops/maximum.h deleted file mode 100644 index 052088ebab..0000000000 --- a/mindspore/lite/src/ops/maximum.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#ifndef MINDSPORE_LITE_SRC_OPS_MAXIMUM_H_
-#define MINDSPORE_LITE_SRC_OPS_MAXIMUM_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-
-#include "src/ops/arithmetic.h"
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class Maximum : public Arithmetic {
- public:
-  Maximum() = default;
-  ~Maximum() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(Maximum, Arithmetic);
-  explicit Maximum(schema::PrimitiveT *primitive) : Arithmetic(primitive) {}
-  int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override;
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // MINDSPORE_LITE_SRC_OPS_MAXIMUM_H_
diff --git a/mindspore/lite/src/ops/maximum_grad.cc b/mindspore/lite/src/ops/maximum_grad.cc
deleted file mode 100644
index 634e4d5853..0000000000
--- a/mindspore/lite/src/ops/maximum_grad.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "include/errorcode.h"
-#include "src/ops/maximum_grad.h"
-#include "src/common/log_adapter.h"
-#ifdef PRIMITIVE_WRITEABLE
-#include <float.h>
-#include "src/param_value_lite.h"
-#endif
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-int MaximumGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_MaximumGrad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_MaximumGrad) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::MaximumGradT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.value = attr;
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "primitive value is nullptr";
-      return RET_ERROR;
-    }
-  }
-  return RET_OK;
-}
-#else
-int MaximumGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto val_offset = schema::CreateMaximumGrad(*fbb);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_MaximumGrad, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-PrimitiveC *MaximumGradCreator(const schema::Primitive *primitive) {
-  return PrimitiveC::NewPrimitiveC<MaximumGrad>(primitive);
-}
-Registry MaximumGradRegistry(schema::PrimitiveType_MaximumGrad, MaximumGradCreator);
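-
-// The shape inference that follows mirrors NumPy-style broadcasting: both
-// input shapes are right-aligned against dy's rank and padded with leading 1s
-// into x1_shape_/x2_shape_ for the kernel, while each dx simply inherits the
-// shape, data type and format of its corresponding forward input.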
-
-#endif
-int MaximumGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  if (inputs_.size() != 3) {
-    MS_LOG(ERROR) << "The number of inputs must be 3";
-    return RET_ERROR;
-  }
-  if (outputs_.size() != 2) {
-    MS_LOG(ERROR) << "The number of outputs must be 2";
-    return RET_ERROR;
-  }
-
-  auto x1 = inputs_[0];
-  auto x2 = inputs_[1];
-  auto dy = inputs_[2];
-  auto dx1 = outputs_[0];
-  auto dx2 = outputs_[1];
-
-  MS_ASSERT(dy != nullptr);
-  MS_ASSERT(x1 != nullptr);
-  MS_ASSERT(x2 != nullptr);
-  MS_ASSERT(dx1 != nullptr);
-  MS_ASSERT(dx2 != nullptr);
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-
-  auto inShape0 = x1->shape();
-  auto inShape1 = x2->shape();
-  auto outShape = dy->shape();
-
-  ndim_ = outShape.size();
-  x1_shape_.resize(ndim_);
-  x2_shape_.resize(ndim_);
-  dy_shape_.resize(ndim_);
-  auto fillDimNum0 = outShape.size() - inShape0.size();
-  auto fillDimNum1 = outShape.size() - inShape1.size();
-  int j0 = 0;
-  int j1 = 0;
-  for (unsigned int i = 0; i < outShape.size(); i++) {
-    x1_shape_[i] = (i < fillDimNum0) ? 1 : inShape0[j0++];
-    x2_shape_[i] = (i < fillDimNum1) ? 1 : inShape1[j1++];
-    dy_shape_[i] = outShape[i];
-  }
-
-  dx1->set_shape(x1->shape());
-  dx2->set_shape(x2->shape());
-  dx1->set_data_type(dy->data_type());
-  dx2->set_data_type(dy->data_type());
-  dx1->set_format(dy->format());
-  dx2->set_format(dy->format());
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/maximum_grad.h b/mindspore/lite/src/ops/maximum_grad.h
deleted file mode 100644
index 10e73b485a..0000000000
--- a/mindspore/lite/src/ops/maximum_grad.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_MAXIMUM_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_MAXIMUM_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_grad.h" -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class MaximumGrad : public ArithmeticGrad { - public: -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(MaximumGrad, ArithmeticGrad); - MaximumGrad() = default; - explicit MaximumGrad(schema::PrimitiveT *primitive) : ArithmeticGrad(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - MaximumGrad() = default; - - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_MAXIMUM_GRAD_H_ diff --git a/mindspore/lite/src/ops/merge.cc b/mindspore/lite/src/ops/merge.cc deleted file mode 100644 index fcf8b505b7..0000000000 --- a/mindspore/lite/src/ops/merge.cc +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/merge.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/tensorlist.h" - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Merge::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Merge; - } - if (this->primitive_->value.type != schema::PrimitiveType_Merge) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::MergeT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - PopulaterQuantParam(prim, inputs); - return RET_OK; -} - -#else -int Merge::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Merge(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Merge return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateMerge(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Merge, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *MergeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Merge>(primitive); } -Registry MergeRegistry(schema::PrimitiveType_Merge, MergeCreator); -#endif - -InferStatus Merge::AbleToInfer(const std::vector<lite::Tensor *> &inputs) { - for (auto &input : inputs) { - if (input->shape().empty()) { - return HasZeroShape; - } - if (input->root_tensor() != nullptr && input->root_tensor()->data_c() != nullptr) { - continue; - } - if (input->data_c() == nullptr) { - return NotAble; - } - } - return Able; -} - -int Merge::Infer(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs) { - for (size_t i = 0; i < inputs.size(); i++) { - auto *input = inputs[i]; - auto *output = outputs[i]; - if (input == nullptr) { - MS_LOG(ERROR) << "input tensor is nullptr"; - return RET_ERROR; - } - if (output == nullptr) { - MS_LOG(ERROR) << "output tensor is nullptr"; - return RET_ERROR; - } - output->set_data_type(input->data_type()); - output->set_shape(input->shape()); - output->set_format(input->format()); - auto data_type = input->data_type(); - if (data_type != kObjectTypeTensorType) { - continue; - } else { - auto input_tensorlist = reinterpret_cast<TensorList *>(input); - auto output_tensorlist = reinterpret_cast<TensorList *>(output); - output_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - } - } - return RET_OK; -} - -int Merge::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(inputs_.size() == 2 * outputs_.size()); - for (size_t i = 0; i < outputs_.size(); ++i) { - outputs_[i]->set_data_type(inputs_[i]->data_type()); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<Tensor *> left_part_inputs{}; - left_part_inputs.assign(inputs_.begin(), inputs_.begin() + 
inputs_.size() / 2); - - std::vector<Tensor *> right_part_inputs{}; - right_part_inputs.assign(inputs_.begin() + inputs_.size() / 2, inputs_.end()); - - if (AbleToInfer(left_part_inputs) == Able) { - return Infer(left_part_inputs, outputs_); - } - - if (AbleToInfer(right_part_inputs) == Able) { - return Infer(right_part_inputs, outputs_); - } - - if (AbleToInfer(left_part_inputs) == HasZeroShape && AbleToInfer(right_part_inputs) == HasZeroShape) { - return Infer(left_part_inputs, outputs_); - } - - return RET_INFER_INVALID; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/merge.h b/mindspore/lite/src/ops/merge.h deleted file mode 100644 index fa177913ec..0000000000 --- a/mindspore/lite/src/ops/merge.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_MERGE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_MERGE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -enum InferStatus { Able, NotAble, HasZeroShape }; - -class Merge : public PrimitiveC { - public: - Merge() = default; - ~Merge() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Merge, PrimitiveC); - explicit Merge(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - - private: - static InferStatus AbleToInfer(const std::vector<lite::Tensor *> &inputs); - static int Infer(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs); -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_MERGE_H_ diff --git a/mindspore/lite/src/ops/mfcc.cc b/mindspore/lite/src/ops/mfcc.cc deleted file mode 100644 index 511e1cfc95..0000000000 --- a/mindspore/lite/src/ops/mfcc.cc +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-#include "src/ops/mfcc.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-float Mfcc::GetFreqUpperLimit() const { return this->primitive_->value.AsMfcc()->freqUpperLimit; }
-float Mfcc::GetFreqLowerLimit() const { return this->primitive_->value.AsMfcc()->freqLowerLimit; }
-int Mfcc::GetFilterBankChannelNum() const { return this->primitive_->value.AsMfcc()->filterBankChannelNum; }
-int Mfcc::GetDctCoeffNum() const { return this->primitive_->value.AsMfcc()->dctCoeffNum; }
-
-#else
-float Mfcc::GetFreqUpperLimit() const { return this->primitive_->value_as_Mfcc()->freqUpperLimit(); }
-float Mfcc::GetFreqLowerLimit() const { return this->primitive_->value_as_Mfcc()->freqLowerLimit(); }
-int Mfcc::GetFilterBankChannelNum() const { return this->primitive_->value_as_Mfcc()->filterBankChannelNum(); }
-int Mfcc::GetDctCoeffNum() const { return this->primitive_->value_as_Mfcc()->dctCoeffNum(); }
-int Mfcc::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Mfcc();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Mfcc return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateMfcc(*fbb, attr->freqUpperLimit(), attr->freqLowerLimit(),
-                                       attr->filterBankChannelNum(), attr->dctCoeffNum());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Mfcc, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *MfccCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Mfcc>(primitive); }
-Registry MfccRegistry(schema::PrimitiveType_Mfcc, MfccCreator);
-#endif
-int Mfcc::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  auto input = inputs_.front();
-  MS_ASSERT(input != nullptr);
-  auto output = outputs_.front();
-  MS_ASSERT(output != nullptr);
-  output->set_data_type(input->data_type());
-  output->set_format(input->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  auto input_shape = input->shape();
-  if (input_shape.size() != 3) {
-    MS_LOG(ERROR) << "The first input of Mfcc must have 3 dimensions, but has " << input_shape.size();
-    return RET_ERROR;
-  }
-  if (inputs_[1]->ElementsNum() != 1) {
-    MS_LOG(ERROR) << "The second input of Mfcc must hold exactly one value, but holds "
-                  << inputs_[1]->ElementsNum();
-    return RET_ERROR;
-  }
-  std::vector<int> output_shape(3);
-  output_shape[0] = input_shape[0];
-  output_shape[1] = input_shape[1];
-  output_shape[2] = GetDctCoeffNum();
-  outputs_.front()->set_shape(output_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/mfcc.h b/mindspore/lite/src/ops/mfcc.h
deleted file mode 100644
index 8b94599226..0000000000
--- a/mindspore/lite/src/ops/mfcc.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_MFCC_H_ -#define LITE_MINDSPORE_LITE_C_OPS_MFCC_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Mfcc : public PrimitiveC { - public: - Mfcc() = default; - ~Mfcc() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Mfcc, PrimitiveC); - explicit Mfcc(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFreqUpperLimit(float freq_upper_limit) { - this->primitive_->value.AsMfcc()->freqUpperLimit = freq_upper_limit; - } - void SetFreqLowerLimit(float freq_lower_limit) { - this->primitive_->value.AsMfcc()->freqLowerLimit = freq_lower_limit; - } - void SetFilterBankChannelNum(int filter_bank_channel_num) { - this->primitive_->value.AsMfcc()->filterBankChannelNum = filter_bank_channel_num; - } - void SetDctCoeffNum(int dct_coeff_num) { this->primitive_->value.AsMfcc()->dctCoeffNum = dct_coeff_num; } -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetFreqUpperLimit() const; - float GetFreqLowerLimit() const; - int GetFilterBankChannelNum() const; - int GetDctCoeffNum() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_MFCC_H_ diff --git a/mindspore/lite/src/ops/minimum.cc b/mindspore/lite/src/ops/minimum.cc deleted file mode 100644 index 5881976ad3..0000000000 --- a/mindspore/lite/src/ops/minimum.cc +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/minimum.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Minimum::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Minimum; - } - if (this->primitive_->value.type != schema::PrimitiveType_Minimum) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MinimumT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Minimum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateMinimum(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Minimum, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *MinimumCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Minimum>(primitive); } -Registry MinimumRegistry(schema::PrimitiveType_Minimum, MinimumCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/minimum.h b/mindspore/lite/src/ops/minimum.h deleted file mode 100644 index de69645c70..0000000000 --- a/mindspore/lite/src/ops/minimum.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
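Note how minimum.cc closes: a creator function plus a file-scope Registry object, so the op registers itself as a static-initialization side effect and the framework never needs a central list of ops. A minimal sketch of what such a registry could look like (names here are illustrative; the real one lives in src/ops/ops_register.h):

    #include <functional>
    #include <unordered_map>
    #include <utility>
    struct Primitive;   // stand-in for schema::Primitive
    struct PrimitiveC;  // stand-in for lite::PrimitiveC
    using Creator = std::function<PrimitiveC *(const Primitive *)>;
    // Function-local static avoids the static-initialization-order problem.
    std::unordered_map<int, Creator> &CreatorTable() {
      static std::unordered_map<int, Creator> table;
      return table;
    }
    struct Registry {
      Registry(int type, Creator c) { CreatorTable()[type] = std::move(c); }  // runs before main()
    };

A file-scope `Registry MinimumRegistry(PrimitiveType_Minimum, MinimumCreator);` then populates the table the moment the object file is linked in.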
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_MINIMUM_H_ -#define MINDSPORE_LITE_SRC_OPS_MINIMUM_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Minimum : public Arithmetic { - public: - Minimum() = default; - ~Minimum() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Arithmetic, Arithmetic); - explicit Minimum(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_MINIMUM_H_ diff --git a/mindspore/lite/src/ops/minimum_grad.cc b/mindspore/lite/src/ops/minimum_grad.cc deleted file mode 100644 index 6c5df183f5..0000000000 --- a/mindspore/lite/src/ops/minimum_grad.cc +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "include/errorcode.h" -#include "src/ops/minimum_grad.h" -#include "src/common/log_adapter.h" -#ifdef PRIMITIVE_WRITEABLE -#include <float.h> -#include "src/param_value_lite.h" -#endif - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int MinimumGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_MinimumGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_MinimumGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MinimumGradT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -PrimitiveC *MinimumGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<MinimumGrad>(primitive); -} -Registry MinimumGradRegistry(schema::PrimitiveType_MinimumGrad, MinimumGradCreator); - -int MinimumGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateMinimumGrad(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_MinimumGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -#endif - -int 
MinimumGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != 3) { - MS_LOG(ERROR) << "The number of input must be 3"; - return RET_ERROR; - } - if (outputs_.size() != 2) { - MS_LOG(ERROR) << "The number of output must be 2"; - return RET_ERROR; - } - - auto x1 = inputs_[0]; - auto x2 = inputs_[1]; - auto dy = inputs_[2]; - auto dx1 = outputs_[0]; - auto dx2 = outputs_[1]; - - MS_ASSERT(dy != nullptr); - MS_ASSERT(x1 != nullptr); - MS_ASSERT(x2 != nullptr); - MS_ASSERT(dx1 != nullptr); - MS_ASSERT(dx2 != nullptr); - if (!infer_flag()) { - return RET_OK; - } - - auto inShape0 = x1->shape(); - auto inShape1 = x2->shape(); - auto outShape = dy->shape(); - - ndim_ = outShape.size(); - x1_shape_.resize(ndim_); - x2_shape_.resize(ndim_); - dy_shape_.resize(ndim_); - auto fillDimNum0 = outShape.size() - inShape0.size(); - auto fillDimNum1 = outShape.size() - inShape1.size(); - int j0 = 0; - int j1 = 0; - for (unsigned int i = 0; i < outShape.size(); i++) { - x1_shape_[i] = (i < fillDimNum0) ? 1 : inShape0[j0++]; - x2_shape_[i] = (i < fillDimNum1) ? 1 : inShape1[j1++]; - dy_shape_[i] = outShape[i]; - } - - dx1->set_shape(x1->shape()); - dx2->set_shape(x2->shape()); - dx1->set_data_type(dy->data_type()); - dx2->set_data_type(dy->data_type()); - dx1->set_format(dy->format()); - dx2->set_format(dy->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/minimum_grad.h b/mindspore/lite/src/ops/minimum_grad.h deleted file mode 100644 index 83418897b2..0000000000 --- a/mindspore/lite/src/ops/minimum_grad.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
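The loop in MinimumGrad::InferShape above is the standard broadcast alignment trick: right-align each input shape against the gradient shape and pad the missing leading dimensions with 1, so an elementwise gradient kernel can iterate a single rank. Isolated as a sketch:

    #include <vector>
    // Pad `in` with leading 1s to rank `out_rank`, mirroring the
    // x1_shape_/x2_shape_ fill above. Assumes in.size() <= out_rank,
    // which InferShape guarantees by construction.
    std::vector<int> AlignForBroadcast(const std::vector<int> &in, size_t out_rank) {
      std::vector<int> aligned(out_rank, 1);
      size_t fill = out_rank - in.size();  // count of leading 1s to insert
      for (size_t i = 0; i < in.size(); ++i) {
        aligned[fill + i] = in[i];
      }
      return aligned;
    }
    // AlignForBroadcast({3, 4}, 4) -> {1, 1, 3, 4}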
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_MINIMUM_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_MINIMUM_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_grad.h" -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class MinimumGrad : public ArithmeticGrad { - public: -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(MinimumGrad, ArithmeticGrad); - MinimumGrad() = default; - explicit MinimumGrad(schema::PrimitiveT *primitive) : ArithmeticGrad(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - MinimumGrad() = default; - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_MINIMUM_GRAD_H_ diff --git a/mindspore/lite/src/ops/mod.cc b/mindspore/lite/src/ops/mod.cc deleted file mode 100644 index c0024408ac..0000000000 --- a/mindspore/lite/src/ops/mod.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/mod.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Mod::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Mod; - } - if (this->primitive_->value.type != schema::PrimitiveType_Mod) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ModT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -int Mod::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateMod(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Mod, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *ModCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Mod>(primitive); } -Registry ModRegistry(schema::PrimitiveType_Mod, ModCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/mod.h b/mindspore/lite/src/ops/mod.h deleted file mode 100644 index 3a351e6889..0000000000 --- a/mindspore/lite/src/ops/mod.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
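One detail worth noticing while reading these UnPackAttr bodies: Mod's version deletes this->primitive_ and nulls it on the error paths, while Minimum's (above) returns without cleanup, leaking the freshly allocated PrimitiveT on failure. A unique_ptr makes the Mod behavior the default; a hedged sketch of the same flow with simplified types:

    #include <memory>
    struct PrimitiveT { int type = 0; };
    // Build in a unique_ptr and release() only on success, so every early
    // return frees the allocation without a manual delete.
    PrimitiveT *UnpackOrNull(int expected_type, int actual_type) {
      auto prim = std::make_unique<PrimitiveT>();
      prim->type = actual_type;
      if (prim->type != expected_type) {
        return nullptr;       // prim destroyed here automatically
      }
      return prim.release();  // ownership passes to the caller
    }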
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_MOD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_MOD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Mod : public Arithmetic { - public: - Mod() = default; - ~Mod() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Mod, Arithmetic); - explicit Mod(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_FLOOR_MOD_H_ diff --git a/mindspore/lite/src/ops/mul.cc b/mindspore/lite/src/ops/mul.cc deleted file mode 100644 index bb46cb0382..0000000000 --- a/mindspore/lite/src/ops/mul.cc +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/mul.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Mul::GetActivationType() const { return this->primitive_->value.AsMul()->activationType; } - -void Mul::SetActivationType(int activation_type) { - this->primitive_->value.AsMul()->activationType = (schema::ActivationType)activation_type; -} -int Mul::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Mul; - } - if (this->primitive_->value.type != schema::PrimitiveType_Mul) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::MulT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - - return RET_OK; -} - -#else - -int Mul::GetActivationType() const { return this->primitive_->value_as_Mul()->activationType(); } - -int Mul::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Mul(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Mul return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateMul(*fbb, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Mul, val_offset.o); - fbb->Finish(prim_offset); - return 
RET_OK; -} - -PrimitiveC *MulCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Mul>(primitive); } -Registry MulRegistry(schema::PrimitiveType_Mul, MulCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/mul.h b/mindspore/lite/src/ops/mul.h deleted file mode 100644 index 65b31556d7..0000000000 --- a/mindspore/lite/src/ops/mul.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_MUL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_MUL_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Mul : public Arithmetic { - public: - Mul() = default; - ~Mul() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Mul, Arithmetic); - explicit Mul(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - void SetActivationType(int activation_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_MUL_H_ diff --git a/mindspore/lite/src/ops/nchw2nhwc.cc b/mindspore/lite/src/ops/nchw2nhwc.cc deleted file mode 100644 index ff80c7aba8..0000000000 --- a/mindspore/lite/src/ops/nchw2nhwc.cc +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
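Mul is the one arithmetic op in this stretch that carries a real attribute: activationType, which lets the converter fold a trailing ReLU/ReLU6 into the multiply so the runtime does both in one pass. The effect on a kernel is roughly the following (the enum values are assumed for the sketch, not read from the schema):

    #include <algorithm>
    enum ActType { kActNone = 0, kActRelu, kActRelu6 };  // assumed numbering
    // Elementwise multiply with the fused activation applied in the same loop.
    void MulFused(const float *a, const float *b, float *out, int n, ActType act) {
      for (int i = 0; i < n; ++i) {
        float v = a[i] * b[i];
        if (act == kActRelu) v = std::max(v, 0.0f);
        if (act == kActRelu6) v = std::min(std::max(v, 0.0f), 6.0f);
        out[i] = v;
      }
    }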
- */ - -#include "src/ops/nchw2nhwc.h" -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int Nchw2Nhwc::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateNchw2Nhwc(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Nchw2Nhwc, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *Nchw2NhwcCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Nchw2Nhwc>(primitive); -} -Registry Nchw2NhwcRegistry(schema::PrimitiveType_Nchw2Nhwc, Nchw2NhwcCreator); -#endif - -int Nchw2Nhwc::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(schema::Format::Format_NHWC); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> nchw_shape = input->shape(); - if (nchw_shape.size() != 4) { - output->set_shape(nchw_shape); - } else { - std::vector<int> nhwc_shape{nchw_shape}; - nhwc_shape[NHWC_N] = nchw_shape[NCHW_N]; - nhwc_shape[NHWC_H] = nchw_shape[NCHW_H]; - nhwc_shape[NHWC_W] = nchw_shape[NCHW_W]; - nhwc_shape[NHWC_C] = nchw_shape[NCHW_C]; - output->set_shape(nhwc_shape); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/nchw2nhwc.h b/mindspore/lite/src/ops/nchw2nhwc.h deleted file mode 100644 index 5894e993b5..0000000000 --- a/mindspore/lite/src/ops/nchw2nhwc.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
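Nchw2Nhwc::InferShape above only permutes the shape vector; the actual data relayout is the kernel's job, and shapes whose rank is not 4 are deliberately passed through untouched. With the NHWC_*/NCHW_* constants written out, the permutation reduces to:

    #include <vector>
    // NCHW indices: N=0, C=1, H=2, W=3.  NHWC indices: N=0, H=1, W=2, C=3.
    std::vector<int> Nchw2NhwcShape(const std::vector<int> &nchw) {
      if (nchw.size() != 4) return nchw;            // non-4D: pass through
      return {nchw[0], nchw[2], nchw[3], nchw[1]};  // {N, H, W, C}
    }

The Nhwc2Nchw op deleted a few files below is the exact inverse, producing {N, C, H, W} from {N, H, W, C}.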
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_NCHW_2_NHWC_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NCHW_2_NHWC_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Nchw2Nhwc : public PrimitiveC { - public: - Nchw2Nhwc() = default; - ~Nchw2Nhwc() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Nchw2Nhwc, PrimitiveC); - explicit Nchw2Nhwc(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NCHW_2_NHWC_H_ diff --git a/mindspore/lite/src/ops/neg.cc b/mindspore/lite/src/ops/neg.cc deleted file mode 100644 index 8f52f69dcc..0000000000 --- a/mindspore/lite/src/ops/neg.cc +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/neg.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Neg::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Neg; - } - if (this->primitive_->value.type != schema::PrimitiveType_Neg) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::NegT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Neg::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(primitive != nullptr); - MS_ASSERT(fbb != nullptr); - auto val_offset = schema::CreateNeg(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Neg, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *NegCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Neg>(primitive); } -Registry NegRegistry(schema::PrimitiveType_Neg, NegCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/neg.h b/mindspore/lite/src/ops/neg.h deleted file mode 100644 index e22c346d12..0000000000 --- a/mindspore/lite/src/ops/neg.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this 
file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NEG_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Neg : public ArithmeticSelf { - public: - Neg() = default; - ~Neg() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Neg, ArithmeticSelf); - explicit Neg(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NEG_H_ diff --git a/mindspore/lite/src/ops/neg_grad.cc b/mindspore/lite/src/ops/neg_grad.cc deleted file mode 100644 index 4c74be9953..0000000000 --- a/mindspore/lite/src/ops/neg_grad.cc +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/neg_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int NegGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(primitive != nullptr); - MS_ASSERT(fbb != nullptr); - auto val_offset = schema::CreateNegGrad(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NegGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *NegGradCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<NegGrad>(primitive); } -Registry NegGradRegistry(schema::PrimitiveType_NegGrad, NegGradCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/neg_grad.h b/mindspore/lite/src/ops/neg_grad.h deleted file mode 100644 index dd31995eaa..0000000000 --- a/mindspore/lite/src/ops/neg_grad.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class NegGrad : public ArithmeticSelf { - public: - NegGrad() = default; - ~NegGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(NegGrad, ArithmeticSelf); - explicit NegGrad(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NEG_GRAD_H_ diff --git a/mindspore/lite/src/ops/nhwc2nchw.cc b/mindspore/lite/src/ops/nhwc2nchw.cc deleted file mode 100644 index 9e7648be72..0000000000 --- a/mindspore/lite/src/ops/nhwc2nchw.cc +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/nhwc2nchw.h" -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -#ifdef PRIMITIVE_WRITEABLE -#else -int Nhwc2Nchw::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateNhwc2Nchw(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Nhwc2Nchw, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *Nhwc2NchwCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Nhwc2Nchw>(primitive); -} -Registry Nhwc2NchwRegistry(schema::PrimitiveType_Nhwc2Nchw, Nhwc2NchwCreator); -#endif - -int Nhwc2Nchw::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_format(schema::Format::Format_NCHW); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> nhwc_shape = input->shape(); - if (nhwc_shape.size() != 4) { - output->set_shape(nhwc_shape); - } else { - std::vector<int> nchw_shape{nhwc_shape}; - nchw_shape[NCHW_N] = nhwc_shape[NHWC_N]; - nchw_shape[NCHW_C] = nhwc_shape[NHWC_C]; - nchw_shape[NCHW_H] = nhwc_shape[NHWC_H]; - nchw_shape[NCHW_W] = nhwc_shape[NHWC_W]; - output->set_shape(nchw_shape); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/nhwc2nchw.h b/mindspore/lite/src/ops/nhwc2nchw.h deleted file mode 100644 index f76d22695a..0000000000 --- a/mindspore/lite/src/ops/nhwc2nchw.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_NHWC_2_NCHW_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NHWC_2_NCHW_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Nhwc2Nchw : public PrimitiveC { - public: - Nhwc2Nchw() = default; - ~Nhwc2Nchw() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Nhwc2Nchw, PrimitiveC); - explicit Nhwc2Nchw(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NHWC_2_NCHW_H_ diff --git a/mindspore/lite/src/ops/non_max_suppression.cc b/mindspore/lite/src/ops/non_max_suppression.cc deleted file mode 100644 index 131a8594c1..0000000000 --- a/mindspore/lite/src/ops/non_max_suppression.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/non_max_suppression.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -void NonMaxSuppression::SetCenterPointBox(int centerPointBox) { - this->primitive_->value.AsNonMaxSuppression()->centerPointBox = centerPointBox; -} - -int NonMaxSuppression::GetCenterPointBox() const { - return this->primitive_->value.AsNonMaxSuppression()->centerPointBox; -} -#else -int NonMaxSuppression::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_NonMaxSuppression(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_NonMaxSuppression return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateNonMaxSuppression(*fbb, attr->centerPointBox()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NonMaxSuppression, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int NonMaxSuppression::GetCenterPointBox() const { - return this->primitive_->value_as_NonMaxSuppression()->centerPointBox(); -} - -PrimitiveC *NonMaxSuppressionCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<NonMaxSuppression>(primitive); -} - -Registry NonMaxSuppressionRegistry(schema::PrimitiveType_NonMaxSuppression, NonMaxSuppressionCreator); - -#endif -int NonMaxSuppression::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(kNumberTypeInt32); - output->set_format(input->format()); - MS_LOG(INFO) << "NonMaxSuppression infer shape in runtime."; - return 
RET_INFER_INVALID; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/non_max_suppression.h b/mindspore/lite/src/ops/non_max_suppression.h deleted file mode 100644 index ecfcaa3fbc..0000000000 --- a/mindspore/lite/src/ops/non_max_suppression.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_NON_MAX_SUPPRESSION_H_ -#define LITE_MINDSPORE_LITE_NON_MAX_SUPPRESSION_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class NonMaxSuppression : public PrimitiveC { - public: - NonMaxSuppression() = default; - ~NonMaxSuppression() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(NonMaxSuppression, PrimitiveC); - explicit NonMaxSuppression(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetCenterPointBox(int centerPointBox); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetCenterPointBox() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ diff --git a/mindspore/lite/src/ops/nonzero.cc b/mindspore/lite/src/ops/nonzero.cc deleted file mode 100644 index d4e7a3793c..0000000000 --- a/mindspore/lite/src/ops/nonzero.cc +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
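NonMaxSuppression belongs to the data-dependent family: how many boxes survive cannot be known until the scores exist, so InferShape above only pins dtype and format and returns RET_INFER_INVALID to push the real shape computation to runtime. The caller-side contract is roughly the following (the scheduler types here are a sketch, not the real ones):

    constexpr int RET_OK = 0;
    constexpr int RET_INFER_INVALID = -4;  // value assumed; see include/errorcode.h
    struct Node {
      bool infer_done = false;
      int InferShape() { return RET_INFER_INVALID; }  // e.g. NonMaxSuppression
    };
    int PrepareNode(Node *node) {
      int ret = node->InferShape();
      if (ret == RET_INFER_INVALID) {
        node->infer_done = false;  // re-infer just before Run(), once data exists
        return RET_OK;             // deferred, not a failure
      }
      node->infer_done = true;
      return ret;
    }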
- */ - -#include "src/ops/nonzero.h" -#include <algorithm> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int NonZero::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_NonZero; - } - if (this->primitive_->value.type != schema::PrimitiveType_NonZero) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::NonZeroT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - PopulaterQuantParam(prim, inputs); - return RET_OK; -} -#else -int NonZero::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_NonZero(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_NonZero return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateNonZero(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NonZero, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *NonZeroCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<NonZero>(primitive); } -Registry NonZeroRegistry(schema::PrimitiveType_NonZero, NonZeroCreator); -#endif -template <typename T> -void CalShape(const T *data, const std::vector<Tensor *> &inputs, std::vector<int> *out_shape) { - int input_count = inputs[0]->ElementsNum(); - int input_dim_size = inputs[0]->shape().empty() ? 
1 : inputs[0]->shape().size(); - out_shape->emplace_back(input_dim_size); - int nonzero_size = 0; - for (int i = 0; i < input_count; i++) { - if (static_cast<int>(data[i]) != 0) { - nonzero_size++; - } - } - if (nonzero_size == 0) { - return; - } else { - out_shape->emplace_back(nonzero_size); - } -} -int NonZero::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - MS_ASSERT(inputs_.size() == 1); - auto input_tensor = inputs_.front(); - MS_ASSERT(input_tensor != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(TypeId::kNumberTypeInt32); - output->set_format(input_tensor->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> out_shape; - if (inputs_.size() == kSingleNum) { - if (input_tensor->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - switch (input_tensor->data_type()) { - case kNumberTypeBool: { - auto data = reinterpret_cast<bool *>(input_tensor->MutableData()); - CalShape<bool>(data, inputs_, &out_shape); - } break; - default: { - MS_LOG(ERROR) << "NonZero weight tensor has unsupported dataType: " << input_tensor->data_type(); - return RET_INFER_ERR; - } - } - } else { - MS_LOG(ERROR) << "inputs tensor size invalid."; - return RET_INFER_ERR; - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/nonzero.h b/mindspore/lite/src/ops/nonzero.h deleted file mode 100644 index ba769e8076..0000000000 --- a/mindspore/lite/src/ops/nonzero.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
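CalShape above pins down NonZero's output convention: a 2-D result of shape {rank(input), nonzero_count}, one column of coordinates per hit, with a scalar input treated as rank 1. Restated as a sketch over a bool buffer:

    #include <vector>
    // Output shape of NonZero per the CalShape above; if nothing is set,
    // only the rank dimension is emitted.
    std::vector<int> NonZeroShape(const bool *data, int count, int input_rank) {
      if (input_rank == 0) input_rank = 1;  // empty shape counted as one dim
      int hits = 0;
      for (int i = 0; i < count; ++i) {
        if (data[i]) ++hits;
      }
      if (hits == 0) return {input_rank};
      return {input_rank, hits};
    }

Because the count depends on tensor contents, InferShape can only run once the weight data is present; otherwise it defers with RET_INFER_INVALID, the same pattern as NonMaxSuppression above.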
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_NONZERO_H_ -#define MINDSPORE_LITE_SRC_OPS_NONZERO_H_ - -#include <cmath> -#include <memory> -#include <set> -#include <vector> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class NonZero : public PrimitiveC { - public: - NonZero() = default; - ~NonZero() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(NonZero, PrimitiveC); - explicit NonZero(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_NONZERO_H_ diff --git a/mindspore/lite/src/ops/not_equal.cc b/mindspore/lite/src/ops/not_equal.cc deleted file mode 100644 index 618025c400..0000000000 --- a/mindspore/lite/src/ops/not_equal.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/not_equal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int NotEqual::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateNotEqual(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_NotEqual, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *NotEqualCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<NotEqual>(primitive); -} -Registry NotEqualRegistry(schema::PrimitiveType_NotEqual, NotEqualCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/not_equal.h b/mindspore/lite/src/ops/not_equal.h deleted file mode 100644 index 464d27d685..0000000000 --- a/mindspore/lite/src/ops/not_equal.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_compare.h" - -namespace mindspore { -namespace lite { -class NotEqual : public ArithmeticCompare { - public: - NotEqual() = default; - ~NotEqual() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(NotEqual, ArithmeticCompare); - explicit NotEqual(schema::PrimitiveT *primitive) : ArithmeticCompare(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_NOT_EQUAL_H_ diff --git a/mindspore/lite/src/ops/one_hot.cc b/mindspore/lite/src/ops/one_hot.cc deleted file mode 100644 index c580134ba0..0000000000 --- a/mindspore/lite/src/ops/one_hot.cc +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/one_hot.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int OneHot::GetAxis() const { return this->primitive_->value.AsOneHot()->axis; } - -void OneHot::SetAxis(int axis) { this->primitive_->value.AsOneHot()->axis = axis; } - -int OneHot::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_OneHot; - } - if (this->primitive_->value.type != schema::PrimitiveType_OneHot) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::OneHotT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->axis = -1; - if (prim.GetAttr("axis") != nullptr) { - attr->axis = CastToInt(prim.GetAttr("axis")).front(); - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int OneHot::GetAxis() const { return this->primitive_->value_as_OneHot()->axis(); } - -int OneHot::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_OneHot(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_OneHot return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateOneHot(*fbb, attr->axis()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_OneHot, val_offset.o); - 
fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *OneHotCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<OneHot>(primitive); } -Registry OneHotRegistry(schema::PrimitiveType_OneHot, OneHotCreator); -#endif - -namespace { -constexpr size_t kOneHotInputNum = 4; -constexpr size_t kOneHotInputNumOpt = 3; -} // namespace -int OneHot::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - if (this->primitive_ == nullptr) { - return RET_NULL_PTR; - } - - int axis = GetAxis(); - // indices, depth, on_value, off_value - if (inputs.size() != kOneHotInputNum && inputs.size() != kOneHotInputNumOpt) { - MS_LOG(ERROR) << "OneHot got inputs num " << inputs.size() << ", should be " << kOneHotInputNum << " or " - << kOneHotInputNumOpt; - return RET_ERROR; - } - auto depth_tensor = inputs.at(1); - if (depth_tensor == nullptr) { - return RET_NULL_PTR; - } - const int *depth = static_cast<int *>(depth_tensor->MutableData()); - auto input = inputs.front(); - if (input == nullptr) { - return RET_NULL_PTR; - } - auto on_value = inputs.at(2); - if (on_value == nullptr) { - return RET_NULL_PTR; - } - auto output = outputs.front(); - if (output == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(on_value->data_type()); - output->set_format(on_value->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - const auto input_shape = input->shape(); - int input_rank = static_cast<int>(input_shape.size()); - if (axis < 0) { - axis += input_rank + 1; - } - std::vector<int> output_shape(input_shape); - output_shape.insert(output_shape.cbegin() + axis, *depth); - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/one_hot.h b/mindspore/lite/src/ops/one_hot.h deleted file mode 100644 index 61b3dc522c..0000000000 --- a/mindspore/lite/src/ops/one_hot.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
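OneHot's shape rule has one subtlety worth calling out: a negative axis is normalized against rank + 1 rather than rank, because the op inserts a dimension and the axis is counted in output coordinates. Compactly:

    #include <vector>
    // Output shape of OneHot: the input shape with `depth` inserted at `axis`.
    std::vector<int> OneHotShape(std::vector<int> shape, int depth, int axis) {
      int rank = static_cast<int>(shape.size());
      if (axis < 0) axis += rank + 1;  // negative axis counts from the output end
      shape.insert(shape.begin() + axis, depth);
      return shape;
    }
    // OneHotShape({2, 3}, 10, -1) -> {2, 3, 10}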
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ONE_HOT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ONE_HOT_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class OneHot : public PrimitiveC { - public: - OneHot() = default; - ~OneHot() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(OneHot, PrimitiveC); - explicit OneHot(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ONE_HOT_H_ diff --git a/mindspore/lite/src/ops/oneslike.cc b/mindspore/lite/src/ops/oneslike.cc deleted file mode 100644 index f564195eb0..0000000000 --- a/mindspore/lite/src/ops/oneslike.cc +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/oneslike.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int OnesLike::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_OnesLike; - } - if (this->primitive_->value.type != schema::PrimitiveType_OnesLike) { - MS_LOG(ERROR) << "PrimitiveType_OnesLike primitive value type : " - << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" - << schema::EnumNamePrimitiveType(schema::PrimitiveType_OnesLike); - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::OnesLikeT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int OnesLike::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_OnesLike(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_OnesLike return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateOnesLike(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_OnesLike, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC 
*OnesLikeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<OnesLike>(primitive); -} -Registry OnesLikeRegistry(schema::PrimitiveType_OnesLike, OnesLikeCreator); -#endif -int OnesLike::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - Tensor *x = inputs_.at(0); - Tensor *out = outputs_.at(0); - std::vector<int> x_shape = x->shape(); - std::vector<int> output_shape(x_shape.size()); - output_shape.assign(x_shape.begin(), x_shape.end()); - out->set_shape(output_shape); - out->set_format(x->format()); - out->set_data_type(x->data_type()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/oneslike.h b/mindspore/lite/src/ops/oneslike.h deleted file mode 100644 index e89095e0d4..0000000000 --- a/mindspore/lite/src/ops/oneslike.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -#ifndef LITE_SRC_OPS_ONESLIKE_H_ -#define LITE_SRC_OPS_ONESLIKE_H_ -namespace mindspore { -namespace lite { -class OnesLike : public PrimitiveC { - public: - OnesLike() = default; - ~OnesLike() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(OnesLike, PrimitiveC); - explicit OnesLike(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_SRC_OPS_ONESLIKE_H_ diff --git a/mindspore/lite/src/ops/ops_def.cc b/mindspore/lite/src/ops/ops_def.cc index 720662a05a..590bf935e9 100644 --- a/mindspore/lite/src/ops/ops_def.cc +++ b/mindspore/lite/src/ops/ops_def.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,5 +13,1055 @@ * See the License for the specific language governing permissions and * limitations under the License. 
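The rewritten ops_def.cc that follows is the heart of this patch. Instead of one hand-maintained .h/.cc pair per op (everything deleted above), each primitive is named exactly once in an OP_TYPE/OP_SCHEMA_DEF table, and the build expands that table into whichever artifact is needed. This is the classic X-macro technique; a reduced sketch of the mechanism, with illustrative macro names rather than the ones in src/ops/ops_def.h:

    // One list, expanded twice with different definitions of X.
    #define OP_LIST \
      X(Abs)        \
      X(Activation) \
      X(AddFusion)

    #define X(name) kPrim_##name,
    enum PrimType { OP_LIST kPrimTypeMax };  // expansion 1: the enum
    #undef X

    #define X(name) #name,
    static const char *kPrimNames[] = {OP_LIST};  // expansion 2: printable names
    #undef X

Presumably OP_TYPE and the OP_ATTR family are expanded analogously, once to generate C++ definitions and once to emit the flatbuffer schema text, which is what lets the per-op files collapse into the single table below.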
*/ +#include "src/ops/ops_def.h" +#include "src/ops/ops_func_declare.h" -#include "src/ops/schema_def.h" +OP_TYPE_DEF_BEGIN(PrimitiveType) +OP_TYPE(Abs) +OP_TYPE(Activation) +OP_TYPE(ActivationGrad) +OP_TYPE(Adam) +OP_TYPE(AddFusion) +OP_TYPE(AdderFusion) +OP_TYPE(AddGrad) +OP_TYPE(AddN) +OP_TYPE(All) +OP_TYPE(ApplyMomentum) +OP_TYPE(ArgMaxFusion) +OP_TYPE(ArgMinFusion) +OP_TYPE(Assert) +OP_TYPE(Assign) +OP_TYPE(AssignAdd) +OP_TYPE(AudioSpectrogram) +OP_TYPE(AvgPoolFusion) +OP_TYPE(AvgPoolGrad) +OP_TYPE(BatchNorm) +OP_TYPE(BatchNormGrad) +OP_TYPE(BatchToSpace) +OP_TYPE(BatchToSpaceND) +OP_TYPE(BiasAdd) +OP_TYPE(BinaryCrossEntropy) +OP_TYPE(BinaryCrossEntropyGrad) +OP_TYPE(BiasAddGrad) +OP_TYPE(BroadcastTo) +OP_TYPE(Cast) +OP_TYPE(Ceil) +OP_TYPE(Clip) +OP_TYPE(Concat) +OP_TYPE(ControlDepend) +OP_TYPE(Conv2DBackpropFilterFusion) +OP_TYPE(Conv2DBackpropInputFusion) +OP_TYPE(Conv2DFusion) +OP_TYPE(Conv2dTransposeFusion) +OP_TYPE(Cos) +OP_TYPE(ConstantOfShape) +OP_TYPE(Crop) +OP_TYPE(CustomExtractFeatures) +OP_TYPE(CustomNormalize) +OP_TYPE(CustomPredict) +OP_TYPE(DeConv2DGradFilter) +OP_TYPE(Depend) +OP_TYPE(DepthToSpace) +OP_TYPE(DetectionPostProcess) +OP_TYPE(DivFusion) +OP_TYPE(DivGrad) +OP_TYPE(Dropout) +OP_TYPE(DropoutGrad) +OP_TYPE(Elu) +OP_TYPE(Eltwise) +OP_TYPE(Equal) +OP_TYPE(EmbeddingLookupFusion) +OP_TYPE(ExpFusion) +OP_TYPE(ExpandDims) +OP_TYPE(FakeQuantWithMinMaxVars) +OP_TYPE(FakeQuantWithMinMaxVarsPerChannel) +OP_TYPE(FftReal) +OP_TYPE(FftImag) +OP_TYPE(Flatten) +OP_TYPE(FlattenGrad) +OP_TYPE(Floor) +OP_TYPE(FloorDiv) +OP_TYPE(FloorMod) +OP_TYPE(Fill) +OP_TYPE(FullConnection) +OP_TYPE(FusedBatchNorm) +OP_TYPE(Gather) +OP_TYPE(GatherNd) +OP_TYPE(Greater) +OP_TYPE(GreaterEqual) +OP_TYPE(HashtableLookup) +OP_TYPE(InstanceNorm) +OP_TYPE(LayerNormFusion) +OP_TYPE(LeakyRelu) +OP_TYPE(Less) +OP_TYPE(LessEqual) +OP_TYPE(Log) +OP_TYPE(LogGrad) +OP_TYPE(LogicalAnd) +OP_TYPE(LogicalNot) +OP_TYPE(LogicalOr) +OP_TYPE(LpNormalization) +OP_TYPE(LRN) +OP_TYPE(LshProjection) +OP_TYPE(LSTM) +OP_TYPE(L2NormalizeFusion) +OP_TYPE(MatMul) +OP_TYPE(Maximum) +OP_TYPE(MaximumGrad) +OP_TYPE(MaxPoolFusion) +OP_TYPE(MaxPoolGrad) +OP_TYPE(Merge) +OP_TYPE(Mfcc) +OP_TYPE(Minimum) +OP_TYPE(MinimumGrad) +OP_TYPE(Mod) +OP_TYPE(MulFusion) +OP_TYPE(MulGrad) +OP_TYPE(Neg) +OP_TYPE(NegGrad) +OP_TYPE(NotEqual) +OP_TYPE(NonMaxSuppression) +OP_TYPE(OneHot) +OP_TYPE(OnesLike) +OP_TYPE(PadFusion) +OP_TYPE(PartialFusion) +OP_TYPE(PowerGrad) +OP_TYPE(PowFusion) +OP_TYPE(PriorBox) +OP_TYPE(PReLUFusion) +OP_TYPE(QuantDTypeCast) +OP_TYPE(Rank) +OP_TYPE(Range) +OP_TYPE(Reciprocal) +OP_TYPE(RealDiv) +OP_TYPE(ReduceFusion) +OP_TYPE(Reshape) +OP_TYPE(Resize) +OP_TYPE(ReverseSequence) +OP_TYPE(ReverseV2) +OP_TYPE(Rfft) +OP_TYPE(ROIPooling) +OP_TYPE(Round) +OP_TYPE(Rsqrt) +OP_TYPE(ScaleFusion) +OP_TYPE(ScatterNd) +OP_TYPE(SGD) +OP_TYPE(Shape) +OP_TYPE(SigmoidCrossEntropyWithLogits) +OP_TYPE(SigmoidCrossEntropyWithLogitsGrad) +OP_TYPE(Sin) +OP_TYPE(SkipGram) +OP_TYPE(SliceFusion) +OP_TYPE(SmoothL1Loss) +OP_TYPE(SmoothL1LossGrad) +OP_TYPE(Softmax) +OP_TYPE(SoftmaxCrossEntropyWithLogits) +OP_TYPE(SpaceToBatch) +OP_TYPE(SpaceToBatchND) +OP_TYPE(SpaceToDepth) +OP_TYPE(SparseSoftmaxCrossEntropy) +OP_TYPE(SparseToDense) +OP_TYPE(Split) +OP_TYPE(Sqrt) +OP_TYPE(Squeeze) +OP_TYPE(Square) +OP_TYPE(SquaredDifference) +OP_TYPE(Stack) +OP_TYPE(StridedSlice) +OP_TYPE(SubFusion) +OP_TYPE(SubGrad) +OP_TYPE(Switch) +OP_TYPE(TensorListFromTensor) +OP_TYPE(TensorListGetItem) +OP_TYPE(TensorListReserve) +OP_TYPE(TensorListSetItem) 
+OP_TYPE(TensorListStack) +OP_TYPE(TileFusion) +OP_TYPE(TopKFusion) +OP_TYPE(Transpose) +OP_TYPE(Unique) +OP_TYPE(UnsortedSegmentSum) +OP_TYPE(Unsqueeze) +OP_TYPE(Unstack) +OP_TYPE(While) +OP_TYPE(Where) +OP_TYPE(ZerosLike) +OP_TYPE(Select) +OP_TYPE(If) +OP_TYPE(Gru) +OP_TYPE(NonZero) +OP_TYPE(InvertPermutation) +OP_TYPE(Size) +OP_TYPE(RandomStandardNormal) +OP_TYPE(CropAndResize) +OP_TYPE(Erf) +OP_TYPE(StridedSliceGrad) +OP_TYPE(IsFinite) +OP_TYPE(LinSpace) +OP_TYPE(UniformReal) +OP_TYPE(AbsGrad) +OP_TYPE_DEF_END(PrimitiveType) + +OP_SCHEMA_DEF(Abs) +OP_SCHEMA_DEF_END(Abs) + +OP_SCHEMA_DEF(Activation) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_ATTR(alpha, float) +OP_ATTR(min_val, float) +OP_ATTR(max_val, float) +OP_SCHEMA_DEF_END(Activation) + +OP_SCHEMA_DEF(ActivationGrad) +OP_ATTR_ENUM(activation_type, ActivationType) +OP_ATTR(alpha, float) +OP_SCHEMA_DEF_END(ActivationGrad) + +OP_SCHEMA_DEF(Adam) +OP_ATTR(use_locking, bool) +OP_ATTR(use_nesterov, bool) +OP_SCHEMA_DEF_END(Adam) + +OP_SCHEMA_DEF(AddFusion) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(AddFusion) + +OP_SCHEMA_DEF(AdderFusion) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(kernel_size, [long]) +OP_ATTR(stride, [long]) +OP_ATTR(dilation, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR(pad_list, [long]) +OP_ATTR(group, long) +OP_ATTR(in_channel, long) +OP_ATTR(out_channel, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(AdderFusion) + +OP_SCHEMA_DEF(AddGrad) +OP_SCHEMA_DEF_END(AddGrad) + +OP_SCHEMA_DEF(AddN) +OP_SCHEMA_DEF_END(AddN) + +OP_SCHEMA_DEF(All) +OP_ATTR(keep_dims, long) +OP_SCHEMA_DEF_END(All) + +OP_SCHEMA_DEF(ApplyMomentum) +OP_ATTR(use_nesterov, bool) +OP_ATTR(use_locking, bool) +OP_ATTR(gradient_scale, float) +OP_SCHEMA_DEF_END(ApplyMomentum) + +OP_SCHEMA_DEF(ArgMaxFusion) +OP_ATTR(axis, long) +OP_ATTR_WITH_VALUE(top_k, long, 1) +OP_ATTR(keep_dims, bool) +OP_ATTR(out_max_value, bool) +OP_SCHEMA_DEF_END(ArgMaxFusion) + +OP_SCHEMA_DEF(ArgMinFusion) +OP_ATTR(axis, long) +OP_ATTR(top_k, long) +OP_ATTR(keep_dims, bool) +OP_ATTR(out_max_value, bool) +OP_SCHEMA_DEF_END(ArgMinFusion) + +OP_SCHEMA_DEF(Assert) +OP_ATTR(summarize, long) +OP_SCHEMA_DEF_END(Assert) + +OP_SCHEMA_DEF(Assign) +OP_SCHEMA_DEF_END(Assign) + +OP_SCHEMA_DEF(AssignAdd) +OP_SCHEMA_DEF_END(AssignAdd) + +OP_SCHEMA_DEF(AudioSpectrogram) +OP_ATTR(window_size, long) +OP_ATTR(stride, long) +OP_ATTR(mag_square, bool) +OP_SCHEMA_DEF_END(AudioSpectrogram) + +OP_SCHEMA_DEF(AvgPoolFusion) +OP_ATTR(kernel_size, [long]) +OP_ATTR(strides, [long]) +OP_ATTR(pad, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR_ENUM(round_mode, RoundMode) +OP_ATTR_ENUM(format, Format) +OP_ATTR(global, bool) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(AvgPoolFusion) + +OP_SCHEMA_DEF(AvgPoolGrad) +OP_ATTR(kernel_size, [long]) +OP_ATTR(strides, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR_ENUM(format, Format) +OP_SCHEMA_DEF_END(AvgPoolGrad) + +OP_SCHEMA_DEF(BatchNorm) +OP_ATTR(epsilon, float) +OP_ATTR_ENUM(format, Format) +OP_ATTR(is_training, bool) +OP_SCHEMA_DEF_END(BatchNorm) + +OP_SCHEMA_DEF(BatchNormGrad) +OP_ATTR(epsilon, float) +OP_SCHEMA_DEF_END(BatchNormGrad) + +OP_SCHEMA_DEF(BatchToSpace) +OP_ATTR(block_size, [long]) +OP_ATTR_VEC2D(crops, Vec2D); +OP_SCHEMA_DEF_END(BatchToSpace) + +OP_SCHEMA_DEF(BatchToSpaceND) +OP_ATTR(block_shape, [long]) +OP_ATTR_VEC2D(crops, Vec2D); +OP_SCHEMA_DEF_END(BatchToSpaceND) + +OP_SCHEMA_DEF(BiasAdd) 
+OP_ATTR_ENUM(format, Format) +OP_SCHEMA_DEF_END(BiasAdd) + +OP_SCHEMA_DEF(BinaryCrossEntropy) +OP_ATTR_ENUM(reduction, Reduction) +OP_SCHEMA_DEF_END(BinaryCrossEntropy) + +OP_SCHEMA_DEF(BinaryCrossEntropyGrad) +OP_ATTR_ENUM_WITH_VALUE(reduction, Reduction, 1) +OP_SCHEMA_DEF_END(BinaryCrossEntropyGrad) + +OP_SCHEMA_DEF(BiasAddGrad) +OP_SCHEMA_DEF_END(BiasAddGrad) + +OP_SCHEMA_DEF(BroadcastTo) +OP_ATTR(shape, [long]) +OP_SCHEMA_DEF_END(BroadcastTo) + +OP_SCHEMA_DEF(Cast) +OP_SCHEMA_DEF_END(Cast) + +OP_SCHEMA_DEF(Ceil) +OP_SCHEMA_DEF_END(Ceil) + +OP_SCHEMA_DEF(Clip) +OP_ATTR(max, float) +OP_ATTR(min, float) +OP_SCHEMA_DEF_END(Clip) + +OP_SCHEMA_DEF(Concat) +OP_ATTR(axis, long) +OP_SCHEMA_DEF_END(Concat) + +OP_SCHEMA_DEF(ControlDepend) +OP_ATTR(depend_mode, long) +OP_SCHEMA_DEF_END(ControlDepend) + +OP_SCHEMA_DEF(Conv2DBackpropFilterFusion) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(kernel_size, [long]) +OP_ATTR(stride, [long]) +OP_ATTR(dilation, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR(pad_list, [long]) +OP_ATTR(mode, long) +OP_ATTR(group, long) +OP_ATTR(in_channel, long) +OP_ATTR(out_channel, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(Conv2DBackpropFilterFusion) + +OP_SCHEMA_DEF(Conv2DBackpropInputFusion) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(kernel_size, [long]) +OP_ATTR(stride, [long]) +OP_ATTR(dilation, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR(pad, [long]) +OP_ATTR(pad_list, [long]) +OP_ATTR(mode, long) +OP_ATTR(group, long) +OP_ATTR(in_channel, long) +OP_ATTR(out_channel, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(Conv2DBackpropInputFusion) + +OP_SCHEMA_DEF(Conv2DFusion) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(kernel_size, [long]) +OP_ATTR(stride, [long]) +OP_ATTR(dilation, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR(pad_list, [long]) +OP_ATTR(mode, long) +OP_ATTR(group, long) +OP_ATTR(in_channel, long) +OP_ATTR(out_channel, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(Conv2DFusion) + +OP_SCHEMA_DEF(Conv2dTransposeFusion) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(kernel_size, [long]) +OP_ATTR(stride, [long]) +OP_ATTR(dilation, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR(pad, [long]) +OP_ATTR(pad_list, [long]) +OP_ATTR(mode, long) +OP_ATTR(group, long) +OP_ATTR(in_channel, long) +OP_ATTR(out_channel, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(Conv2dTransposeFusion) + +OP_SCHEMA_DEF(Cos) +OP_SCHEMA_DEF_END(Cos) + +OP_SCHEMA_DEF(ConstantOfShape) +OP_ATTR(data_type, long) +OP_ATTR(value, [float]) +OP_SCHEMA_DEF_END(ConstantOfShape) + +OP_SCHEMA_DEF(Crop) +OP_ATTR(axis, long) +OP_ATTR(offsets, [long]) +OP_SCHEMA_DEF_END(Crop) + +OP_SCHEMA_DEF(CustomExtractFeatures) +OP_SCHEMA_DEF_END(CustomExtractFeatures) + +OP_SCHEMA_DEF(CustomNormalize) +OP_SCHEMA_DEF_END(CustomNormalize) + +OP_SCHEMA_DEF(CustomPredict) +OP_ATTR(output_num, long) +OP_ATTR(weight_threshold, float) +OP_SCHEMA_DEF_END(CustomPredict) + +OP_SCHEMA_DEF(DeConv2DGradFilter) +OP_ATTR(in_channel, long); +OP_ATTR(out_channel, long); +OP_ATTR(kernel_size, [long]); +OP_ATTR_ENUM(pad_mode, PadMode); +OP_ATTR(pad_list, [long]); +OP_ATTR(stride, [long]); +OP_ATTR(dilation, [long]); +OP_ATTR(group, long); +OP_ATTR_ENUM(format, Format); +OP_ATTR_ENUM(activation_type, ActivationType); +OP_SCHEMA_DEF_END(DeConv2DGradFilter) + +OP_SCHEMA_DEF(Depend) +OP_SCHEMA_DEF_END(Depend) + 
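+// Illustrative sketch (assuming GEN_SCHEMA_DEF is defined; see ops_def.h below):
+// each OP_SCHEMA_DEF ... OP_SCHEMA_DEF_END block in this file appends one
+// flatbuffer table to the generated schema text, so the Crop definition above
+// should yield roughly:
+//
+//   table Crop {
+//       axis: long;
+//       offsets: [long];
+//   }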
+OP_SCHEMA_DEF(DepthToSpace) +OP_ATTR(block_size, long) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_SCHEMA_DEF_END(DepthToSpace) + +OP_SCHEMA_DEF(DetectionPostProcess) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR(input_size, long) +OP_ATTR(scale, [float]) +OP_ATTR(nms_iou_threshold, float) +OP_ATTR(nms_score_threshold, float) +OP_ATTR(max_detections, long) +OP_ATTR(detections_per_class, long) +OP_ATTR(max_classes_per_detection, long) +OP_ATTR(num_classes, long) +OP_ATTR(use_regular_nms, bool) +OP_ATTR(out_quantized, bool) +OP_SCHEMA_DEF_END(DetectionPostProcess) + +OP_SCHEMA_DEF(DivFusion) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(DivFusion) + +OP_SCHEMA_DEF(DivGrad) +OP_SCHEMA_DEF_END(DivGrad) + +OP_SCHEMA_DEF(Dropout) +OP_ATTR_WITH_VALUE(keep_prob, float, 0.5) +OP_SCHEMA_DEF_END(Dropout) + +OP_SCHEMA_DEF(DropoutGrad) +OP_ATTR(keep_prob, float) +OP_SCHEMA_DEF_END(DropoutGrad) + +OP_SCHEMA_DEF(Elu) +OP_ATTR(alpha, float) +OP_SCHEMA_DEF_END(Elu) + +OP_SCHEMA_DEF(Eltwise) +OP_ATTR_ENUM(mode, EltwiseMode) +OP_SCHEMA_DEF_END(Eltwise) + +OP_SCHEMA_DEF(Equal) +OP_SCHEMA_DEF_END(Equal) + +OP_SCHEMA_DEF(EmbeddingLookupFusion) +OP_ATTR(max_norm, float) +OP_SCHEMA_DEF_END(EmbeddingLookupFusion) + +OP_SCHEMA_DEF(ExpFusion) +OP_ATTR_WITH_VALUE(base, float, -1) +OP_ATTR_WITH_VALUE(scale, float, 1.0) +OP_ATTR_WITH_VALUE(shift, float, 0.0) +OP_SCHEMA_DEF_END(ExpFusion) + +OP_SCHEMA_DEF(ExpandDims) +OP_SCHEMA_DEF_END(ExpandDims) + +OP_SCHEMA_DEF(FakeQuantWithMinMaxVars) +OP_ATTR(num_bits, long) +OP_ATTR(narrow_range, bool) +OP_SCHEMA_DEF_END(FakeQuantWithMinMaxVars) + +OP_SCHEMA_DEF(FakeQuantWithMinMaxVarsPerChannel) +OP_ATTR(num_bits, long) +OP_ATTR(narrow_range, bool) +OP_SCHEMA_DEF_END(FakeQuantWithMinMaxVarsPerChannel) + +OP_SCHEMA_DEF(FftReal) +OP_SCHEMA_DEF_END(FftReal) + +OP_SCHEMA_DEF(FftImag) +OP_SCHEMA_DEF_END(FftImag) + +OP_SCHEMA_DEF(Flatten) +OP_SCHEMA_DEF_END(Flatten) + +OP_SCHEMA_DEF(FlattenGrad) +OP_SCHEMA_DEF_END(FlattenGrad) + +OP_SCHEMA_DEF(Floor) +OP_SCHEMA_DEF_END(Floor) + +OP_SCHEMA_DEF(FloorDiv) +OP_SCHEMA_DEF_END(FloorDiv) + +OP_SCHEMA_DEF(FloorMod) +OP_SCHEMA_DEF_END(FloorMod) + +OP_SCHEMA_DEF(Fill) +OP_SCHEMA_DEF_END(Fill) + +OP_SCHEMA_DEF(FullConnection) +OP_ATTR(has_bias, bool) +OP_ATTR(use_axis, bool) +OP_ATTR(axis, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(FullConnection) + +OP_SCHEMA_DEF(FusedBatchNorm) +OP_ATTR_WITH_VALUE(epsilon, float, 0.0001) +OP_ATTR_WITH_VALUE(momentum, float, 0.9) +OP_ATTR(mode, long) +OP_SCHEMA_DEF_END(FusedBatchNorm) + +OP_SCHEMA_DEF(Gather) +OP_SCHEMA_DEF_END(Gather) + +OP_SCHEMA_DEF(GatherNd) +OP_SCHEMA_DEF_END(GatherNd) + +OP_SCHEMA_DEF(Greater) +OP_SCHEMA_DEF_END(Greater) + +OP_SCHEMA_DEF(GreaterEqual) +OP_SCHEMA_DEF_END(GreaterEqual) + +OP_SCHEMA_DEF(HashtableLookup) +OP_SCHEMA_DEF_END(HashtableLookup) + +OP_SCHEMA_DEF(InstanceNorm) +OP_ATTR(epsilon, float) +OP_SCHEMA_DEF_END(InstanceNorm) + +OP_SCHEMA_DEF(LayerNormFusion) +OP_ATTR(begin_norm_axis, long) +OP_ATTR_WITH_VALUE(epsilon, float, 0.00001) +OP_ATTR(elementwise_affine, bool) +OP_ATTR(begin_params_axis, long) +OP_SCHEMA_DEF_END(LayerNormFusion) + +OP_SCHEMA_DEF(LeakyRelu) +OP_ATTR(negative_slope, float) +OP_SCHEMA_DEF_END(LeakyRelu) + +OP_SCHEMA_DEF(Less) +OP_SCHEMA_DEF_END(Less) + +OP_SCHEMA_DEF(LessEqual) +OP_SCHEMA_DEF_END(LessEqual) + +OP_SCHEMA_DEF(Log) +OP_SCHEMA_DEF_END(Log) + +OP_SCHEMA_DEF(LogGrad) +OP_SCHEMA_DEF_END(LogGrad) + +OP_SCHEMA_DEF(LogicalAnd) +OP_SCHEMA_DEF_END(LogicalAnd) + 
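+// Illustrative sketch: OP_ATTR_WITH_VALUE and OP_ATTR_ENUM_WITH_VALUE also emit
+// a schema default after the type, so the Dropout definition above should yield
+// roughly:
+//
+//   table Dropout {
+//       keep_prob: float = 0.5;
+//   }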
+OP_SCHEMA_DEF(LogicalNot) +OP_SCHEMA_DEF_END(LogicalNot) + +OP_SCHEMA_DEF(LogicalOr) +OP_SCHEMA_DEF_END(LogicalOr) + +OP_SCHEMA_DEF(LpNormalization) +OP_ATTR(axis, long) +OP_ATTR(p, long) +OP_SCHEMA_DEF_END(LpNormalization) + +OP_SCHEMA_DEF(LRN) +OP_ATTR(depth_radius, long) +OP_ATTR(bias, float) +OP_ATTR(alpha, float) +OP_ATTR(beta, float) +OP_ATTR(norm_region, string) +OP_SCHEMA_DEF_END(LRN) + +OP_SCHEMA_DEF(LshProjection) +OP_ATTR_ENUM(type, LshProjectionType) +OP_SCHEMA_DEF_END(LshProjection) + +OP_SCHEMA_DEF(LSTM) +OP_ATTR(bidirectional, bool) +OP_ATTR(has_bias, bool) +OP_ATTR(input_size, long) +OP_ATTR(hidden_size, long) +OP_ATTR(num_layers, long) +OP_ATTR(num_directions, long) +OP_ATTR(dropout, float) +OP_ATTR_WITH_VALUE(zoneout_cell, float, 0) +OP_ATTR_WITH_VALUE(zoneout_hidden, float, 0) +OP_SCHEMA_DEF_END(LSTM) + +OP_SCHEMA_DEF(L2NormalizeFusion) +OP_ATTR(axis, [long]) +OP_ATTR(epsilon, float) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(L2NormalizeFusion) + +OP_SCHEMA_DEF(MatMul) +OP_ATTR_WITH_VALUE(transpose_a, bool, false) +OP_ATTR_WITH_VALUE(transpose_b, bool, false) +OP_SCHEMA_DEF_END(MatMul) + +OP_SCHEMA_DEF(Maximum) +OP_SCHEMA_DEF_END(Maximum) + +OP_SCHEMA_DEF(MaximumGrad) +OP_ATTR(grad_x, bool) +OP_ATTR(grad_y, bool) +OP_SCHEMA_DEF_END(MaximumGrad) + +OP_SCHEMA_DEF(MaxPoolFusion) +OP_ATTR(kernel_size, [long]) +OP_ATTR(strides, [long]) +OP_ATTR(pad, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR_ENUM(round_mode, RoundMode) +OP_ATTR_ENUM(format, Format) +OP_ATTR(global, bool) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(MaxPoolFusion) + +OP_SCHEMA_DEF(MaxPoolGrad) +OP_ATTR(kernel_size, [long]) +OP_ATTR(strides, [long]) +OP_ATTR_ENUM(pad_mode, PadMode) +OP_ATTR_ENUM(format, Format) +OP_SCHEMA_DEF_END(MaxPoolGrad) + +OP_SCHEMA_DEF(Merge) +OP_SCHEMA_DEF_END(Merge) + +OP_SCHEMA_DEF(Mfcc) +OP_ATTR(freq_upper_limit, float) +OP_ATTR(freq_lower_limit, float) +OP_ATTR(filter_bank_channel_num, long) +OP_ATTR(dct_coeff_num, long) +OP_SCHEMA_DEF_END(Mfcc) + +OP_SCHEMA_DEF(Minimum) +OP_SCHEMA_DEF_END(Minimum) + +OP_SCHEMA_DEF(MinimumGrad) +OP_ATTR(grad_x, bool) +OP_ATTR(grad_y, bool) +OP_SCHEMA_DEF_END(MinimumGrad) + +OP_SCHEMA_DEF(Mod) +OP_SCHEMA_DEF_END(Mod) + +OP_SCHEMA_DEF(MulFusion) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(MulFusion) + +OP_SCHEMA_DEF(MulGrad) +OP_SCHEMA_DEF_END(MulGrad) + +OP_SCHEMA_DEF(Neg) +OP_SCHEMA_DEF_END(Neg) + +OP_SCHEMA_DEF(NegGrad) +OP_SCHEMA_DEF_END(NegGrad) + +OP_SCHEMA_DEF(NotEqual) +OP_SCHEMA_DEF_END(NotEqual) + +OP_SCHEMA_DEF(NonMaxSuppression) +OP_ATTR(center_point_box, long) +OP_SCHEMA_DEF_END(NonMaxSuppression) + +OP_SCHEMA_DEF(OneHot) +OP_ATTR(axis, long) +OP_SCHEMA_DEF_END(OneHot) + +OP_SCHEMA_DEF(OnesLike) +OP_SCHEMA_DEF_END(OnesLike) + +OP_SCHEMA_DEF(PadFusion) +OP_ATTR_VEC2D(paddings, Vec2D); +OP_ATTR_ENUM(padding_mode, PaddingMode) +OP_ATTR(constant_value, float) +OP_SCHEMA_DEF_END(PadFusion) + +OP_SCHEMA_DEF(PartialFusion) +OP_ATTR(sub_graph_index, long) +OP_SCHEMA_DEF_END(PartialFusion) + +OP_SCHEMA_DEF(PowerGrad) +OP_ATTR(power, float) +OP_ATTR(scale, float) +OP_ATTR(shift, float) +OP_SCHEMA_DEF_END(PowerGrad) + +OP_SCHEMA_DEF(PowFusion) +OP_ATTR_WITH_VALUE(scale, float, 1) +OP_ATTR_WITH_VALUE(shift, float, 0) +OP_SCHEMA_DEF_END(PowFusion) + +OP_SCHEMA_DEF(PriorBox) +OP_ATTR(min_sizes, [long]) +OP_ATTR(max_sizes, [long]) +OP_ATTR(aspect_ratios, [float]) +OP_ATTR(variances, [float]) +OP_ATTR(image_size_w, long) 
+OP_ATTR(image_size_h, long) +OP_ATTR(step_w, float) +OP_ATTR(step_h, float) +OP_ATTR(clip, bool) +OP_ATTR(flip, bool) +OP_ATTR(offset, float) +OP_SCHEMA_DEF_END(PriorBox) + +OP_SCHEMA_DEF(PReLUFusion) +OP_ATTR(channel_shared, bool) +OP_SCHEMA_DEF_END(PReLUFusion) + +OP_SCHEMA_DEF(Rank) +OP_SCHEMA_DEF_END(Rank) + +OP_SCHEMA_DEF(Range) +OP_ATTR(d_type, long) +OP_ATTR(start, long) +OP_ATTR(limit, long) +OP_ATTR_WITH_VALUE(delta, long, 1) +OP_SCHEMA_DEF_END(Range) + +OP_SCHEMA_DEF(Reciprocal) +OP_SCHEMA_DEF_END(Reciprocal) + +OP_SCHEMA_DEF(RealDiv) +OP_SCHEMA_DEF_END(RealDiv) + +OP_SCHEMA_DEF(ReduceFusion) +OP_ATTR(keep_dims, bool) +OP_ATTR_ENUM(mode, ReduceMode) +OP_ATTR(reduce_to_end, bool) +OP_ATTR(coeff, float) +OP_SCHEMA_DEF_END(ReduceFusion) + +OP_SCHEMA_DEF(Reshape) +OP_SCHEMA_DEF_END(Reshape) + +OP_SCHEMA_DEF(Resize) +OP_ATTR_ENUM_WITH_VALUE(format, Format, 0) +OP_ATTR_ENUM(method, ResizeMethod) +OP_ATTR(new_height, long) +OP_ATTR(new_width, long) +OP_ATTR_WITH_VALUE(preserve_aspect_ratio, bool, false) +OP_ATTR_ENUM(coordinate_transform_mode, CoordinateTransformMode) +OP_ATTR(cubic_coeff, float) +OP_ATTR(exclude_outside, long) +OP_ATTR(extrapolation_value, float) +OP_ATTR_ENUM(nearest_mode, NearestMode) +OP_SCHEMA_DEF_END(Resize) + +OP_SCHEMA_DEF(ReverseSequence) +OP_ATTR(seq_dim, long) +OP_ATTR(batch_dim, long) +OP_SCHEMA_DEF_END(ReverseSequence) + +OP_SCHEMA_DEF(ReverseV2) +OP_ATTR(axis, [long]) +OP_SCHEMA_DEF_END(ReverseV2) + +OP_SCHEMA_DEF(Rfft) +OP_ATTR(fft_length, long) +OP_SCHEMA_DEF_END(Rfft) + +OP_SCHEMA_DEF(ROIPooling) +OP_ATTR(pooled_h, long) +OP_ATTR(pooled_w, long) +OP_ATTR(scale, float) +OP_SCHEMA_DEF_END(ROIPooling) + +OP_SCHEMA_DEF(Round) +OP_SCHEMA_DEF_END(Round) + +OP_SCHEMA_DEF(Rsqrt) +OP_SCHEMA_DEF_END(Rsqrt) + +OP_SCHEMA_DEF(QuantDTypeCast) +OP_ATTR(src_t, long) +OP_ATTR(dst_t, long) +OP_SCHEMA_DEF_END(QuantDTypeCast) + +OP_SCHEMA_DEF(ScaleFusion) +OP_ATTR(axis, long) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(ScaleFusion) + +OP_SCHEMA_DEF(ScatterNd) +OP_SCHEMA_DEF_END(ScatterNd) + +OP_SCHEMA_DEF(SGD) +OP_ATTR(nesterov, bool) +OP_ATTR(dampening, float) +OP_ATTR(weight_decay, float) +OP_SCHEMA_DEF_END(SGD) + +OP_SCHEMA_DEF(Shape) +OP_SCHEMA_DEF_END(Shape) + +OP_SCHEMA_DEF(SigmoidCrossEntropyWithLogits) +OP_SCHEMA_DEF_END(SigmoidCrossEntropyWithLogits) + +OP_SCHEMA_DEF(SigmoidCrossEntropyWithLogitsGrad) +OP_SCHEMA_DEF_END(SigmoidCrossEntropyWithLogitsGrad) + +OP_SCHEMA_DEF(Sin) +OP_SCHEMA_DEF_END(Sin) + +OP_SCHEMA_DEF(SkipGram) +OP_ATTR(include_all_grams, bool) +OP_ATTR(max_skip_size, long) +OP_ATTR(ngram_size, long) +OP_SCHEMA_DEF_END(SkipGram) + +OP_SCHEMA_DEF(SliceFusion) +OP_ATTR(axes, [long]) +OP_SCHEMA_DEF_END(SliceFusion) + +OP_SCHEMA_DEF(SmoothL1Loss) +OP_ATTR(beta, float) +OP_SCHEMA_DEF_END(SmoothL1Loss) + +OP_SCHEMA_DEF(SmoothL1LossGrad) +OP_ATTR(beta, float) +OP_SCHEMA_DEF_END(SmoothL1LossGrad) + +OP_SCHEMA_DEF(Softmax) +OP_ATTR(axis, [long]) +OP_SCHEMA_DEF_END(Softmax) + +OP_SCHEMA_DEF(SoftmaxCrossEntropyWithLogits) +OP_SCHEMA_DEF_END(SoftmaxCrossEntropyWithLogits) + +OP_SCHEMA_DEF(SpaceToBatch) +OP_ATTR(block_size, [long]) +OP_ATTR_VEC2D(paddings, Vec2D); +OP_SCHEMA_DEF_END(SpaceToBatch) + +OP_SCHEMA_DEF(SpaceToBatchND) +OP_ATTR(block_shape, [long]) +OP_ATTR_VEC2D(paddings, Vec2D); +OP_SCHEMA_DEF_END(SpaceToBatchND) + +OP_SCHEMA_DEF(SpaceToDepth) +OP_ATTR(block_size, long) +OP_ATTR_ENUM(format, Format) +OP_SCHEMA_DEF_END(SpaceToDepth) + +OP_SCHEMA_DEF(SparseSoftmaxCrossEntropy) +OP_ATTR(grad, bool) 
+OP_SCHEMA_DEF_END(SparseSoftmaxCrossEntropy) + +OP_SCHEMA_DEF(SparseToDense) +OP_SCHEMA_DEF_END(SparseToDense) + +OP_SCHEMA_DEF(Split) +OP_ATTR(output_num, long) +OP_ATTR(size_splits, [long]) +OP_ATTR(axis, long) +OP_SCHEMA_DEF_END(Split) + +OP_SCHEMA_DEF(Sqrt) +OP_SCHEMA_DEF_END(Sqrt) + +OP_SCHEMA_DEF(Squeeze) +OP_ATTR(axis, [long]) +OP_SCHEMA_DEF_END(Squeeze) + +OP_SCHEMA_DEF(Square) +OP_SCHEMA_DEF_END(Square) + +OP_SCHEMA_DEF(SquaredDifference) +OP_SCHEMA_DEF_END(SquaredDifference) + +OP_SCHEMA_DEF(Stack) +OP_ATTR(axis, long) +OP_SCHEMA_DEF_END(Stack) + +OP_SCHEMA_DEF(StridedSlice) +OP_ATTR(begin_mask, long) +OP_ATTR(end_mask, long) +OP_ATTR(ellipsis_mask, long) +OP_ATTR(new_axis_mask, long) +OP_ATTR(shrink_axis_mask, long) +OP_SCHEMA_DEF_END(StridedSlice) + +OP_SCHEMA_DEF(SubFusion) +OP_ATTR_ENUM_WITH_VALUE(activation_type, ActivationType, 0) +OP_SCHEMA_DEF_END(SubFusion) + +OP_SCHEMA_DEF(SubGrad) +OP_SCHEMA_DEF_END(SubGrad) + +OP_SCHEMA_DEF(Switch) +OP_SCHEMA_DEF_END(Switch) + +OP_SCHEMA_DEF(TensorListFromTensor) +OP_ATTR(element_dtype, long) +OP_ATTR(shape_type, long) +OP_SCHEMA_DEF_END(TensorListFromTensor) + +OP_SCHEMA_DEF(TensorListGetItem) +OP_ATTR(element_dtype, long) +OP_SCHEMA_DEF_END(TensorListGetItem) + +OP_SCHEMA_DEF(TensorListReserve) +OP_ATTR(element_dtype, long) +OP_ATTR(shape_type, long) +OP_SCHEMA_DEF_END(TensorListReserve) + +OP_SCHEMA_DEF(TensorListSetItem) +OP_ATTR(element_dtype, long) +OP_SCHEMA_DEF_END(TensorListSetItem) + +OP_SCHEMA_DEF(TensorListStack) +OP_ATTR(num_elements, long) +OP_ATTR(element_dtype, long) +OP_SCHEMA_DEF_END(TensorListStack) + +OP_SCHEMA_DEF(TileFusion) +OP_ATTR(dims, [long]) +OP_SCHEMA_DEF_END(TileFusion) + +OP_SCHEMA_DEF(TopKFusion) +OP_ATTR_WITH_VALUE(sorted, bool, true) +OP_ATTR(axis, long) +OP_ATTR(largest, long) +OP_SCHEMA_DEF_END(TopKFusion) + +OP_SCHEMA_DEF(Transpose) +OP_SCHEMA_DEF_END(Transpose) + +OP_SCHEMA_DEF(Unique) +OP_SCHEMA_DEF_END(Unique) + +OP_SCHEMA_DEF(UnsortedSegmentSum) +OP_SCHEMA_DEF_END(UnsortedSegmentSum) + +OP_SCHEMA_DEF(Unsqueeze) +OP_ATTR(axis, [long]) +OP_SCHEMA_DEF_END(Unsqueeze) + +OP_SCHEMA_DEF(Unstack) +OP_ATTR_WITH_VALUE(axis, long, 0) +OP_SCHEMA_DEF_END(Unstack) + +OP_SCHEMA_DEF(While) +OP_ATTR(cond_subgraph_index, long) +OP_ATTR(body_subgraph_index, long) +OP_SCHEMA_DEF_END(While) + +OP_SCHEMA_DEF(Where) +OP_SCHEMA_DEF_END(Where) + +OP_SCHEMA_DEF(ZerosLike) +OP_SCHEMA_DEF_END(ZerosLike) + +OP_SCHEMA_DEF(Select) +OP_SCHEMA_DEF_END(Select) + +OP_SCHEMA_DEF(If) +OP_SCHEMA_DEF_END(If) + +OP_SCHEMA_DEF(GRU) +OP_ATTR_WITH_VALUE(bidirectional, bool, false) +OP_SCHEMA_DEF_END(GRU) + +OP_SCHEMA_DEF(NonZero) +OP_SCHEMA_DEF_END(NonZero) + +OP_SCHEMA_DEF(InvertPermutation) +OP_SCHEMA_DEF_END(InvertPermutation) + +OP_SCHEMA_DEF(Size) +OP_SCHEMA_DEF_END(Size) + +OP_SCHEMA_DEF(RandomStandardNormal) +OP_ATTR(seed, long) +OP_ATTR(seed2, long) +OP_SCHEMA_DEF_END(RandomStandardNormal) + +OP_SCHEMA_DEF(CropAndResize) +OP_ATTR_ENUM(method, ResizeMethod) +OP_ATTR(extrapolation_value, float) +OP_SCHEMA_DEF_END(CropAndResize) + +OP_SCHEMA_DEF(Erf) +OP_SCHEMA_DEF_END(Erf) + +OP_SCHEMA_DEF(StridedSliceGrad) +OP_ATTR(begin_mask, long) +OP_ATTR(end_mask, long) +OP_ATTR(ellipsis_mask, long) +OP_ATTR(new_axis_mask, long) +OP_ATTR(shrink_axis_mask, long) +OP_SCHEMA_DEF_END(StridedSliceGrad) + +OP_SCHEMA_DEF(IsFinite) +OP_SCHEMA_DEF_END(IsFinite) + +OP_SCHEMA_DEF(LinSpace) +OP_SCHEMA_DEF_END(LinSpace) + +OP_SCHEMA_DEF(UniformReal) +OP_ATTR(seed, long) +OP_ATTR(seed2, long) +OP_SCHEMA_DEF_END(UniformReal) + +OP_SCHEMA_DEF(AbsGrad) 
+OP_SCHEMA_DEF_END(AbsGrad) diff --git a/mindspore/lite/src/ops/ops_def.h b/mindspore/lite/src/ops/ops_def.h new file mode 100644 index 0000000000..9f7c1f875b --- /dev/null +++ b/mindspore/lite/src/ops/ops_def.h @@ -0,0 +1,157 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_OPS_OPS_DEF_H_ +#define MINDSPORE_LITE_SRC_OPS_OPS_DEF_H_ +#include <string> +#include <map> +#include <memory> +#include <utility> +#include "src/ops/ops_func_declare.h" +#include "src/ops/schema_register.h" + +#ifdef PRIMITIVE_WRITEABLE +#include "mindspore/core/utils/check_convert_utils.h" +#include "schema/inner/model_generated.h" +#include "schema/inner/ops_types_generated.h" +#endif + +#ifdef GEN_SCHEMA_DEF +#define OP_TYPE_DEF_BEGIN(type) \ + namespace mindspore::lite::ops { \ + std::string Gen##type() { \ + std::string prims_type = "union "; \ + prims_type.append(#type).append(" {\n"); + +#define OP_TYPE(OP) prims_type.append(" ").append(#OP).append(",\n"); + +#define OP_TYPE_DEF_END(type) \ + prims_type.append("}\n\n"); \ + return prims_type; \ + } \ + PrimitiveTypeRegister g_gen##type(Gen##type); \ + } // namespace mindspore::lite::ops +#else +#define OP_TYPE_DEF_BEGIN(type) +#define OP_TYPE(OP) +#define OP_TYPE_DEF_END(type) +#endif + +#ifdef GEN_SCHEMA_DEF +#define OP_SCHEMA_DEF(OP) \ + namespace mindspore::lite::ops { \ + std::string Gen##OP##Def() { \ + std::string op_def = "table "; \ + op_def.append(#OP); \ + op_def.append(" {\n"); + +#elif PRIMITIVE_WRITEABLE +#define OP_SCHEMA_DEF(OP) \ + namespace mindspore::lite::ops { \ + mindspore::schema::PrimitiveT *MSOp2SchemaOp(const mindspore::ops::OP *op) { \ + mindspore::schema::OP##T *schema_op = new (std::nothrow) mindspore::schema::OP##T(); +#else +#define OP_SCHEMA_DEF(OP) +#endif + +#ifdef GEN_SCHEMA_DEF +#define OP_ATTR(key, type) op_def.append(" ").append(#key).append(": ").append(#type).append(";\n"); +#define OP_ATTR_ENUM(key, type) op_def.append(" ").append(#key).append(": ").append(#type).append(";\n"); +#define OP_ATTR_VEC2D(key, type) op_def.append(" ").append(#key).append(": ").append(#type).append(";\n"); +#elif PRIMITIVE_WRITEABLE +#define OP_ATTR(key, type) \ + if (schema_op != nullptr) { \ + if (op->GetAttr(#key) != nullptr) { \ + schema_op->key = op->get_##key(); \ + } \ + } else { \ + return nullptr; \ + } + +#define OP_ATTR_ENUM(key, type) \ + if (schema_op != nullptr) { \ + if (op->GetAttr(#key) != nullptr) { \ + schema_op->key = static_cast<schema::type>(op->get_##key()); \ + } \ + } + +#define OP_ATTR_VEC2D(key, type) \ + if (schema_op != nullptr) { \ + auto vec2d = std::make_unique<schema::Vec2DT>(); \ + if (op->GetAttr(#key) != nullptr) { \ + auto data = op->get_##key(); \ + for (size_t i = 0; i < data.size(); ++i) { \ + auto vec = std::make_unique<schema::VecT>(); \ + vec->data.assign(data.at(i).begin(), data.at(i).end()); \ + vec2d->data.push_back(std::move(vec)); \ + } \ + schema_op->key = std::move(vec2d); \ + } \ + } + +#else 
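+// Fallback branch: in builds with neither GEN_SCHEMA_DEF nor PRIMITIVE_WRITEABLE,
+// the attribute macros expand to nothing, so the definitions in ops_def.cc
+// compile away entirely.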
+#define OP_ATTR(key, type) +#define OP_ATTR_ENUM(key, type) +#define OP_ATTR_VEC2D(key, type) +#endif + +#ifdef GEN_SCHEMA_DEF +#define OP_ATTR_WITH_VALUE(key, type, value) \ + op_def.append(" ").append(#key).append(": ").append(#type).append(" = ").append(#value).append(";\n"); +#define OP_ATTR_ENUM_WITH_VALUE(key, type, value) \ + op_def.append(" ").append(#key).append(": ").append(#type).append(" = ").append(#value).append(";\n"); +#elif PRIMITIVE_WRITEABLE +#define OP_ATTR_WITH_VALUE(key, type, value) \ + if (schema_op != nullptr) { \ + if (op->GetAttr(#key) != nullptr) { \ + schema_op->key = op->get_##key(); \ + } \ + } else { \ + return nullptr; \ + } + +#define OP_ATTR_ENUM_WITH_VALUE(key, type, value) \ + if (schema_op != nullptr) { \ + if (op->GetAttr(#key) != nullptr) { \ + schema_op->key = static_cast<schema::type>(op->get_##key()); \ + } \ + } +#else +#define OP_ATTR_WITH_VALUE(key, type, value) +#define OP_ATTR_ENUM_WITH_VALUE(key, type, value) +#endif + +#ifdef GEN_SCHEMA_DEF +#define OP_SCHEMA_DEF_END(OP) \ + op_def.append("}\n\n"); \ + return op_def; \ + } \ + SchemaOpRegister g_schema_op_##OP(Gen##OP##Def); \ + } // namespace mindspore::lite::ops +#elif PRIMITIVE_WRITEABLE +#define OP_SCHEMA_DEF_END(OP) \ + schema::PrimitiveT *prim = new (std::nothrow) schema::PrimitiveT(); \ + if (prim == nullptr) { \ + return nullptr; \ + } \ + prim->value.value = schema_op; \ + prim->value.type = schema::PrimitiveType_##OP; \ + return prim; \ + } \ + } // namespace mindspore::lite::ops +#else +#define OP_SCHEMA_DEF_END(OP) +#endif +#endif // MINDSPORE_LITE_SRC_OPS_OPS_DEF_H_ diff --git a/mindspore/lite/src/ops/ops_func_declare.h b/mindspore/lite/src/ops/ops_func_declare.h new file mode 100644 index 0000000000..d0525d3744 --- /dev/null +++ b/mindspore/lite/src/ops/ops_func_declare.h @@ -0,0 +1,453 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef MINDSPORE_LITE_SRC_OPS_OPS_FUNC_DECLARE_H_ +#define MINDSPORE_LITE_SRC_OPS_OPS_FUNC_DECLARE_H_ + +#ifdef PRIMITIVE_WRITEABLE +#include "schema/inner/model_generated.h" +#include "ops/abs.h" +#include "ops/adam.h" +#include "ops/add.h" +#include "ops/adder.h" +#include "ops/addn.h" +#include "ops/all.h" +#include "ops/apply_momentum.h" +#include "ops/arg_max.h" +#include "ops/arg_min.h" +#include "ops/asin.h" +#include "ops/assert.h" +#include "ops/assign.h" +#include "ops/assign_add.h" +#include "ops/atan.h" +#include "ops/audio_spectrogram.h" +#include "ops/avg_pool.h" +#include "ops/batch_norm.h" +#include "ops/batch_to_space.h" +#include "ops/batch_to_space_nd.h" +#include "ops/bias_add.h" +#include "ops/binary_cross_entropy.h" +#include "ops/black_box.h" +#include "ops/broadcast_to.h" +#include "ops/broadcast.h" +#include "ops/cast.h" +#include "ops/ceil.h" +#include "ops/clip.h" +#include "ops/custom.h" +#include "ops/custom_normalize.h" +#include "ops/custom_predict.h" +#include "ops/custom_extract_features.h" +#include "ops/concat.h" +#include "ops/constant.h" +#include "ops/constant_of_shape.h" +#include "ops/control_depend.h" +#include "ops/cos.h" +#include "ops/crop.h" +#include "ops/depth_to_space.h" +#include "ops/depend.h" +#include "ops/detection_post_process.h" +#include "ops/div.h" +#include "ops/dropout.h" +#include "ops/eltwise.h" +#include "ops/elu.h" +#include "ops/embedding_lookup.h" +#include "ops/equal.h" +#include "ops/expand_dims.h" +#include "ops/exp.h" +#include "ops/fake_quant_with_min_max_vars.h" +#include "ops/fake_quant_with_min_max_vars_per_channel.h" +#include "ops/fft_imag.h" +#include "ops/fft_real.h" +#include "ops/fill.h" +#include "ops/flatten.h" +#include "ops/floor.h" +#include "ops/floor_div.h" +#include "ops/floor_mod.h" +#include "ops/fused_batch_norm.h" +#include "ops/gather.h" +#include "ops/gather_nd.h" +#include "ops/greater_equal.h" +#include "ops/greater.h" +#include "ops/hashtable_lookup.h" +#include "ops/instance_norm.h" +#include "ops/l2_normalize.h" +#include "ops/layer_norm.h" +#include "ops/leaky_relu.h" +#include "ops/less.h" +#include "ops/less_equal.h" +#include "ops/log.h" +#include "ops/logical_and.h" +#include "ops/logical_not.h" +#include "ops/logical_or.h" +#include "ops/logical_xor.h" +#include "ops/loop.h" +#include "ops/lp_normalization.h" +#include "ops/lrn.h" +#include "ops/lsh_projection.h" +#include "ops/lstm.h" +#include "ops/make_tuple.h" +#include "ops/mat_mul.h" +#include "ops/matrix_diag.h" +#include "ops/max_pool.h" +#include "ops/maximum.h" +#include "ops/merge.h" +#include "ops/mfcc.h" +#include "ops/minimum.h" +#include "ops/mod.h" +#include "ops/mul.h" +#include "ops/neg.h" +#include "ops/net_output.h" +#include "ops/non_max_suppression.h" +#include "ops/not_equal.h" +#include "ops/one_hot.h" +#include "ops/ones_like.h" +#include "ops/pad.h" +#include "ops/permute.h" +#include "ops/prelu.h" +#include "ops/prior_box.h" +#include "ops/proposal.h" +#include "ops/quant_dtype_cast.h" +#include "ops/range.h" +#include "ops/rank.h" +#include "ops/real_div.h" +#include "ops/reciprocal.h" +#include "ops/reduce.h" +#include "ops/relu6.h" +#include "ops/reshape.h" +#include "ops/resize.h" +#include "ops/return.h" +#include "ops/reverse_sequence.h" +#include "ops/reverse_v2.h" +#include "ops/rfft.h" +#include "ops/roi_pooling.h" +#include "ops/round.h" +#include "ops/rsqrt.h" +#include "ops/scale.h" +#include "ops/scatter_nd.h" +#include "ops/select.h" +#include "ops/sgd.h" +#include "ops/shape.h" +#include 
"ops/sigmoid.h" +#include "ops/sigmoid_cross_entropy_with_logits.h" +#include "ops/sin.h" +#include "ops/skip_gram.h" +#include "ops/smooth_l1_loss.h" +#include "ops/softmax.h" +#include "ops/softmax_cross_entropy_with_logits.h" +#include "ops/space_to_batch.h" +#include "ops/space_to_batch_nd.h" +#include "ops/space_to_depth.h" +#include "ops/sparse_softmax_cross_entropy.h" +#include "ops/sparse_to_dense.h" +#include "ops/split.h" +#include "ops/square.h" +#include "ops/squeeze.h" +#include "ops/sqrt.h" +#include "ops/squared_difference.h" +#include "ops/stack.h" +#include "ops/strided_slice.h" +#include "ops/sub.h" +#include "ops/switch.h" +#include "ops/tan.h" +#include "ops/tanh.h" +#include "ops/tensor_list_from_tensor.h" +#include "ops/tensor_list_get_item.h" +#include "ops/tensor_list_reserve.h" +#include "ops/tensor_list_set_item.h" +#include "ops/tensor_list_stack.h" +#include "ops/tile.h" +#include "ops/transpose.h" +#include "ops/tuple_get_item.h" +#include "ops/unique.h" +#include "ops/unstack.h" +#include "ops/unsqueeze.h" +#include "ops/unsorted_segment_sum.h" +#include "ops/where.h" +#include "ops/while.h" +#include "ops/zeros_like.h" +#include "ops/grad/activation_grad.h" +#include "ops/grad/add_grad.h" +#include "ops/grad/avg_pool_grad.h" +#include "ops/grad/bias_add_grad.h" +#include "ops/grad/batch_norm_grad.h" +#include "ops/grad/binary_cross_entropy_grad.h" +#include "ops/grad/de_conv2d_grad_filter.h" +#include "ops/grad/div_grad.h" +#include "ops/grad/dropout_grad.h" +#include "ops/grad/flatten_grad.h" +#include "ops/grad/group_conv2d_grad_input.h" +#include "ops/grad/log_grad.h" +#include "ops/grad/max_pool_grad.h" +#include "ops/grad/maximum_grad.h" +#include "ops/grad/minimum_grad.h" +#include "ops/grad/mul_grad.h" +#include "ops/grad/neg_grad.h" +#include "ops/grad/pooling_grad.h" +#include "ops/grad/power_grad.h" +#include "ops/grad/sigmoid_cross_entropy_with_logits_grad.h" +#include "ops/grad/smooth_l1_loss_grad.h" +#include "ops/grad/sub_grad.h" +#include "ops/fusion/activation.h" +#include "ops/fusion/add_fusion.h" +#include "ops/fusion/adder_fusion.h" +#include "ops/fusion/arg_max_fusion.h" +#include "ops/fusion/arg_min_fusion.h" +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/conv2d_backprop_filter_fusion.h" +#include "ops/fusion/conv2d_backprop_input_fusion.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" +#include "ops/fusion/div_fusion.h" +#include "ops/fusion/embedding_lookup_fusion.h" +#include "ops/fusion/exp_fusion.h" +#include "ops/fusion/full_connection.h" +#include "ops/fusion/l2_normalize_fusion.h" +#include "ops/fusion/layer_norm_fusion.h" +#include "ops/fusion/max_pool_fusion.h" +#include "ops/fusion/mul_fusion.h" +#include "ops/fusion/pad_fusion.h" +#include "ops/fusion/partial_fusion.h" +#include "ops/fusion/pow_fusion.h" +#include "ops/fusion/prelu_fusion.h" +#include "ops/fusion/reduce_fusion.h" +#include "ops/fusion/scale_fusion.h" +#include "ops/fusion/slice_fusion.h" +#include "ops/fusion/sub_fusion.h" +#include "ops/fusion/tile_fusion.h" +#include "ops/fusion/topk_fusion.h" +#include "ops/if.h" +#include "ops/gru.h" +#include "ops/non_zero.h" +#include "ops/invert_permutation.h" +#include "ops/size.h" +#include "ops/random_standard_normal.h" +#include "ops/crop_and_resize.h" +#include "ops/erf.h" +#include "ops/grad/strided_slice_grad.h" +#include "ops/is_finite.h" +#include "ops/lin_space.h" +#include "ops/uniform_real.h" +#include "ops/grad/abs_grad.h" + +#define 
FUNC_MSOP2SCHEMAOP_DECLARE(OP) \ + namespace mindspore::lite::ops { \ + mindspore::schema::PrimitiveT *MSOp2SchemaOp(const mindspore::ops::OP *op); \ + } +#else +#define FUNC_MSOP2SCHEMAOP_DECLARE(OP) +#endif + +#ifdef PRIMITIVE_WRITEABLE +FUNC_MSOP2SCHEMAOP_DECLARE(Abs); +FUNC_MSOP2SCHEMAOP_DECLARE(Activation); +FUNC_MSOP2SCHEMAOP_DECLARE(ActivationGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(Adam); +FUNC_MSOP2SCHEMAOP_DECLARE(AddFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(AdderFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(AddGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(AddN); +FUNC_MSOP2SCHEMAOP_DECLARE(All); +FUNC_MSOP2SCHEMAOP_DECLARE(ApplyMomentum); +FUNC_MSOP2SCHEMAOP_DECLARE(ArgMax); +FUNC_MSOP2SCHEMAOP_DECLARE(ArgMaxFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(ArgMin); +FUNC_MSOP2SCHEMAOP_DECLARE(ArgMinFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Asin); +FUNC_MSOP2SCHEMAOP_DECLARE(Assert); +FUNC_MSOP2SCHEMAOP_DECLARE(Assign); +FUNC_MSOP2SCHEMAOP_DECLARE(AssignAdd); +FUNC_MSOP2SCHEMAOP_DECLARE(Atan); +FUNC_MSOP2SCHEMAOP_DECLARE(AudioSpectrogram); +FUNC_MSOP2SCHEMAOP_DECLARE(AvgPoolFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(AvgPoolGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(BatchNorm); +FUNC_MSOP2SCHEMAOP_DECLARE(BatchNormGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(BatchToSpace); +FUNC_MSOP2SCHEMAOP_DECLARE(BatchToSpaceND); +FUNC_MSOP2SCHEMAOP_DECLARE(BiasAdd); +FUNC_MSOP2SCHEMAOP_DECLARE(BiasAddGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(BinaryCrossEntropy); +FUNC_MSOP2SCHEMAOP_DECLARE(BinaryCrossEntropyGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(BroadcastTo); +FUNC_MSOP2SCHEMAOP_DECLARE(Cast); +FUNC_MSOP2SCHEMAOP_DECLARE(Ceil); +FUNC_MSOP2SCHEMAOP_DECLARE(Clip); +FUNC_MSOP2SCHEMAOP_DECLARE(Concat); +FUNC_MSOP2SCHEMAOP_DECLARE(ControlDepend); +FUNC_MSOP2SCHEMAOP_DECLARE(Constant); +FUNC_MSOP2SCHEMAOP_DECLARE(ConstantOfShape); +FUNC_MSOP2SCHEMAOP_DECLARE(Conv2DBackpropFilterFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Conv2DBackpropInputFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Conv2DFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Conv2dTransposeFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Cos); +FUNC_MSOP2SCHEMAOP_DECLARE(Crop); +FUNC_MSOP2SCHEMAOP_DECLARE(CustomExtractFeatures); +FUNC_MSOP2SCHEMAOP_DECLARE(CustomNormalize); +FUNC_MSOP2SCHEMAOP_DECLARE(CustomPredict); +FUNC_MSOP2SCHEMAOP_DECLARE(DeConv2DGradFilter); +FUNC_MSOP2SCHEMAOP_DECLARE(Depend); +FUNC_MSOP2SCHEMAOP_DECLARE(DepthToSpace); +FUNC_MSOP2SCHEMAOP_DECLARE(DetectionPostProcess); +FUNC_MSOP2SCHEMAOP_DECLARE(DivFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(DivGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(Dropout); +FUNC_MSOP2SCHEMAOP_DECLARE(DropoutGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(Eltwise); +FUNC_MSOP2SCHEMAOP_DECLARE(Elu); +FUNC_MSOP2SCHEMAOP_DECLARE(EmbeddingLookupFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Equal); +FUNC_MSOP2SCHEMAOP_DECLARE(ExpFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(ExpandDims); +FUNC_MSOP2SCHEMAOP_DECLARE(FakeQuantWithMinMaxVars); +FUNC_MSOP2SCHEMAOP_DECLARE(FakeQuantWithMinMaxVarsPerChannel); +FUNC_MSOP2SCHEMAOP_DECLARE(FftImag); +FUNC_MSOP2SCHEMAOP_DECLARE(FftReal); +FUNC_MSOP2SCHEMAOP_DECLARE(Fill); +FUNC_MSOP2SCHEMAOP_DECLARE(Flatten); +FUNC_MSOP2SCHEMAOP_DECLARE(FlattenGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(Floor); +FUNC_MSOP2SCHEMAOP_DECLARE(FloorDiv); +FUNC_MSOP2SCHEMAOP_DECLARE(FloorMod); +FUNC_MSOP2SCHEMAOP_DECLARE(FullConnection); +FUNC_MSOP2SCHEMAOP_DECLARE(FusedBatchNorm); +FUNC_MSOP2SCHEMAOP_DECLARE(Gather); +FUNC_MSOP2SCHEMAOP_DECLARE(GatherNd); +FUNC_MSOP2SCHEMAOP_DECLARE(Greater); +FUNC_MSOP2SCHEMAOP_DECLARE(GreaterEqual); +FUNC_MSOP2SCHEMAOP_DECLARE(GroupConv2DGradInput); +FUNC_MSOP2SCHEMAOP_DECLARE(HashtableLookup); 
+FUNC_MSOP2SCHEMAOP_DECLARE(InstanceNorm);
+FUNC_MSOP2SCHEMAOP_DECLARE(LayerNormFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(LeakyRelu);
+FUNC_MSOP2SCHEMAOP_DECLARE(Less);
+FUNC_MSOP2SCHEMAOP_DECLARE(LessEqual);
+FUNC_MSOP2SCHEMAOP_DECLARE(Log);
+FUNC_MSOP2SCHEMAOP_DECLARE(LogGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(LogicalAnd);
+FUNC_MSOP2SCHEMAOP_DECLARE(LogicalNot);
+FUNC_MSOP2SCHEMAOP_DECLARE(LogicalOr);
+FUNC_MSOP2SCHEMAOP_DECLARE(LogicalXor);
+FUNC_MSOP2SCHEMAOP_DECLARE(LpNormalization);
+FUNC_MSOP2SCHEMAOP_DECLARE(LRN);
+FUNC_MSOP2SCHEMAOP_DECLARE(LshProjection);
+FUNC_MSOP2SCHEMAOP_DECLARE(LSTM);
+FUNC_MSOP2SCHEMAOP_DECLARE(L2NormalizeFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(MakeTuple);
+FUNC_MSOP2SCHEMAOP_DECLARE(MatMul);
+FUNC_MSOP2SCHEMAOP_DECLARE(Maximum);
+FUNC_MSOP2SCHEMAOP_DECLARE(MaximumGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(MaxPoolFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(MaxPoolGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(Merge);
+FUNC_MSOP2SCHEMAOP_DECLARE(Mfcc);
+FUNC_MSOP2SCHEMAOP_DECLARE(Minimum);
+FUNC_MSOP2SCHEMAOP_DECLARE(MinimumGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(Mod);
+FUNC_MSOP2SCHEMAOP_DECLARE(MulFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(MulGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(Neg);
+FUNC_MSOP2SCHEMAOP_DECLARE(NegGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(NotEqual);
+FUNC_MSOP2SCHEMAOP_DECLARE(NonMaxSuppression);
+FUNC_MSOP2SCHEMAOP_DECLARE(OneHot);
+FUNC_MSOP2SCHEMAOP_DECLARE(OnesLike);
+FUNC_MSOP2SCHEMAOP_DECLARE(PadFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(PartialFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(PowFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(PowerGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(PReLUFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(PriorBox);
+FUNC_MSOP2SCHEMAOP_DECLARE(Proposal);
+FUNC_MSOP2SCHEMAOP_DECLARE(Rank);
+FUNC_MSOP2SCHEMAOP_DECLARE(Range);
+FUNC_MSOP2SCHEMAOP_DECLARE(RealDiv);
+FUNC_MSOP2SCHEMAOP_DECLARE(Reciprocal);
+FUNC_MSOP2SCHEMAOP_DECLARE(ReduceFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(Reshape);
+FUNC_MSOP2SCHEMAOP_DECLARE(Resize);
+FUNC_MSOP2SCHEMAOP_DECLARE(Return);
+FUNC_MSOP2SCHEMAOP_DECLARE(ReverseSequence);
+FUNC_MSOP2SCHEMAOP_DECLARE(ReverseV2);
+FUNC_MSOP2SCHEMAOP_DECLARE(Rfft);
+FUNC_MSOP2SCHEMAOP_DECLARE(ROIPooling);
+FUNC_MSOP2SCHEMAOP_DECLARE(Round);
+FUNC_MSOP2SCHEMAOP_DECLARE(Rsqrt);
+FUNC_MSOP2SCHEMAOP_DECLARE(QuantDTypeCast);
+FUNC_MSOP2SCHEMAOP_DECLARE(Scale);
+FUNC_MSOP2SCHEMAOP_DECLARE(ScaleFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(ScatterNd);
+FUNC_MSOP2SCHEMAOP_DECLARE(SGD);
+FUNC_MSOP2SCHEMAOP_DECLARE(Shape);
+FUNC_MSOP2SCHEMAOP_DECLARE(SigmoidCrossEntropyWithLogits);
+FUNC_MSOP2SCHEMAOP_DECLARE(SigmoidCrossEntropyWithLogitsGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(Sin);
+FUNC_MSOP2SCHEMAOP_DECLARE(SkipGram);
+FUNC_MSOP2SCHEMAOP_DECLARE(SliceFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(SmoothL1Loss);
+FUNC_MSOP2SCHEMAOP_DECLARE(SmoothL1LossGrad);
+FUNC_MSOP2SCHEMAOP_DECLARE(Softmax);
+FUNC_MSOP2SCHEMAOP_DECLARE(SoftmaxCrossEntropyWithLogits);
+FUNC_MSOP2SCHEMAOP_DECLARE(SpaceToBatch);
+FUNC_MSOP2SCHEMAOP_DECLARE(SpaceToBatchND);
+FUNC_MSOP2SCHEMAOP_DECLARE(SpaceToDepth);
+FUNC_MSOP2SCHEMAOP_DECLARE(SparseSoftmaxCrossEntropy);
+FUNC_MSOP2SCHEMAOP_DECLARE(SparseToDense);
+FUNC_MSOP2SCHEMAOP_DECLARE(Split);
+FUNC_MSOP2SCHEMAOP_DECLARE(Sqrt);
+FUNC_MSOP2SCHEMAOP_DECLARE(Square);
+FUNC_MSOP2SCHEMAOP_DECLARE(SquaredDifference);
+FUNC_MSOP2SCHEMAOP_DECLARE(Squeeze);
+FUNC_MSOP2SCHEMAOP_DECLARE(Stack);
+FUNC_MSOP2SCHEMAOP_DECLARE(StridedSlice);
+FUNC_MSOP2SCHEMAOP_DECLARE(Sub);
+FUNC_MSOP2SCHEMAOP_DECLARE(SubFusion);
+FUNC_MSOP2SCHEMAOP_DECLARE(SubGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(Switch); +FUNC_MSOP2SCHEMAOP_DECLARE(Tan); +FUNC_MSOP2SCHEMAOP_DECLARE(TensorListFromTensor); +FUNC_MSOP2SCHEMAOP_DECLARE(TensorListGetItem); +FUNC_MSOP2SCHEMAOP_DECLARE(TensorListReserve); +FUNC_MSOP2SCHEMAOP_DECLARE(TensorListSetItem); +FUNC_MSOP2SCHEMAOP_DECLARE(TensorListStack); +FUNC_MSOP2SCHEMAOP_DECLARE(TileFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(TopKFusion); +FUNC_MSOP2SCHEMAOP_DECLARE(Transpose); +FUNC_MSOP2SCHEMAOP_DECLARE(TupleGetItem); +FUNC_MSOP2SCHEMAOP_DECLARE(Unique); +FUNC_MSOP2SCHEMAOP_DECLARE(UnsortedSegmentSum); +FUNC_MSOP2SCHEMAOP_DECLARE(Unsqueeze); +FUNC_MSOP2SCHEMAOP_DECLARE(Unstack); +FUNC_MSOP2SCHEMAOP_DECLARE(While); +FUNC_MSOP2SCHEMAOP_DECLARE(Where); +FUNC_MSOP2SCHEMAOP_DECLARE(ZerosLike); +FUNC_MSOP2SCHEMAOP_DECLARE(Select); +FUNC_MSOP2SCHEMAOP_DECLARE(If); +FUNC_MSOP2SCHEMAOP_DECLARE(GRU); +FUNC_MSOP2SCHEMAOP_DECLARE(NonZero); +FUNC_MSOP2SCHEMAOP_DECLARE(InvertPermutation); +FUNC_MSOP2SCHEMAOP_DECLARE(Size); +FUNC_MSOP2SCHEMAOP_DECLARE(RandomStandardNormal); +FUNC_MSOP2SCHEMAOP_DECLARE(CropAndResize); +FUNC_MSOP2SCHEMAOP_DECLARE(Erf); +FUNC_MSOP2SCHEMAOP_DECLARE(StridedSliceGrad); +FUNC_MSOP2SCHEMAOP_DECLARE(IsFinite); +FUNC_MSOP2SCHEMAOP_DECLARE(LinSpace); +FUNC_MSOP2SCHEMAOP_DECLARE(UniformReal); +FUNC_MSOP2SCHEMAOP_DECLARE(AbsGrad); +#endif +#endif // MINDSPORE_LITE_SRC_OPS_OPS_FUNC_DECLARE_H_ diff --git a/mindspore/lite/src/ops/ops_register.h b/mindspore/lite/src/ops/ops_register.h deleted file mode 100644 index 969f925f00..0000000000 --- a/mindspore/lite/src/ops/ops_register.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_OP_REGISTER_H -#define LITE_MINDSPORE_LITE_C_OPS_OP_REGISTER_H - -#include <map> -#include "src/ops/primitive_c.h" -namespace mindspore { -namespace lite { -class OpsRegistry { - public: - static OpsRegistry *GetInstance() { - static OpsRegistry registry; - return &registry; - } - - void InsertPrimitiveCMap(schema::PrimitiveType type, PrimitiveCCreator creator) { - primitive_creators[type] = creator; - } - PrimitiveCCreator GetPrimitiveCreator(schema::PrimitiveType type) { - if (primitive_creators.find(type) != primitive_creators.end()) { - return primitive_creators[type]; - } else { - MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(type); - return nullptr; - } - } - - protected: - std::map<schema::PrimitiveType, PrimitiveCCreator> primitive_creators; -}; - -class Registry { - public: - Registry(schema::PrimitiveType primitive_type, PrimitiveCCreator creator) { - OpsRegistry::GetInstance()->InsertPrimitiveCMap(primitive_type, creator); - } -}; - -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_OP_REGISTER_H diff --git a/mindspore/lite/src/ops/ops_utils.cc b/mindspore/lite/src/ops/ops_utils.cc new file mode 100644 index 0000000000..1e358e0f5a --- /dev/null +++ b/mindspore/lite/src/ops/ops_utils.cc @@ -0,0 +1,905 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <memory> +#include "src/ops/ops_utils.h" + +#ifdef PRIMITIVE_WRITEABLE +#include "mindspore/core/ir/anf.h" + +namespace mindspore { +namespace lite { +schema::PrimitiveT *GetPrimitiveT(const AnfNodePtr &node) { + auto prim = GetValueNode<std::shared_ptr<Primitive>>(node); + if (prim == nullptr) { + MS_LOG(DEBUG) << "primitive is nullptr"; + return nullptr; + } + + if (prim->name().empty()) { + MS_LOG(ERROR) << "the name of primitive is null"; + return nullptr; + } + + MS_LOG(INFO) << "export prim: " << prim->name(); + auto creator = MSOpsRegistry::GetInstance()->GetPrimitiveCreator(prim->name()); + if (creator != nullptr) { + return creator(node); + } else { + MS_LOG(ERROR) << "can not find MSOpsRegistry for op: " << prim->name(); + return nullptr; + } +} + +schema::PrimitiveT *AbsPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Abs>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ActivationPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Activation>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ActivationGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ActivationGrad>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AdamPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Adam>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AdderFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AdderFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AddFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AddFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AddGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AddGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AddNPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AddN>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AllPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::All>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ApplyMomentumPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ApplyMomentum>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ArgMaxFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ArgMaxFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ArgMinFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ArgMinFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AssertPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Assert>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AssignPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Assign>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AssignAddPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AssignAdd>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AudioSpectrogramPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AudioSpectrogram>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AvgPoolFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AvgPoolFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *AvgPoolGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::AvgPoolGrad>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BatchNormPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BatchNorm>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BatchToSpacePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BatchToSpace>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BatchToSpaceNDPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BatchToSpaceND>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BiasAddPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BiasAdd>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BiasAddGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BiasAddGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BNGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BatchNormGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *BroadcastToPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::BroadcastTo>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CastPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Cast>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CeilPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Ceil>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ClipPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Clip>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ConcatPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Concat>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ConstantOfShapePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ConstantOfShape>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ControlDependPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ControlDepend>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *Conv2DBackpropFilterFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Conv2DBackpropFilterFusion>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *Conv2DBackpropInputFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Conv2DBackpropInputFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *Conv2DFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Conv2DFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *Conv2dTransposeFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Conv2dTransposeFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CosPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Cos>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CropPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Crop>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CropAndResizePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::CropAndResize>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CustomExtractFeaturesPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::CustomExtractFeatures>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CustomNormalizePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::CustomNormalize>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *CustomPredictPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::CustomPredict>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DependPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Depend>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DepthToSpacePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::DepthToSpace>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DetectionPostProcessPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::DetectionPostProcess>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DivFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::DivFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DivGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::DivGrad>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DropoutPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Dropout>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *DropoutGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::DropoutGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *EltwisePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Eltwise>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *EluPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Elu>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *EmbeddingLookupFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::EmbeddingLookupFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *EqualPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Equal>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ExpandDimsPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ExpandDims>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ExpFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ExpFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FftImagPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FftImag>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FftRealPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FftReal>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FillPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Fill>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FlattenPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Flatten>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FlattenGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FlattenGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FloorPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Floor>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FloorDivPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FloorDiv>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FloorModPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FloorMod>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FullConnectionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FullConnection>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *FusedBatchNormPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::FusedBatchNorm>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *GatherPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Gather>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *GatherNdPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::GatherNd>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *GreaterPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Greater>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *GreaterEqualPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::GreaterEqual>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *HashtableLookupPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::HashtableLookup>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *InstanceNormPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::InstanceNorm>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *InvertPermutationPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::InvertPermutation>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LayerNormFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LayerNormFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LeakyReluPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LeakyRelu>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LessPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Less>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LessEqualPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LessEqual>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LogPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Log>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LogGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LogGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LogicalAndPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LogicalAnd>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LogicalNotPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LogicalNot>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LogicalOrPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LogicalOr>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LrnPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LRN>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LpNormalizationPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LpNormalization>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LshProjectionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LshProjection>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *LSTMPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::LSTM>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *L2NormalizeFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::L2NormalizeFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MatMulPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MatMul>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MaximumPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Maximum>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MaximumGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MaximumGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MaxPoolFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MaxPoolFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MaxPoolGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MaxPoolGrad>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MergePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Merge>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MfccPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Mfcc>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MinimumPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Minimum>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MinimumGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MinimumGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ModPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Mod>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MulFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MulFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *MulGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::MulGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *NegPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Neg>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *NegGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::NegGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *NotEqualPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::NotEqual>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *NonMaxSuppressionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::NonMaxSuppression>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *OneHotPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::OneHot>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *OnesLikePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::OnesLike>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *PadFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::PadFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *PartialFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::PartialFusion>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *PowerGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::PowerGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *PowFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::PowFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *PReLUFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::PReLUFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *QuantDTypeCastPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::QuantDTypeCast>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RangePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Range>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RankPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Rank>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RealDivPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::RealDiv>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ReciprocalPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Reciprocal>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ReduceFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ReduceFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ReshapePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Reshape>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ResizePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Resize>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ReverseV2PrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ReverseV2>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ReverseSequencePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ReverseSequence>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RfftPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Rfft>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ROIPoolingPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ROIPooling>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RoundPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Round>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *RsqrtPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Rsqrt>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ScaleFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ScaleFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ScatterNdPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ScatterNd>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SelectPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Select>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SGDPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SGD>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *ShapePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Shape>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SigmoidCrossEntropyWithLogitsPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SigmoidCrossEntropyWithLogits>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SigmoidCrossEntropyWithLogitsGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SigmoidCrossEntropyWithLogitsGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SinPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Sin>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SizePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Size>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SkipGramPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SkipGram>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SliceFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SliceFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SmoothL1LossPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SmoothL1Loss>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SmoothL1LossGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SmoothL1LossGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SoftmaxPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Softmax>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SoftmaxCrossEntropyWithLogitsPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SoftmaxCrossEntropyWithLogits>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SpaceToBatchPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SpaceToBatch>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SpaceToBatchNDPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SpaceToBatchND>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SpaceToDepthPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SpaceToDepth>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SparseToDensePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SparseToDense>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SplitPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Split>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SqrtPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Sqrt>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SquarePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Square>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SquaredDifferencePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SquaredDifference>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SqueezePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Squeeze>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *StackPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Stack>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *StridedSlicePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::StridedSlice>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SubFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SubFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SubGradPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::SubGrad>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *SwitchPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Switch>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TensorListFromTensorPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TensorListFromTensor>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TensorListGetItemPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TensorListGetItem>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TensorListReservePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TensorListReserve>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TensorListSetItemPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TensorListSetItem>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TensorListStackPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TensorListStack>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TileFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TileFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TopKFusionPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::TopKFusion>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *TransposePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Transpose>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *UniquePrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Unique>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *UnstackPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Unstack>>(node); + return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr; +} +schema::PrimitiveT *UnsortedSegmentSumPrimitiveCreator(const AnfNodePtr &node) { + auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::UnsortedSegmentSum>>(node); + return ms_primc != nullptr ? 
ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
+}
+schema::PrimitiveT *UnsqueezePrimitiveCreator(const AnfNodePtr &node) {
+  auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Unsqueeze>>(node);
+  return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
+}
+schema::PrimitiveT *WherePrimitiveCreator(const AnfNodePtr &node) {
+  auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::Where>>(node);
+  return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
+}
+schema::PrimitiveT *ZerosLikePrimitiveCreator(const AnfNodePtr &node) {
+  auto ms_primc = GetValueNode<std::shared_ptr<mindspore::ops::ZerosLike>>(node);
+  return ms_primc != nullptr ? ops::MSOp2SchemaOp(ms_primc.get()) : nullptr;
+}
+
+RegistryMSOps g_absPrimitiveCreatorRegistry("Abs", AbsPrimitiveCreator);
+RegistryMSOps g_activationPrimitiveCreatorRegistry("Activation", ActivationPrimitiveCreator);
+RegistryMSOps g_activationGradPrimitiveCreatorRegistry("ActivationGrad", ActivationGradPrimitiveCreator);
+RegistryMSOps g_reluGradPrimitiveCreatorRegistry("ReluGrad", ActivationGradPrimitiveCreator);  // ReluGrad reuses the ActivationGrad creator
+RegistryMSOps g_addPrimitiveCreatorRegistry("Add", AddFusionPrimitiveCreator);
+RegistryMSOps g_addFusionPrimitiveCreatorRegistry("AddFusion", AddFusionPrimitiveCreator);
+RegistryMSOps g_addGradPrimitiveCreatorRegistry("AddGrad", AddGradPrimitiveCreator);
+RegistryMSOps g_adamPrimitiveCreatorRegistry("Adam", AdamPrimitiveCreator);
+RegistryMSOps g_adderPrimitiveCreatorRegistry("Adder", AdderFusionPrimitiveCreator);
+RegistryMSOps g_adderFusionPrimitiveCreatorRegistry("AdderFusion", AdderFusionPrimitiveCreator);
+RegistryMSOps g_addNPrimitiveCreatorRegistry("AddN", AddNPrimitiveCreator);
+RegistryMSOps g_allPrimitiveCreatorRegistry("All", AllPrimitiveCreator);
+RegistryMSOps g_applyMomentumPrimitiveCreatorRegistry("ApplyMomentum", ApplyMomentumPrimitiveCreator);
+RegistryMSOps g_argMaxPrimitiveCreatorRegistry("ArgMax", ArgMaxFusionPrimitiveCreator);
+RegistryMSOps g_argMaxFusionPrimitiveCreatorRegistry("ArgMaxFusion", ArgMaxFusionPrimitiveCreator);
+RegistryMSOps g_argMinPrimitiveCreatorRegistry("ArgMin", ArgMinFusionPrimitiveCreator);
+RegistryMSOps g_argMinFusionPrimitiveCreatorRegistry("ArgMinFusion", ArgMinFusionPrimitiveCreator);
+RegistryMSOps g_assertPrimitiveCreatorRegistry("Assert", AssertPrimitiveCreator);
+RegistryMSOps g_assignPrimitiveCreatorRegistry("Assign", AssignPrimitiveCreator);
+RegistryMSOps g_assignAddPrimitiveCreatorRegistry("AssignAdd", AssignAddPrimitiveCreator);
+RegistryMSOps g_audioSpectrogramPrimitiveCreatorRegistry("AudioSpectrogram", AudioSpectrogramPrimitiveCreator);
+RegistryMSOps g_avgPoolPrimitiveCreatorRegistry("AvgPool", AvgPoolFusionPrimitiveCreator);
+RegistryMSOps g_avgPoolFusionPrimitiveCreatorRegistry("AvgPoolFusion", AvgPoolFusionPrimitiveCreator);
+RegistryMSOps g_avgPoolGradPrimitiveCreatorRegistry("AvgPoolGrad", AvgPoolGradPrimitiveCreator);
+RegistryMSOps g_batchNormPrimitiveCreatorRegistry("BatchNorm", BatchNormPrimitiveCreator);
+RegistryMSOps g_batchToSpacePrimitiveCreatorRegistry("BatchToSpace", BatchToSpacePrimitiveCreator);
+RegistryMSOps g_batchToSpaceNDPrimitiveCreatorRegistry("BatchToSpaceND", BatchToSpaceNDPrimitiveCreator);
+RegistryMSOps g_biasAddPrimitiveCreatorRegistry("BiasAdd", BiasAddPrimitiveCreator);
+RegistryMSOps g_biasGradPrimitiveCreatorRegistry("BiasGrad", BiasAddGradPrimitiveCreator);
+RegistryMSOps g_biasAddGradPrimitiveCreatorRegistry("BiasAddGrad", BiasAddGradPrimitiveCreator);
+RegistryMSOps g_bNGradPrimitiveCreatorRegistry("BatchNormGrad", BNGradPrimitiveCreator);
+RegistryMSOps g_broadcastToPrimitiveCreatorRegistry("BroadcastTo", BroadcastToPrimitiveCreator);
+RegistryMSOps g_castPrimitiveCreatorRegistry("Cast", CastPrimitiveCreator);
+RegistryMSOps g_ceilPrimitiveCreatorRegistry("Ceil", CeilPrimitiveCreator);
+RegistryMSOps g_clipPrimitiveCreatorRegistry("Clip", ClipPrimitiveCreator);
+RegistryMSOps g_concatPrimitiveCreatorRegistry("Concat", ConcatPrimitiveCreator);
+RegistryMSOps g_controlDependPrimitiveCreatorRegistry("ControlDepend", ControlDependPrimitiveCreator);
+RegistryMSOps g_conv2DBackpropFilterFusionPrimitiveCreatorRegistry("Conv2DBackpropFilterFusion",
+                                                                   Conv2DBackpropFilterFusionPrimitiveCreator);
+RegistryMSOps g_conv2DBackpropInputFusionPrimitiveCreatorRegistry("Conv2DBackpropInputFusion",
+                                                                  Conv2DBackpropInputFusionPrimitiveCreator);
+RegistryMSOps g_conv2DPrimitiveCreatorRegistry("Conv2D", Conv2DFusionPrimitiveCreator);
+RegistryMSOps g_conv2DFusionPrimitiveCreatorRegistry("Conv2DFusion", Conv2DFusionPrimitiveCreator);
+RegistryMSOps g_conv2dTransposePrimitiveCreatorRegistry("Conv2dTranspose", Conv2dTransposeFusionPrimitiveCreator);
+RegistryMSOps g_conv2dTransposeFusionPrimitiveCreatorRegistry("Conv2dTransposeFusion",
+                                                              Conv2dTransposeFusionPrimitiveCreator);
+RegistryMSOps g_constantOfShapePrimitiveCreatorRegistry("ConstantOfShape", ConstantOfShapePrimitiveCreator);
+RegistryMSOps g_cosPrimitiveCreatorRegistry("Cos", CosPrimitiveCreator);
+RegistryMSOps g_cropPrimitiveCreatorRegistry("Crop", CropPrimitiveCreator);
+RegistryMSOps g_cropAndResizePrimitiveCreatorRegistry("CropAndResize", CropAndResizePrimitiveCreator);
+RegistryMSOps g_customExtractFeaturesPrimitiveCreatorRegistry("CustomExtractFeatures",
+                                                              CustomExtractFeaturesPrimitiveCreator);
+RegistryMSOps g_customNormalizePrimitiveCreatorRegistry("CustomNormalize", CustomNormalizePrimitiveCreator);
+RegistryMSOps g_customPredictPrimitiveCreatorRegistry("CustomPredict", CustomPredictPrimitiveCreator);
+RegistryMSOps g_dependPrimitiveCreatorRegistry("Depend", DependPrimitiveCreator);
+RegistryMSOps g_depthToSpacePrimitiveCreatorRegistry("DepthToSpace", DepthToSpacePrimitiveCreator);
+RegistryMSOps g_detectionPostProcessPrimitiveCreatorRegistry("DetectionPostProcess",
+                                                             DetectionPostProcessPrimitiveCreator);
+RegistryMSOps g_divPrimitiveCreatorRegistry("Div", DivFusionPrimitiveCreator);
+RegistryMSOps g_divFusionPrimitiveCreatorRegistry("DivFusion", DivFusionPrimitiveCreator);
+RegistryMSOps g_divGradPrimitiveCreatorRegistry("DivGrad", DivGradPrimitiveCreator);
+RegistryMSOps g_dropoutPrimitiveCreatorRegistry("Dropout", DropoutPrimitiveCreator);
+RegistryMSOps g_dropoutGradPrimitiveCreatorRegistry("DropoutGrad", DropoutGradPrimitiveCreator);
+RegistryMSOps g_eltwisePrimitiveCreatorRegistry("Eltwise", EltwisePrimitiveCreator);
+RegistryMSOps g_eluPrimitiveCreatorRegistry("Elu", EluPrimitiveCreator);
+RegistryMSOps g_equalPrimitiveCreatorRegistry("Equal", EqualPrimitiveCreator);
+RegistryMSOps g_embeddingLookupFusionPrimitiveCreatorRegistry("EmbeddingLookupFusion",
+                                                              EmbeddingLookupFusionPrimitiveCreator);
+RegistryMSOps g_expandDimsPrimitiveCreatorRegistry("ExpandDims", ExpandDimsPrimitiveCreator);
+RegistryMSOps g_expPrimitiveCreatorRegistry("Exp", ExpFusionPrimitiveCreator);
+RegistryMSOps g_expFusionPrimitiveCreatorRegistry("ExpFusion", ExpFusionPrimitiveCreator);
+RegistryMSOps g_fftImagPrimitiveCreatorRegistry("FftImag", FftImagPrimitiveCreator);
+RegistryMSOps g_fftRealPrimitiveCreatorRegistry("FftReal", FftRealPrimitiveCreator);
+RegistryMSOps g_fillPrimitiveCreatorRegistry("Fill", FillPrimitiveCreator);
+RegistryMSOps g_flattenPrimitiveCreatorRegistry("Flatten", FlattenPrimitiveCreator);
+RegistryMSOps g_flattenGradPrimitiveCreatorRegistry("FlattenGrad", FlattenGradPrimitiveCreator);
+RegistryMSOps g_floorPrimitiveCreatorRegistry("Floor", FloorPrimitiveCreator);
+RegistryMSOps g_floorDivPrimitiveCreatorRegistry("FloorDiv", FloorDivPrimitiveCreator);
+RegistryMSOps g_floorModPrimitiveCreatorRegistry("FloorMod", FloorModPrimitiveCreator);
+RegistryMSOps g_fullConnectionPrimitiveCreatorRegistry("FullConnection", FullConnectionPrimitiveCreator);
+RegistryMSOps g_fusedBatchNormPrimitiveCreatorRegistry("FusedBatchNorm", FusedBatchNormPrimitiveCreator);
+RegistryMSOps g_gatherPrimitiveCreatorRegistry("Gather", GatherPrimitiveCreator);
+RegistryMSOps g_gatherNdPrimitiveCreatorRegistry("GatherNd", GatherNdPrimitiveCreator);
+RegistryMSOps g_greaterPrimitiveCreatorRegistry("Greater", GreaterPrimitiveCreator);
+RegistryMSOps g_greaterEqualPrimitiveCreatorRegistry("GreaterEqual", GreaterEqualPrimitiveCreator);
+RegistryMSOps g_hashtableLookupPrimitiveCreatorRegistry("HashtableLookup", HashtableLookupPrimitiveCreator);
+RegistryMSOps g_instanceNormPrimitiveCreatorRegistry("InstanceNorm", InstanceNormPrimitiveCreator);
+RegistryMSOps g_invertPermutationPrimitiveCreatorRegistry("InvertPermutation", InvertPermutationPrimitiveCreator);
+RegistryMSOps g_layerNormPrimitiveCreatorRegistry("LayerNorm", LayerNormFusionPrimitiveCreator);
+RegistryMSOps g_layerNormFusionPrimitiveCreatorRegistry("LayerNormFusion", LayerNormFusionPrimitiveCreator);
+RegistryMSOps g_leakyReluPrimitiveCreatorRegistry("LeakyRelu", LeakyReluPrimitiveCreator);
+RegistryMSOps g_lessPrimitiveCreatorRegistry("Less", LessPrimitiveCreator);
+RegistryMSOps g_lessEqualPrimitiveCreatorRegistry("LessEqual", LessEqualPrimitiveCreator);
+RegistryMSOps g_logPrimitiveCreatorRegistry("Log", LogPrimitiveCreator);
+RegistryMSOps g_logGradPrimitiveCreatorRegistry("LogGrad", LogGradPrimitiveCreator);
+RegistryMSOps g_logicalAndPrimitiveCreatorRegistry("LogicalAnd", LogicalAndPrimitiveCreator);
+RegistryMSOps g_logicalNotPrimitiveCreatorRegistry("LogicalNot", LogicalNotPrimitiveCreator);
+RegistryMSOps g_logicalOrPrimitiveCreatorRegistry("LogicalOr", LogicalOrPrimitiveCreator);
+RegistryMSOps g_lpNormalizationPrimitiveCreatorRegistry("LpNormalization", LpNormalizationPrimitiveCreator);
+RegistryMSOps g_lrnPrimitiveCreatorRegistry("LRN", LrnPrimitiveCreator);
+RegistryMSOps g_lshProjectionPrimitiveCreatorRegistry("LshProjection", LshProjectionPrimitiveCreator);
+RegistryMSOps g_lSTMPrimitiveCreatorRegistry("LSTM", LSTMPrimitiveCreator);
+RegistryMSOps g_l2NormalizeFusionPrimitiveCreatorRegistry("L2NormalizeFusion", L2NormalizeFusionPrimitiveCreator);
+RegistryMSOps g_matMulPrimitiveCreatorRegistry("MatMul", MatMulPrimitiveCreator);
+RegistryMSOps g_maximumPrimitiveCreatorRegistry("Maximum", MaximumPrimitiveCreator);
+RegistryMSOps g_maximumGradPrimitiveCreatorRegistry("MaximumGrad", MaximumGradPrimitiveCreator);
+RegistryMSOps g_maxPoolPrimitiveCreatorRegistry("MaxPool", MaxPoolFusionPrimitiveCreator);
+RegistryMSOps g_maxPoolFusionPrimitiveCreatorRegistry("MaxPoolFusion", MaxPoolFusionPrimitiveCreator);
+RegistryMSOps g_maxPoolGradPrimitiveCreatorRegistry("MaxPoolGrad", MaxPoolGradPrimitiveCreator);
+RegistryMSOps g_mergePrimitiveCreatorRegistry("Merge", MergePrimitiveCreator);
+RegistryMSOps g_mfccPrimitiveCreatorRegistry("Mfcc", MfccPrimitiveCreator);
+RegistryMSOps g_minimumPrimitiveCreatorRegistry("Minimum", MinimumPrimitiveCreator);
+RegistryMSOps g_minimumGradPrimitiveCreatorRegistry("MinimumGrad", MinimumGradPrimitiveCreator);
+RegistryMSOps g_modPrimitiveCreatorRegistry("Mod", ModPrimitiveCreator);
+RegistryMSOps g_mulPrimitiveCreatorRegistry("Mul", MulFusionPrimitiveCreator);
+RegistryMSOps g_mulFusionPrimitiveCreatorRegistry("MulFusion", MulFusionPrimitiveCreator);
+RegistryMSOps g_mulGradPrimitiveCreatorRegistry("MulGrad", MulGradPrimitiveCreator);
+RegistryMSOps g_negPrimitiveCreatorRegistry("Neg", NegPrimitiveCreator);
+RegistryMSOps g_negGradPrimitiveCreatorRegistry("NegGrad", NegGradPrimitiveCreator);
+RegistryMSOps g_nonMaxSuppressionPrimitiveCreatorRegistry("NonMaxSuppression", NonMaxSuppressionPrimitiveCreator);
+RegistryMSOps g_notEqualPrimitiveCreatorRegistry("NotEqual", NotEqualPrimitiveCreator);
+RegistryMSOps g_oneHotPrimitiveCreatorRegistry("OneHot", OneHotPrimitiveCreator);
+RegistryMSOps g_onesLikePrimitiveCreatorRegistry("OnesLike", OnesLikePrimitiveCreator);
+RegistryMSOps g_padPrimitiveCreatorRegistry("Pad", PadFusionPrimitiveCreator);
+RegistryMSOps g_padFusionPrimitiveCreatorRegistry("PadFusion", PadFusionPrimitiveCreator);
+RegistryMSOps g_partialFusionPrimitiveCreatorRegistry("PartialFusion", PartialFusionPrimitiveCreator);
+RegistryMSOps g_powerGradPrimitiveCreatorRegistry("PowerGrad", PowerGradPrimitiveCreator);
+RegistryMSOps g_powFusionPrimitiveCreatorRegistry("PowFusion", PowFusionPrimitiveCreator);
+RegistryMSOps g_pReLUFusionPrimitiveCreatorRegistry("PReLUFusion", PReLUFusionPrimitiveCreator);
+RegistryMSOps g_rangePrimitiveCreatorRegistry("Range", RangePrimitiveCreator);
+RegistryMSOps g_rankPrimitiveCreatorRegistry("Rank", RankPrimitiveCreator);
+RegistryMSOps g_reciprocalPrimitiveCreatorRegistry("Reciprocal", ReciprocalPrimitiveCreator);
+RegistryMSOps g_realDivPrimitiveCreatorRegistry("RealDiv", RealDivPrimitiveCreator);
+RegistryMSOps g_reducePrimitiveCreatorRegistry("Reduce", ReduceFusionPrimitiveCreator);
+RegistryMSOps g_reduceFusionPrimitiveCreatorRegistry("ReduceFusion", ReduceFusionPrimitiveCreator);
+RegistryMSOps g_reshapePrimitiveCreatorRegistry("Reshape", ReshapePrimitiveCreator);
+RegistryMSOps g_resizePrimitiveCreatorRegistry("Resize", ResizePrimitiveCreator);
+RegistryMSOps g_reverseV2PrimitiveCreatorRegistry("ReverseV2", ReverseV2PrimitiveCreator);
+RegistryMSOps g_reverseSequencePrimitiveCreatorRegistry("ReverseSequence", ReverseSequencePrimitiveCreator);
+RegistryMSOps g_rfftPrimitiveCreatorRegistry("Rfft", RfftPrimitiveCreator);
+RegistryMSOps g_rOIPoolingPrimitiveCreatorRegistry("ROIPooling", ROIPoolingPrimitiveCreator);
+RegistryMSOps g_roundPrimitiveCreatorRegistry("Round", RoundPrimitiveCreator);
+RegistryMSOps g_rsqrtPrimitiveCreatorRegistry("Rsqrt", RsqrtPrimitiveCreator);
+RegistryMSOps g_quantDTypeCastPrimitiveCreatorRegistry("QuantDTypeCast", QuantDTypeCastPrimitiveCreator);
+RegistryMSOps g_scalePrimitiveCreatorRegistry("Scale", ScaleFusionPrimitiveCreator);
+RegistryMSOps g_scaleFusionPrimitiveCreatorRegistry("ScaleFusion", ScaleFusionPrimitiveCreator);
+RegistryMSOps g_scatterNdPrimitiveCreatorRegistry("ScatterNd", ScatterNdPrimitiveCreator);
+RegistryMSOps g_selectPrimitiveCreatorRegistry("Select", SelectPrimitiveCreator);
+RegistryMSOps g_SGDPrimitiveCreatorRegistry("SGD", SGDPrimitiveCreator);
+RegistryMSOps g_shapePrimitiveCreatorRegistry("Shape", ShapePrimitiveCreator);
+RegistryMSOps g_sigmoidCrossEntropyWithLogitsPrimitiveCreatorRegistry("SigmoidCrossEntropyWithLogits",
+                                                                      SigmoidCrossEntropyWithLogitsPrimitiveCreator);
+RegistryMSOps g_sigmoidCrossEntropyWithLogitsGradPrimitiveCreatorRegistry(
+  "SigmoidCrossEntropyWithLogitsGrad", SigmoidCrossEntropyWithLogitsGradPrimitiveCreator);
+RegistryMSOps g_sinPrimitiveCreatorRegistry("Sin", SinPrimitiveCreator);
+RegistryMSOps g_sizePrimitiveCreatorRegistry("Size", SizePrimitiveCreator);
+RegistryMSOps g_skipGramPrimitiveCreatorRegistry("SkipGram", SkipGramPrimitiveCreator);
+RegistryMSOps g_sliceFusionPrimitiveCreatorRegistry("SliceFusion", SliceFusionPrimitiveCreator);
+RegistryMSOps g_smoothL1LossPrimitiveCreatorRegistry("SmoothL1Loss", SmoothL1LossPrimitiveCreator);
+RegistryMSOps g_smoothL1LossGradPrimitiveCreatorRegistry("SmoothL1LossGrad", SmoothL1LossGradPrimitiveCreator);
+RegistryMSOps g_softmaxPrimitiveCreatorRegistry("Softmax", SoftmaxPrimitiveCreator);
+RegistryMSOps g_softmaxCrossEntropyWithLogitsPrimitiveCreatorRegistry("SoftmaxCrossEntropyWithLogits",
+                                                                      SoftmaxCrossEntropyWithLogitsPrimitiveCreator);
+RegistryMSOps g_spaceToBatchPrimitiveCreatorRegistry("SpaceToBatch", SpaceToBatchPrimitiveCreator);
+RegistryMSOps g_spaceToBatchNDPrimitiveCreatorRegistry("SpaceToBatchND", SpaceToBatchNDPrimitiveCreator);
+RegistryMSOps g_spaceToDepthPrimitiveCreatorRegistry("SpaceToDepth", SpaceToDepthPrimitiveCreator);
+RegistryMSOps g_sparseToDensePrimitiveCreatorRegistry("SparseToDense", SparseToDensePrimitiveCreator);
+RegistryMSOps g_splitPrimitiveCreatorRegistry("Split", SplitPrimitiveCreator);
+RegistryMSOps g_sqrtPrimitiveCreatorRegistry("Sqrt", SqrtPrimitiveCreator);
+RegistryMSOps g_squeezePrimitiveCreatorRegistry("Squeeze", SqueezePrimitiveCreator);
+RegistryMSOps g_squarePrimitiveCreatorRegistry("Square", SquarePrimitiveCreator);
+RegistryMSOps g_squaredDifferencePrimitiveCreatorRegistry("SquaredDifference", SquaredDifferencePrimitiveCreator);
+RegistryMSOps g_stackPrimitiveCreatorRegistry("Stack", StackPrimitiveCreator);
+RegistryMSOps g_stridedSlicePrimitiveCreatorRegistry("StridedSlice", StridedSlicePrimitiveCreator);
+RegistryMSOps g_subPrimitiveCreatorRegistry("Sub", SubFusionPrimitiveCreator);
+RegistryMSOps g_subFusionPrimitiveCreatorRegistry("SubFusion", SubFusionPrimitiveCreator);
+RegistryMSOps g_subGradPrimitiveCreatorRegistry("SubGrad", SubGradPrimitiveCreator);
+RegistryMSOps g_switchPrimitiveCreatorRegistry("Switch", SwitchPrimitiveCreator);
+RegistryMSOps g_tensorListFromTensorPrimitiveCreatorRegistry("TensorListFromTensor",
+                                                             TensorListFromTensorPrimitiveCreator);
+RegistryMSOps g_tensorListGetItemPrimitiveCreatorRegistry("TensorListGetItem", TensorListGetItemPrimitiveCreator);
+RegistryMSOps g_tensorListReservePrimitiveCreatorRegistry("TensorListReserve", TensorListReservePrimitiveCreator);
+RegistryMSOps g_tensorListSetItemPrimitiveCreatorRegistry("TensorListSetItem", TensorListSetItemPrimitiveCreator);
+RegistryMSOps g_tensorListStackPrimitiveCreatorRegistry("TensorListStack", TensorListStackPrimitiveCreator);
+RegistryMSOps g_tileFusionPrimitiveCreatorRegistry("TileFusion", TileFusionPrimitiveCreator);
+RegistryMSOps g_topKPrimitiveCreatorRegistry("TopK", TopKFusionPrimitiveCreator);
+RegistryMSOps g_topKFusionPrimitiveCreatorRegistry("TopKFusion", TopKFusionPrimitiveCreator);
+RegistryMSOps g_transposePrimitiveCreatorRegistry("Transpose", TransposePrimitiveCreator);
+RegistryMSOps g_uniquePrimitiveCreatorRegistry("Unique", UniquePrimitiveCreator);
+RegistryMSOps g_unstackPrimitiveCreatorRegistry("Unstack", UnstackPrimitiveCreator);
+RegistryMSOps g_unsortedSegmentSumPrimitiveCreatorRegistry("UnsortedSegmentSum", UnsortedSegmentSumPrimitiveCreator);
+RegistryMSOps g_unsqueezePrimitiveCreatorRegistry("Unsqueeze", UnsqueezePrimitiveCreator);
+RegistryMSOps g_wherePrimitiveCreatorRegistry("Where", WherePrimitiveCreator);
+RegistryMSOps g_zerosLikePrimitiveCreatorRegistry("ZerosLike", ZerosLikePrimitiveCreator);
+}  // namespace lite
+}  // namespace mindspore
+
+#endif
diff --git a/mindspore/lite/src/ops/ops_utils.h b/mindspore/lite/src/ops/ops_utils.h
new file mode 100644
index 0000000000..c7b7fcb25e
--- /dev/null
+++ b/mindspore/lite/src/ops/ops_utils.h
@@ -0,0 +1,62 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MINDSPORE_LITE_SRC_OPS_MS_OPS_UTILS_H_
+#define MINDSPORE_LITE_SRC_OPS_MS_OPS_UTILS_H_
+
+#include <map>
+#include <string>
+#include "src/ops/ops_func_declare.h"
+
+#ifdef PRIMITIVE_WRITEABLE
+namespace mindspore {
+namespace lite {
+typedef schema::PrimitiveT *(*PrimitiveTCreator)(const AnfNodePtr &node);
+
+class MSOpsRegistry {
+ public:
+  static MSOpsRegistry *GetInstance() {
+    static MSOpsRegistry registry;
+    return &registry;
+  }
+  void InsertPrimitiveTMap(const std::string &name, PrimitiveTCreator creator) { primitive_creators[name] = creator; }
+  PrimitiveTCreator GetPrimitiveCreator(const std::string &name) {
+    auto iter = primitive_creators.find(name);
+    if (iter == primitive_creators.end()) {
+      MS_LOG(ERROR) << "Unsupported primitive type in Create: " << name;
+      return nullptr;
+    }
+    return iter->second;
+  }
+
+ protected:
+  std::map<std::string, PrimitiveTCreator> primitive_creators;
+};
+
+class RegistryMSOps {
+ public:
+  RegistryMSOps(const std::string &name, PrimitiveTCreator creator) {
+    MSOpsRegistry::GetInstance()->InsertPrimitiveTMap(name, creator);
+  }
+  ~RegistryMSOps() = default;
+};
+
+schema::PrimitiveT *GetPrimitiveT(const mindspore::AnfNodePtr &node);
+}  // namespace lite
+}  // namespace mindspore
+#endif
+
+#endif  // MINDSPORE_LITE_SRC_OPS_MS_OPS_UTILS_H_
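Note: the RegistryMSOps objects above populate the MSOpsRegistry singleton (declared in ops_utils.h above) during static initialization, so each op name resolves to its PrimitiveT creator by the time conversion runs. A minimal sketch of how a dispatcher such as GetPrimitiveT could use the registry — illustrative only: the body below is an assumption for readability, not the shipped implementation (which presumably lives in the accompanying ops_utils.cc); GetValueNode and Primitive::name follow the same patterns used by the creators above.

    schema::PrimitiveT *GetPrimitiveT(const AnfNodePtr &node) {
      // Every candidate value node carries a Primitive whose name() matches a registered key.
      auto prim = GetValueNode<std::shared_ptr<Primitive>>(node);
      if (prim == nullptr) {
        MS_LOG(ERROR) << "the node is not a primitive value node";
        return nullptr;
      }
      // e.g. name() == "AddFusion" -> AddFusionPrimitiveCreator
      auto creator = MSOpsRegistry::GetInstance()->GetPrimitiveCreator(prim->name());
      if (creator == nullptr) {
        return nullptr;  // GetPrimitiveCreator already logged the unsupported type
      }
      // The creator re-checks the concrete ops::Xxx type, so it may still
      // return nullptr when the node holds a different primitive.
      return creator(node);
    }

Aliases such as "Add"/"AddFusion" or "Conv2D"/"Conv2DFusion" deliberately map to the same creator, so both spellings of an op resolve to one schema type.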
diff --git a/mindspore/lite/src/ops/p_relu.cc b/mindspore/lite/src/ops/p_relu.cc
deleted file mode 100644
index 6600a44396..0000000000
--- a/mindspore/lite/src/ops/p_relu.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/p_relu.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-bool PReLU::GetChannelShared() const { return this->primitive_->value.AsPReLU()->channelShared; }
-
-void PReLU::SetChannelShared(bool channel_shared) { this->primitive_->value.AsPReLU()->channelShared = channel_shared; }
-
-#else
-bool PReLU::GetChannelShared() const { return this->primitive_->value_as_PReLU()->channelShared(); }
-
-int PReLU::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_PReLU();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_PReLU return nullptr";
-    return RET_ERROR;
-  }
-  std::vector<float> slope;
-  if (attr->slope() != nullptr) {
-    for (int i = 0; i < static_cast<int>(attr->slope()->size()); i++) {
-      slope.push_back(attr->slope()->data()[i]);
-    }
-  }
-  auto val_offset = schema::CreatePReLUDirect(*fbb, attr->channelShared(), &slope);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_PReLU, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *PReLUCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<PReLU>(primitive); }
-Registry PReLURegistry(schema::PrimitiveType_PReLU, PReLUCreator);
-
-#endif
-
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/p_relu.h b/mindspore/lite/src/ops/p_relu.h
deleted file mode 100644
index c8fb191266..0000000000
--- a/mindspore/lite/src/ops/p_relu.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
-#define LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include <memory>
-
-#include "src/ops/activation.h"
-
-namespace mindspore {
-namespace lite {
-class PReLU : public Activation {
- public:
-  PReLU() = default;
-  ~PReLU() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(PReLU, Activation);
-  explicit PReLU(schema::PrimitiveT *primitive) : Activation(primitive) {}
-  void SetChannelShared(bool channel_shared);
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  bool GetChannelShared() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_P_RELU_H_
diff --git a/mindspore/lite/src/ops/pad.cc b/mindspore/lite/src/ops/pad.cc
deleted file mode 100644
index 951b9d4f5f..0000000000
--- a/mindspore/lite/src/ops/pad.cc
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/pad.h"
-#include <string>
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-std::vector<int> Pad::GetPaddings() const { return this->primitive_->value.AsPad()->paddings; }
-int Pad::GetPaddingMode() const { return this->primitive_->value.AsPad()->paddingMode; }
-float Pad::GetConstantValue() const { return this->primitive_->value.AsPad()->constantValue; }
-
-void Pad::SetPaddings(const std::vector<int> &paddings) { this->primitive_->value.AsPad()->paddings = paddings; }
-void Pad::SetPaddingMode(int padding_mode) {
-  this->primitive_->value.AsPad()->paddingMode = (schema::PaddingMode)padding_mode;
-}
-void Pad::SetConstantValue(float constant_value) { this->primitive_->value.AsPad()->constantValue = constant_value; }
-int Pad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_Pad;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_Pad) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = new (std::nothrow) schema::PadT();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    string paddingmode = "REFLECT";
-    if (prim.GetAttr("mode") == nullptr) {
-      if (prim.name() == "Pad") {
-        paddingmode = "CONSTANT";
-      } else {
-        MS_LOG(ERROR) << "get mode failed!";
-        delete this->primitive_;
-        delete attr;
-        this->primitive_ = nullptr;
-        attr = nullptr;
-        return RET_ERROR;
-      }
-    } else {
-      paddingmode = GetValue<string>(prim.GetAttr("mode"));
-    }
-    if (paddingmode == "REFLECT") {
-      attr->paddingMode = schema::PaddingMode_REFLECT;
-    } else if (paddingmode == "SYMMETRIC") {
-      attr->paddingMode = schema::PaddingMode_SYMMETRIC;
-    } else if (paddingmode == "CONSTANT") {
-      attr->paddingMode = schema::PaddingMode_CONSTANT;
-      if (prim.GetAttr("paddings") != nullptr) {
-        auto paddings = prim.GetAttr("paddings");
-        auto str = (*paddings).ToString();
-        std::replace(str.begin(), str.end(), ',', ' ');
-        std::replace(str.begin(), str.end(), ')', ' ');
-        std::replace(str.begin(), str.end(), '(', ' ');
-        std::stringstream ss(str);
-        for (int i; ss >> i;) {
-          attr->paddings.push_back(i);
-        }
-      }
-    } else {
-      MS_LOG(ERROR) << "model type not supported!";
-      delete this->primitive_;
-      delete attr;
-      this->primitive_ = nullptr;
-      attr = nullptr;
-      return RET_ERROR;
-    }
-    this->primitive_->value.value = attr;
-  }
-  return RET_OK;
-}
-
-#else
-
-std::vector<int> Pad::GetPaddings() const {
-  auto fb_vector = this->primitive_->value_as_Pad()->paddings();
-  return std::vector<int>(fb_vector->begin(), fb_vector->end());
-}
-int Pad::GetPaddingMode() const { return this->primitive_->value_as_Pad()->paddingMode(); }
-float Pad::GetConstantValue() const { return this->primitive_->value_as_Pad()->constantValue(); }
const { return this->primitive_->value_as_Pad()->constantValue(); } - -int Pad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Pad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Pad return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> paddings; - if (attr->paddings() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->paddings()->size()); i++) { - paddings.push_back(attr->paddings()->data()[i]); - } - } - auto val_offset = schema::CreatePadDirect(*fbb, &paddings, attr->paddingMode(), attr->constantValue()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Pad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PadCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Pad>(primitive); } -Registry PadRegistry(schema::PrimitiveType_Pad, PadCreator); -#endif - -int GetPaddingFromInput(const std::vector<Tensor *> &inputs, std::vector<int> *paddings) { - auto paddings_tensor = inputs.at(1); - int rank = static_cast<int>(inputs.front()->shape().size()); - MS_ASSERT(paddings_tensor->ElementsNum() == 2 * rank); - if (paddings_tensor->data_c() == nullptr) { - return RET_INFER_ERR; - } - paddings->clear(); - if (paddings_tensor->data_type() == mindspore::kNumberTypeInt64) { - auto paddings_data = reinterpret_cast<int64_t *>(paddings_tensor->data_c()); - for (auto i = 0; i < rank; ++i) { - paddings->emplace_back(paddings_data[i * 2]); - paddings->emplace_back(paddings_data[i * 2 + 1]); - } - } else if (paddings_tensor->data_type() == mindspore::kNumberTypeInt32) { - auto paddings_data = reinterpret_cast<int32_t *>(paddings_tensor->data_c()); - for (auto i = 0; i < rank; ++i) { - paddings->emplace_back(paddings_data[i * 2]); - paddings->emplace_back(paddings_data[i * 2 + 1]); - } - } - return RET_OK; -} - -int Pad::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (this->primitive_ == nullptr) { - return RET_NULL_PTR; - } - - auto input = inputs.front(); - if (input == nullptr) { - return RET_NULL_PTR; - } - auto output = outputs.front(); - if (output == nullptr) { - return RET_NULL_PTR; - } - output->set_format(input->format()); - output->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> paddings; - if (inputs.size() == 1) { - paddings = GetPaddings(); - } else { - GetPaddingFromInput(inputs, &paddings); - } - - if (paddings.empty()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - std::vector<int> output_shape; - MS_ASSERT(input->shape().size() <= 4); - for (size_t i = 0; i < input_shape.size(); i++) { - auto paddings_index = i; - auto shape = input_shape.at(i) + paddings.at(2 * paddings_index) + paddings.at(2 * paddings_index + 1); - output_shape.push_back(shape); - } - - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/pad.h b/mindspore/lite/src/ops/pad.h deleted file mode 100644 index af18c746a5..0000000000 --- a/mindspore/lite/src/ops/pad.h +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_PAD_H_ -#define LITE_MINDSPORE_LITE_C_OPS_PAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Pad : public PrimitiveC { - public: - Pad() = default; - ~Pad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Pad, PrimitiveC); - explicit Pad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetPaddings(const std::vector<int> &paddings); - void SetPaddingMode(int padding_mode); - void SetConstantValue(float constant_value); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetPaddings() const; - int GetPaddingMode() const; - float GetConstantValue() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_PAD_H_ diff --git a/mindspore/lite/src/ops/partial.cc b/mindspore/lite/src/ops/partial.cc deleted file mode 100644 index deb4d80b20..0000000000 --- a/mindspore/lite/src/ops/partial.cc +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/partial.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE - -int Partial::GetSubGraphIndex() const { return this->primitive_->value.AsPartial()->subGraphIndex; } - -int Partial::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Partial; - } - if (this->primitive_->value.type != schema::PrimitiveType_Partial) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::PartialT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -int Partial::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Partial(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Partial return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreatePartial(*fbb, attr->subGraphIndex()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Partial, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int Partial::GetSubGraphIndex() const { return this->primitive_->value_as_Partial()->subGraphIndex(); } - -PrimitiveC *PartialCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Partial>(primitive); } -Registry PartialRegistry(schema::PrimitiveType_Partial, PartialCreator); - -#endif - -int Partial::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { return RET_OK; } -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/partial.h b/mindspore/lite/src/ops/partial.h deleted file mode 100644 index 66c680c845..0000000000 --- a/mindspore/lite/src/ops/partial.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_PARTIAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_PARTIAL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Partial : public PrimitiveC { - public: - Partial() = default; - ~Partial() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Partial, PrimitiveC); - explicit Partial(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetSubGraphIndex() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_PARTIAL_H_ diff --git a/mindspore/lite/src/ops/pooling.cc b/mindspore/lite/src/ops/pooling.cc deleted file mode 100644 index 271418520a..0000000000 --- a/mindspore/lite/src/ops/pooling.cc +++ /dev/null @@ -1,235 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/pooling.h" -#include <memory> -#include <string> -#include <vector> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -#ifdef PRIMITIVE_WRITEABLE -int Pooling::GetFormat() const { return this->primitive_->value.AsPooling()->format; } -int Pooling::GetPoolingMode() const { return this->primitive_->value.AsPooling()->poolingMode; } -bool Pooling::GetGlobal() const { return this->primitive_->value.AsPooling()->global; } -int Pooling::GetWindowW() const { return this->primitive_->value.AsPooling()->windowW; } -int Pooling::GetWindowH() const { return this->primitive_->value.AsPooling()->windowH; } -int Pooling::GetStrideW() const { return this->primitive_->value.AsPooling()->strideW; } -int Pooling::GetStrideH() const { return this->primitive_->value.AsPooling()->strideH; } -int Pooling::GetPadMode() const { return this->primitive_->value.AsPooling()->padMode; } -int Pooling::GetPadUp() const { return this->primitive_->value.AsPooling()->padUp; } -int Pooling::GetPadDown() const { return this->primitive_->value.AsPooling()->padDown; } -int Pooling::GetPadLeft() const { return this->primitive_->value.AsPooling()->padLeft; } -int Pooling::GetPadRight() const { return this->primitive_->value.AsPooling()->padRight; } -int Pooling::GetRoundMode() const { return this->primitive_->value.AsPooling()->roundMode; } -int Pooling::GetActivationType() const { return this->primitive_->value.AsPooling()->activationType; } -int Pooling::GetAvgMode() const { return this->primitive_->value.AsPooling()->avgMode; } - -void Pooling::SetFormat(int format) { this->primitive_->value.AsPooling()->format = (schema::Format)format; } -void Pooling::SetPoolingMode(int pooling_mode) { - 
this->primitive_->value.AsPooling()->poolingMode = (schema::PoolMode)pooling_mode; -} -void Pooling::SetGlobal(bool global) { this->primitive_->value.AsPooling()->global = global; } -void Pooling::SetWindowW(int window_w) { this->primitive_->value.AsPooling()->windowW = window_w; } -void Pooling::SetWindowH(int window_h) { this->primitive_->value.AsPooling()->windowH = window_h; } -void Pooling::SetStrideW(int stride_w) { this->primitive_->value.AsPooling()->strideW = stride_w; } -void Pooling::SetStrideH(int stride_h) { this->primitive_->value.AsPooling()->strideH = stride_h; } -void Pooling::SetPadMode(int pad_mode) { this->primitive_->value.AsPooling()->padMode = (schema::PadMode)pad_mode; } -void Pooling::SetPadUp(int pad_up) { this->primitive_->value.AsPooling()->padUp = pad_up; } -void Pooling::SetPadDown(int pad_down) { this->primitive_->value.AsPooling()->padDown = pad_down; } -void Pooling::SetPadLeft(int pad_left) { this->primitive_->value.AsPooling()->padLeft = pad_left; } -void Pooling::SetPadRight(int pad_right) { this->primitive_->value.AsPooling()->padRight = pad_right; } -void Pooling::SetRoundMode(int round_mode) { - this->primitive_->value.AsPooling()->roundMode = (schema::RoundMode)round_mode; -} -void Pooling::SetActivationType(int activation_type) { - this->primitive_->value.AsPooling()->activationType = (schema::ActivationType)activation_type; -} -void Pooling::SetAvgMode(int avg_mode) { this->primitive_->value.AsPooling()->avgMode = avg_mode; } - -int Pooling::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Pooling; - } - if (this->primitive_->value.type != schema::PrimitiveType_Pooling) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::PoolingT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.instance_name() == "MaxPool") { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - } else if (prim.instance_name() == "MeanPool" || prim.instance_name() == "AvgPool") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } - - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - attr->format = schema::Format::Format_NUM_OF_FORMAT; - } - - auto pad_mode = GetValue<std::string>(prim.GetAttr("padding")); - if (pad_mode == "VALID") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "SAME") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - auto kernel_size = CastToInt(prim.GetAttr("ksize")); - attr->windowH = kernel_size.at(2); - attr->windowW = kernel_size.at(3); - - auto stride = CastToInt(prim.GetAttr("strides")); - attr->strideH = stride.at(2); - attr->strideW = stride.at(3); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - - return RET_OK; -} - -#else - -int Pooling::GetFormat() const { return 
this->primitive_->value_as_Pooling()->format(); } -int Pooling::GetPoolingMode() const { return this->primitive_->value_as_Pooling()->poolingMode(); } -bool Pooling::GetGlobal() const { return this->primitive_->value_as_Pooling()->global(); } -int Pooling::GetWindowW() const { return this->primitive_->value_as_Pooling()->windowW(); } -int Pooling::GetWindowH() const { return this->primitive_->value_as_Pooling()->windowH(); } -int Pooling::GetStrideW() const { return this->primitive_->value_as_Pooling()->strideW(); } -int Pooling::GetStrideH() const { return this->primitive_->value_as_Pooling()->strideH(); } -int Pooling::GetPadMode() const { return this->primitive_->value_as_Pooling()->padMode(); } -int Pooling::GetPadUp() const { return this->primitive_->value_as_Pooling()->padUp(); } -int Pooling::GetPadDown() const { return this->primitive_->value_as_Pooling()->padDown(); } -int Pooling::GetPadLeft() const { return this->primitive_->value_as_Pooling()->padLeft(); } -int Pooling::GetPadRight() const { return this->primitive_->value_as_Pooling()->padRight(); } -int Pooling::GetRoundMode() const { return this->primitive_->value_as_Pooling()->roundMode(); } -int Pooling::GetActivationType() const { return this->primitive_->value_as_Pooling()->activationType(); } -int Pooling::GetAvgMode() const { return this->primitive_->value_as_Pooling()->avgMode(); } - -int Pooling::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Pooling(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Pooling return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreatePooling(*fbb, attr->format(), attr->poolingMode(), attr->global(), attr->windowW(), - attr->windowH(), attr->strideW(), attr->strideH(), attr->padMode(), - attr->padUp(), attr->padDown(), attr->padLeft(), attr->padRight(), - attr->roundMode(), attr->activationType(), attr->avgMode()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Pooling, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PoolingCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Pooling>(primitive); } -Registry PoolingRegistry(schema::PrimitiveType_Pooling, PoolingCreator); - -#endif - -int Pooling::PadUp() const { return this->pad_u_; } -int Pooling::PadDown() const { return this->pad_d_; } -int Pooling::PadLeft() const { return this->pad_l_; } -int Pooling::PadRight() const { return this->pad_r_; } - -int Pooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(schema::Format::Format_NHWC); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - int input_h = input->shape().at(1); - int input_w = input->shape().at(2); - - auto window_h = GetWindowH(); - auto window_w = GetWindowW(); - if (GetGlobal()) { - window_h = input_h; - window_w = input_w; - } - int output_h = 0; - int output_w = 0; - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - if (GetPadMode() == schema::PadMode_SAME_UPPER) { - output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW())); - output_h = std::ceil(static_cast<float>(input_h) / 
static_cast<float>(GetStrideH())); - auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h); - auto pad_w_all = ((output_w - 1) * GetStrideW() + (window_w - 1) + 1 - input_w); - if (pad_h_all < 0) { - pad_u_ = pad_d_ = 0; - } else { - pad_u_ = pad_h_all / 2; - pad_d_ = pad_h_all - pad_u_; - } - if (pad_w_all < 0) { - pad_l_ = pad_r_ = 0; - } else { - pad_l_ = pad_w_all / 2; - pad_r_ = pad_w_all - pad_l_; - } - } else { - auto round_mode = (schema::RoundMode)GetRoundMode(); - if (round_mode == schema::RoundMode_FLOOR) { - output_h = std::floor(static_cast<float>(input_h + pad_u_ + pad_d_ - window_h) / GetStrideH()) + 1; - output_w = std::floor(static_cast<float>(input_w + pad_l_ + pad_r_ - window_w) / GetStrideW()) + 1; - } else if (round_mode == schema::RoundMode_CEIL) { - output_h = std::ceil(static_cast<float>(input_h + pad_u_ + pad_d_ - window_h) / GetStrideH()) + 1; - output_w = std::ceil(static_cast<float>(input_w + pad_l_ + pad_r_ - window_w) / GetStrideW()) + 1; - } else { - MS_LOG(ERROR) << "unsupported round mode."; - } - } - auto input_shape = input->shape(); - input_shape.at(1) = output_h > 0 ? output_h : 1; - input_shape.at(2) = output_w > 0 ? output_w : 1; - output->set_shape(input_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/pooling.h b/mindspore/lite/src/ops/pooling.h deleted file mode 100644 index 5e7572ffa3..0000000000 --- a/mindspore/lite/src/ops/pooling.h +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_POOLING_H_ -#define LITE_MINDSPORE_LITE_C_OPS_POOLING_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Pooling : public PrimitiveC { - public: - Pooling() = default; - ~Pooling() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Pooling, PrimitiveC); - explicit Pooling(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetPoolingMode(int pooling_mode); - void SetGlobal(bool global); - void SetWindowW(int window_w); - void SetWindowH(int window_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetRoundMode(int round_mode); - void SetActivationType(int activation_type); - void SetAvgMode(int avg_mode); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetPoolingMode() const; - bool GetGlobal() const; - int GetWindowW() const; - int GetWindowH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetRoundMode() const; - int GetActivationType() const; - int GetAvgMode() const; - - int PadUp() const; - int PadDown() const; - int PadLeft() const; - int PadRight() const; - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; // namespace lite -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_POOLING_H_ diff --git a/mindspore/lite/src/ops/pooling_grad.cc b/mindspore/lite/src/ops/pooling_grad.cc deleted file mode 100644 index 47825e9709..0000000000 --- a/mindspore/lite/src/ops/pooling_grad.cc +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/pooling_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int PoolingGrad::GetFormat() const { return this->primitive_->value.AsPoolingGrad()->format; } -int PoolingGrad::GetPoolingMode() const { return this->primitive_->value.AsPoolingGrad()->poolingMode; } -bool PoolingGrad::GetGlobal() const { return this->primitive_->value.AsPoolingGrad()->global; } -int PoolingGrad::GetWindowW() const { return this->primitive_->value.AsPoolingGrad()->windowW; } -int PoolingGrad::GetWindowH() const { return this->primitive_->value.AsPoolingGrad()->windowH; } -int PoolingGrad::GetStrideW() const { return this->primitive_->value.AsPoolingGrad()->strideW; } -int PoolingGrad::GetStrideH() const { return this->primitive_->value.AsPoolingGrad()->strideH; } -int PoolingGrad::GetPadMode() const { return this->primitive_->value.AsPoolingGrad()->padMode; } -int PoolingGrad::GetPadUp() const { return this->primitive_->value.AsPoolingGrad()->padUp; } -int PoolingGrad::GetPadDown() const { return this->primitive_->value.AsPoolingGrad()->padDown; } -int PoolingGrad::GetPadLeft() const { return this->primitive_->value.AsPoolingGrad()->padLeft; } -int PoolingGrad::GetPadRight() const { return this->primitive_->value.AsPoolingGrad()->padRight; } -int PoolingGrad::GetRoundMode() const { return this->primitive_->value.AsPoolingGrad()->roundMode; } - -void PoolingGrad::SetFormat(int format) { this->primitive_->value.AsPoolingGrad()->format = (schema::Format)format; } -void PoolingGrad::SetPoolingMode(int pooling_mode) { - this->primitive_->value.AsPoolingGrad()->poolingMode = (schema::PoolMode)pooling_mode; -} -void PoolingGrad::SetGlobal(bool global) { this->primitive_->value.AsPoolingGrad()->global = global; } -void PoolingGrad::SetWindowW(int window_w) { this->primitive_->value.AsPoolingGrad()->windowW = window_w; } -void PoolingGrad::SetWindowH(int window_h) { this->primitive_->value.AsPoolingGrad()->windowH = window_h; } -void PoolingGrad::SetStrideW(int stride_w) { this->primitive_->value.AsPoolingGrad()->strideW = stride_w; } -void PoolingGrad::SetStrideH(int stride_h) { this->primitive_->value.AsPoolingGrad()->strideH = stride_h; } -void PoolingGrad::SetPadMode(int pad_mode) { - this->primitive_->value.AsPoolingGrad()->padMode = (schema::PadMode)pad_mode; -} -void PoolingGrad::SetPadUp(int pad_up) { this->primitive_->value.AsPoolingGrad()->padUp = pad_up; } -void PoolingGrad::SetPadDown(int pad_down) { this->primitive_->value.AsPoolingGrad()->padDown = pad_down; } -void PoolingGrad::SetPadLeft(int pad_left) { this->primitive_->value.AsPoolingGrad()->padLeft = pad_left; } -void PoolingGrad::SetPadRight(int pad_right) { this->primitive_->value.AsPoolingGrad()->padRight = pad_right; } -void PoolingGrad::SetRoundMode(int round_mode) { - this->primitive_->value.AsPoolingGrad()->roundMode = (schema::RoundMode)round_mode; -} -int PoolingGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_PoolingGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_PoolingGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if 
(this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::PoolingGradT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - auto format = GetValue<std::string>(prim.GetAttr("data_format")); - if (format == "NCHW") { - attr->format = schema::Format_NCHW; - } else if (format == "NHWC") { - attr->format = schema::Format_NHWC; - } else { - attr->format = schema::Format_NUM_OF_FORMAT; - } - - if (prim.instance_name() == "MaxPoolGrad") { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - } else if (prim.instance_name() == "AvgPoolGrad") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } else if (prim.instance_name() == "AvgPoolGradGpu") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } else if (prim.instance_name() == "AvgPoolGradCpu") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } else { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - } - - auto pad_mode = GetValue<std::string>(prim.GetAttr("padding")); - if (pad_mode == "VALID") { - attr->padMode = schema::PadMode_VALID; - } else if (pad_mode == "SAME") { - attr->padMode = schema::PadMode_SAME_UPPER; - } else { - attr->padMode = schema::PadMode_NOTSET; - } - - auto kernel_size = CastToInt(prim.GetAttr("ksize")); - attr->windowH = kernel_size.at(2); - attr->windowW = kernel_size.at(3); - - auto stride = CastToInt(prim.GetAttr("strides")); - attr->strideH = stride.at(2); - attr->strideW = stride.at(3); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int PoolingGrad::GetFormat() const { return this->primitive_->value_as_PoolingGrad()->format(); } -int PoolingGrad::GetPoolingMode() const { return this->primitive_->value_as_PoolingGrad()->poolingMode(); } -bool PoolingGrad::GetGlobal() const { return this->primitive_->value_as_PoolingGrad()->global(); } -int PoolingGrad::GetWindowW() const { return this->primitive_->value_as_PoolingGrad()->windowW(); } -int PoolingGrad::GetWindowH() const { return this->primitive_->value_as_PoolingGrad()->windowH(); } -int PoolingGrad::GetStrideW() const { return this->primitive_->value_as_PoolingGrad()->strideW(); } -int PoolingGrad::GetStrideH() const { return this->primitive_->value_as_PoolingGrad()->strideH(); } -int PoolingGrad::GetPadMode() const { return this->primitive_->value_as_PoolingGrad()->padMode(); } -int PoolingGrad::GetPadUp() const { return this->primitive_->value_as_PoolingGrad()->padUp(); } -int PoolingGrad::GetPadDown() const { return this->primitive_->value_as_PoolingGrad()->padDown(); } -int PoolingGrad::GetPadLeft() const { return this->primitive_->value_as_PoolingGrad()->padLeft(); } -int PoolingGrad::GetPadRight() const { return this->primitive_->value_as_PoolingGrad()->padRight(); } -int PoolingGrad::GetRoundMode() const { return this->primitive_->value_as_PoolingGrad()->roundMode(); } - -int PoolingGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_PoolingGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_PoolingGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreatePoolingGrad(*fbb, attr->format(), attr->poolingMode(), attr->global(), attr->windowW(), - attr->windowH(), attr->strideW(), attr->strideH(), attr->padMode(), attr->padUp(), - 
attr->padDown(), attr->padLeft(), attr->padRight(), attr->roundMode()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_PoolingGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PoolingGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<PoolingGrad>(primitive); -} -Registry PoolingGradRegistry(schema::PrimitiveType_PoolingGrad, PoolingGradCreator); -#endif - -int PoolingGrad::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (3 != inputs_.size()) { - MS_LOG(ERROR) << "Pooling Grad Filter should have 3 inputs"; - return RET_ERROR; - } - if (1 != outputs_.size()) { - MS_LOG(ERROR) << "Pooling Grad Filter should have one output"; - return RET_ERROR; - } - - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - int input_h = input->shape().at(1); - int input_w = input->shape().at(2); - - auto window_h = GetWindowH(); - auto window_w = GetWindowW(); - if (GetGlobal()) { - window_h = input_h; - window_w = input_w; - } - - pad_l_ = GetPadLeft(); - pad_u_ = GetPadUp(); - pad_d_ = GetPadDown(); - pad_r_ = GetPadRight(); - if (GetPadMode() == schema::PadMode_SAME_UPPER) { - int output_w = std::ceil(static_cast<float>(input_w) / static_cast<float>(GetStrideW())); - int output_h = std::ceil(static_cast<float>(input_h) / static_cast<float>(GetStrideH())); - auto pad_h_all = ((output_h - 1) * GetStrideH() + (window_h - 1) + 1 - input_h); - auto pad_w_all = ((output_w - 1) * GetStrideW() + (window_w - 1) + 1 - input_w); - if (pad_h_all < 0) { - pad_u_ = pad_d_ = 0; - } else { - pad_u_ = pad_h_all / 2; - pad_d_ = pad_h_all - pad_u_; - } - if (pad_w_all < 0) { - pad_l_ = pad_r_ = 0; - } else { - pad_l_ = pad_w_all / 2; - pad_r_ = pad_w_all - pad_l_; - } - } - auto grad_output = outputs_.at(0); - auto output_shape = input->shape(); - grad_output->set_shape(output_shape); - grad_output->set_data_type(input->data_type()); - grad_output->set_format(input->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/pooling_grad.h b/mindspore/lite/src/ops/pooling_grad.h deleted file mode 100644 index 1f47d57e60..0000000000 --- a/mindspore/lite/src/ops/pooling_grad.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_POOLING_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_POOLING_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <string> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class PoolingGrad : public PrimitiveC { - public: - PoolingGrad() = default; - ~PoolingGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(PoolingGrad, PrimitiveC); - explicit PoolingGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetPoolingMode(int pooling_mode); - void SetGlobal(bool global); - void SetWindowW(int window_w); - void SetWindowH(int window_h); - void SetStrideW(int stride_w); - void SetStrideH(int stride_h); - void SetPadMode(int pad_mode); - void SetPadUp(int pad_up); - void SetPadDown(int pad_down); - void SetPadLeft(int pad_left); - void SetPadRight(int pad_right); - void SetRoundMode(int round_mode); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetPoolingMode() const; - bool GetGlobal() const; - int GetWindowW() const; - int GetWindowH() const; - int GetStrideW() const; - int GetStrideH() const; - int GetPadMode() const; - int GetPadUp() const; - int GetPadDown() const; - int GetPadLeft() const; - int GetPadRight() const; - int GetRoundMode() const; - - protected: - int pad_u_ = 0; - int pad_d_ = 0; - int pad_l_ = 0; - int pad_r_ = 0; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_POOLING_GRAD_H_ diff --git a/mindspore/lite/src/ops/populate/activation_grad_populate.cc b/mindspore/lite/src/ops/populate/activation_grad_populate.cc index 54cfcbc0f7..f832719648 100644 --- a/mindspore/lite/src/ops/populate/activation_grad_populate.cc +++ b/mindspore/lite/src/ops/populate/activation_grad_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/activation_grad.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32_grad/activation_grad.h" namespace mindspore { namespace lite { -OpParameter *PopulateActivationGradParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateActivationGradParameter(const void *prim) { ActivationGradParameter *act_param = reinterpret_cast<ActivationGradParameter *>(malloc(sizeof(ActivationGradParameter))); if (act_param == nullptr) { @@ -29,13 +26,15 @@ OpParameter *PopulateActivationGradParameter(const mindspore::lite::PrimitiveC * return nullptr; } memset(act_param, 0, sizeof(ActivationGradParameter)); - act_param->op_parameter.type_ = primitive->Type(); - auto activation = - reinterpret_cast<mindspore::lite::ActivationGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - act_param->type_ = static_cast<int>(activation->GetType()); - act_param->alpha_ = activation->GetAlpha(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ActivationGrad(); + act_param->op_parameter.type_ = primitive->value_type(); + act_param->type_ = static_cast<int>(value->activation_type()); + act_param->alpha_ = value->alpha(); return reinterpret_cast<OpParameter *>(act_param); } -Registry ActivationGradParameterRegistry(schema::PrimitiveType_ActivationGrad, PopulateActivationGradParameter); +Registry ActivationGradParameterRegistry(schema::PrimitiveType_ActivationGrad, PopulateActivationGradParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/activation_populate.cc b/mindspore/lite/src/ops/populate/activation_populate.cc index 82a4e99046..1f0e8c9e01 100644 --- a/mindspore/lite/src/ops/populate/activation_populate.cc +++ b/mindspore/lite/src/ops/populate/activation_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,30 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/activation.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/activation_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateActivationParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateRelu6Parameter(const void *prim) { ActivationParameter *act_param = reinterpret_cast<ActivationParameter *>(malloc(sizeof(ActivationParameter))); if (act_param == nullptr) { MS_LOG(ERROR) << "malloc ActivationParameter failed."; return nullptr; } memset(act_param, 0, sizeof(ActivationParameter)); - act_param->op_parameter_.type_ = primitive->Type(); - auto activation = - reinterpret_cast<mindspore::lite::Activation *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - act_param->type_ = static_cast<int>(activation->GetType()); - act_param->alpha_ = activation->GetAlpha(); - act_param->min_val_ = activation->GetMinVal(); - act_param->max_val_ = activation->GetMaxVal(); + auto primitive = static_cast<const schema::Primitive *>(prim); + act_param->op_parameter_.type_ = primitive->value_type(); + auto acti_prim = primitive->value_as_Activation(); + act_param->type_ = static_cast<int>(acti_prim->activation_type()); + act_param->alpha_ = acti_prim->alpha(); + act_param->min_val_ = acti_prim->min_val(); + act_param->max_val_ = acti_prim->max_val(); return reinterpret_cast<OpParameter *>(act_param); } -Registry ActivationParameterRegistry(schema::PrimitiveType_Activation, PopulateActivationParameter); +} // namespace + +Registry g_relu6ParameterRegistry(schema::PrimitiveType_Activation, PopulateRelu6Parameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/adam_populate.cc b/mindspore/lite/src/ops/populate/adam_populate.cc index ec06f36589..38682785ee 100644 --- a/mindspore/lite/src/ops/populate/adam_populate.cc +++ b/mindspore/lite/src/ops/populate/adam_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/adam.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateAdamParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateAdamParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc Adam Parameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry AdamParameterRegistry(schema::PrimitiveType_Adam, PopulateAdamParameter); +Registry AdamParameterRegistry(schema::PrimitiveType_Adam, PopulateAdamParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/add_populate.cc b/mindspore/lite/src/ops/populate/add_populate.cc index e2722ff084..ff5ac84dc3 100644 --- a/mindspore/lite/src/ops/populate/add_populate.cc +++ b/mindspore/lite/src/ops/populate/add_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/add.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arithmetic.h" #include "src/ops/populate/arithmetic_populate.h" namespace mindspore { namespace lite { -OpParameter *PopulateAddParameter(const mindspore::lite::PrimitiveC *primitive) { - ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); +namespace { +OpParameter *PopulateAddParameter(const void *prim) { + ArithmeticParameter *param = PopulateArithmeticCommonPara(prim); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; return nullptr; } - param->activation_type_ = reinterpret_cast<const mindspore::lite::Add *>(primitive)->GetActivationType(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); + auto add_prim = primitive->value_as_AddFusion(); + param->activation_type_ = add_prim->activation_type(); return reinterpret_cast<OpParameter *>(param); } -Registry AddParameterRegistry(schema::PrimitiveType_Add, PopulateAddParameter); - +} // namespace +Registry g_addParameterRegistry(schema::PrimitiveType_AddFusion, PopulateAddParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/adder_populate.cc b/mindspore/lite/src/ops/populate/adder_populate.cc index 59ab043381..295a814306 100644 --- a/mindspore/lite/src/ops/populate/adder_populate.cc +++ b/mindspore/lite/src/ops/populate/adder_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,42 +13,37 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/adder.h" #include "src/common/log_adapter.h" #include "nnacl/conv_parameter.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateAdderParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateAdderParameter(const void *prim) { ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (conv_param == nullptr) { MS_LOG(ERROR) << "malloc ConvParameter failed."; return nullptr; } memset(conv_param, 0, sizeof(ConvParameter)); - conv_param->op_parameter_.type_ = primitive->Type(); - auto adder_primitive = - reinterpret_cast<mindspore::lite::Adder *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - conv_param->kernel_h_ = adder_primitive->GetKernelH(); - conv_param->kernel_w_ = adder_primitive->GetKernelW(); - conv_param->group_ = adder_primitive->GetGroup(); - conv_param->stride_h_ = adder_primitive->GetStrideH(); - conv_param->stride_w_ = adder_primitive->GetStrideW(); - auto adder_lite_primitive = (lite::Adder *)primitive; - conv_param->pad_u_ = adder_lite_primitive->PadUp(); - conv_param->pad_d_ = adder_lite_primitive->PadDown(); - conv_param->pad_l_ = adder_lite_primitive->PadLeft(); - conv_param->pad_r_ = adder_lite_primitive->PadRight(); - conv_param->dilation_h_ = adder_primitive->GetDilateH(); - conv_param->dilation_w_ = adder_primitive->GetDilateW(); - conv_param->input_channel_ = adder_primitive->GetChannelIn(); - conv_param->output_channel_ = adder_primitive->GetChannelOut(); - conv_param->group_ = adder_primitive->GetGroup(); - auto act_type = adder_primitive->GetActivationType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + conv_param->op_parameter_.type_ = primitive->value_type(); + auto conv_primitive = primitive->value_as_AdderFusion(); + conv_param->kernel_h_ = static_cast<int>(*(conv_primitive->kernel_size()->begin())); + conv_param->kernel_w_ = static_cast<int>(*(conv_primitive->kernel_size()->begin() + 1)); + conv_param->group_ = static_cast<int>(conv_primitive->group()); + conv_param->stride_h_ = static_cast<int>(*(conv_primitive->stride()->begin())); + conv_param->stride_w_ = static_cast<int>(*(conv_primitive->stride()->begin() + 1)); + conv_param->pad_u_ = static_cast<int>(*(conv_primitive->pad_list()->begin())); + conv_param->pad_d_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 1)); + conv_param->pad_l_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 2)); + conv_param->pad_r_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 3)); + conv_param->dilation_h_ = static_cast<int>(*(conv_primitive->dilation()->begin())); + conv_param->dilation_w_ = static_cast<int>(*(conv_primitive->dilation()->begin() + 1)); + conv_param->input_channel_ = static_cast<int>(conv_primitive->in_channel()); + conv_param->output_channel_ = static_cast<int>(conv_primitive->out_channel()); + auto act_type = conv_primitive->activation_type(); switch (act_type) { case schema::ActivationType_RELU: conv_param->act_type_ = ActType_Relu; @@ -62,6 +57,6 @@ OpParameter *PopulateAdderParameter(const mindspore::lite::PrimitiveC *primitive } return reinterpret_cast<OpParameter *>(conv_param); } -Registry AdderParameterRegistry(schema::PrimitiveType_Adder, PopulateAdderParameter); +Registry g_AdderParameterRegistry(schema::PrimitiveType_AdderFusion, PopulateAdderParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git 
a/mindspore/lite/src/ops/populate/addn_populate.cc b/mindspore/lite/src/ops/populate/addn_populate.cc index 22aacebc72..7932356f7a 100644 --- a/mindspore/lite/src/ops/populate/addn_populate.cc +++ b/mindspore/lite/src/ops/populate/addn_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,23 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/op_base.h" namespace mindspore { namespace lite { -OpParameter *PopulateAddNParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateAddNParameter(const void *prim) { OpParameter *addn_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (addn_param == nullptr) { MS_LOG(ERROR) << "malloc OpParameter failed."; return nullptr; } memset(addn_param, 0, sizeof(OpParameter)); - addn_param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + addn_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(addn_param); } -Registry AddNParameterRegistry(schema::PrimitiveType_AddN, PopulateAddNParameter); +} // namespace +Registry g_addNParameterRegistry(schema::PrimitiveType_AddN, PopulateAddNParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/argmax_populate.cc b/mindspore/lite/src/ops/populate/argmax_populate.cc index 387001de96..99da4b97c1 100644 --- a/mindspore/lite/src/ops/populate/argmax_populate.cc +++ b/mindspore/lite/src/ops/populate/argmax_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,32 +13,31 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/argmax.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arg_min_max_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateArgMaxParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateArgMaxParameter(const void *prim) { ArgMinMaxParameter *arg_param = reinterpret_cast<ArgMinMaxParameter *>(malloc(sizeof(ArgMinMaxParameter))); if (arg_param == nullptr) { MS_LOG(ERROR) << "malloc ArgMinMaxParameter failed."; return nullptr; } memset(arg_param, 0, sizeof(ArgMinMaxParameter)); - arg_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::ArgMax *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - arg_param->axis_ = param->GetAxis(); - arg_param->topk_ = param->GetTopK(); - arg_param->axis_type_ = param->GetAxisType(); - arg_param->out_value_ = param->GetOutMaxValue(); - arg_param->keep_dims_ = param->GetKeepDims(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + arg_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_ArgMaxFusion(); + arg_param->axis_ = param->axis(); + arg_param->topk_ = param->top_k(); + arg_param->out_value_ = param->out_max_value(); + arg_param->keep_dims_ = param->keep_dims(); arg_param->get_max_ = true; return reinterpret_cast<OpParameter *>(arg_param); } +} // namespace -Registry ArgMaxParameterRegistry(schema::PrimitiveType_ArgMax, PopulateArgMaxParameter); +Registry g_argMaxParameterRegistry(schema::PrimitiveType_ArgMaxFusion, PopulateArgMaxParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/argmin_populate.cc b/mindspore/lite/src/ops/populate/argmin_populate.cc index 61c98355f8..630260526e 100644 --- a/mindspore/lite/src/ops/populate/argmin_populate.cc +++ b/mindspore/lite/src/ops/populate/argmin_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,32 +13,31 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/argmin.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arg_min_max_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateArgMinParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateArgMinParameter(const void *prim) { ArgMinMaxParameter *arg_param = reinterpret_cast<ArgMinMaxParameter *>(malloc(sizeof(ArgMinMaxParameter))); if (arg_param == nullptr) { MS_LOG(ERROR) << "malloc ArgMinMaxParameter failed."; return nullptr; } memset(arg_param, 0, sizeof(ArgMinMaxParameter)); - arg_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::ArgMin *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - arg_param->axis_ = param->GetAxis(); - arg_param->topk_ = param->GetTopK(); - arg_param->axis_type_ = param->GetAxisType(); - arg_param->out_value_ = param->GetOutMaxValue(); - arg_param->keep_dims_ = param->GetKeepDims(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + arg_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_ArgMinFusion(); + arg_param->axis_ = param->axis(); + arg_param->topk_ = param->top_k(); + arg_param->out_value_ = param->out_max_value(); + arg_param->keep_dims_ = param->keep_dims(); arg_param->get_max_ = false; return reinterpret_cast<OpParameter *>(arg_param); } +} // namespace -Registry ArgMinParameterRegistry(schema::PrimitiveType_ArgMin, PopulateArgMinParameter); +Registry g_argMinParameterRegistry(schema::PrimitiveType_ArgMinFusion, PopulateArgMinParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/arithmetic_populate.cc b/mindspore/lite/src/ops/populate/arithmetic_populate.cc index d02a050859..3106a0569f 100644 --- a/mindspore/lite/src/ops/populate/arithmetic_populate.cc +++ b/mindspore/lite/src/ops/populate/arithmetic_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,39 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - #include "src/ops/populate/arithmetic_populate.h" -#include "src/ops/arithmetic.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { - -ArithmeticParameter *PopulateArithmeticCommonPara(const mindspore::lite::PrimitiveC *primitive) { +ArithmeticParameter *PopulateArithmeticCommonPara(const void *prim) { ArithmeticParameter *param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; return nullptr; } memset(param, 0, sizeof(ArithmeticParameter)); - param->op_parameter_.type_ = primitive->Type(); - param->broadcasting_ = reinterpret_cast<const lite::Arithmetic *>(primitive)->Broadcasting(); - param->ndim_ = reinterpret_cast<const lite::Arithmetic *>(primitive)->NDims(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); + param->broadcasting_ = false; + param->ndim_ = 0; param->activation_type_ = 0; - - auto tmp_shape = reinterpret_cast<const lite::Arithmetic *>(primitive)->InShape0(); - memcpy(param->in_shape0_, static_cast<void *>(tmp_shape.data()), tmp_shape.size() * sizeof(int)); - tmp_shape = reinterpret_cast<const lite::Arithmetic *>(primitive)->InShape1(); - memcpy(param->in_shape1_, static_cast<void *>(tmp_shape.data()), tmp_shape.size() * sizeof(int)); - tmp_shape = reinterpret_cast<const lite::Arithmetic *>(primitive)->OutputShape(); - memcpy(param->out_shape_, static_cast<void *>(tmp_shape.data()), tmp_shape.size() * sizeof(int)); return param; } -OpParameter *PopulateArithmetic(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateArithmetic(const void *primitive) { ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; @@ -54,20 +42,20 @@ OpParameter *PopulateArithmetic(const mindspore::lite::PrimitiveC *primitive) { return reinterpret_cast<OpParameter *>(param); } -Registry RealDivParameterRegistry(schema::PrimitiveType_RealDiv, PopulateArithmetic); -Registry LogicalAndParameterRegistry(schema::PrimitiveType_LogicalAnd, PopulateArithmetic); -Registry ParameterRegistry(schema::PrimitiveType_LogicalOr, PopulateArithmetic); -Registry EqualParameterRegistry(schema::PrimitiveType_Equal, PopulateArithmetic); -Registry NotEqualParameterRegistry(schema::PrimitiveType_NotEqual, PopulateArithmetic); -Registry LessParameterRegistry(schema::PrimitiveType_Less, PopulateArithmetic); -Registry LessEqualParameterRegistry(schema::PrimitiveType_LessEqual, PopulateArithmetic); -Registry GreaterParameterRegistry(schema::PrimitiveType_Greater, PopulateArithmetic); -Registry GreaterEqualParameterRegistry(schema::PrimitiveType_GreaterEqual, PopulateArithmetic); -Registry MaximumParameterRegistry(schema::PrimitiveType_Maximum, PopulateArithmetic); -Registry MinimumParameterRegistry(schema::PrimitiveType_Minimum, PopulateArithmetic); -Registry FloorDivParameterRegistry(schema::PrimitiveType_FloorDiv, PopulateArithmetic); -Registry FloorModParameterRegistry(schema::PrimitiveType_FloorMod, PopulateArithmetic); -Registry ModParameterRegistry(schema::PrimitiveType_Mod, PopulateArithmetic); -Registry SquaredDifferenceParameterRegistry(schema::PrimitiveType_SquaredDifference, PopulateArithmetic); +Registry g_realDivParameterRegistry(schema::PrimitiveType_RealDiv, 
PopulateArithmetic, SCHEMA_CUR); +Registry g_logicalAndParameterRegistry(schema::PrimitiveType_LogicalAnd, PopulateArithmetic, SCHEMA_CUR); +Registry g_logicalOrParameterRegistry(schema::PrimitiveType_LogicalOr, PopulateArithmetic, SCHEMA_CUR); +Registry g_equalParameterRegistry(schema::PrimitiveType_Equal, PopulateArithmetic, SCHEMA_CUR); +Registry g_notEqualParameterRegistry(schema::PrimitiveType_NotEqual, PopulateArithmetic, SCHEMA_CUR); +Registry g_lessParameterRegistry(schema::PrimitiveType_Less, PopulateArithmetic, SCHEMA_CUR); +Registry g_lessEqualParameterRegistry(schema::PrimitiveType_LessEqual, PopulateArithmetic, SCHEMA_CUR); +Registry g_greaterParameterRegistry(schema::PrimitiveType_Greater, PopulateArithmetic, SCHEMA_CUR); +Registry g_greaterEqualParameterRegistry(schema::PrimitiveType_GreaterEqual, PopulateArithmetic, SCHEMA_CUR); +Registry g_maximumParameterRegistry(schema::PrimitiveType_Maximum, PopulateArithmetic, SCHEMA_CUR); +Registry g_minimumParameterRegistry(schema::PrimitiveType_Minimum, PopulateArithmetic, SCHEMA_CUR); +Registry g_floorDivParameterRegistry(schema::PrimitiveType_FloorDiv, PopulateArithmetic, SCHEMA_CUR); +Registry g_floorModParameterRegistry(schema::PrimitiveType_FloorMod, PopulateArithmetic, SCHEMA_CUR); +Registry g_modParameterRegistry(schema::PrimitiveType_Mod, PopulateArithmetic, SCHEMA_CUR); +Registry g_squaredDifferenceParameterRegistry(schema::PrimitiveType_SquaredDifference, PopulateArithmetic, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/arithmetic_populate.h b/mindspore/lite/src/ops/populate/arithmetic_populate.h index 1112919aba..afce903883 100644 --- a/mindspore/lite/src/ops/populate/arithmetic_populate.h +++ b/mindspore/lite/src/ops/populate/arithmetic_populate.h @@ -16,14 +16,12 @@ #ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_ARITHMETIC_POPULATE_H_ #define MINDSPORE_LITE_SRC_OPS_POPULATE_ARITHMETIC_POPULATE_H_ -#include "src/ops/arithmetic.h" +#include "nnacl/arithmetic.h" namespace mindspore { namespace lite { - -ArithmeticParameter *PopulateArithmeticCommonPara(const mindspore::lite::PrimitiveC *primitive); -OpParameter *PopulateArithmetic(const mindspore::lite::PrimitiveC *primitive); - +ArithmeticParameter *PopulateArithmeticCommonPara(const void *primitive); +OpParameter *PopulateArithmetic(const void *primitive); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_SRC_OPS_POPULATE_ARITHMETIC_POPULATE_H_ diff --git a/mindspore/lite/src/ops/populate/arithmetic_self_populate.cc b/mindspore/lite/src/ops/populate/arithmetic_self_populate.cc index 2d1cc864a7..99a47ebaaf 100644 --- a/mindspore/lite/src/ops/populate/arithmetic_self_populate.cc +++ b/mindspore/lite/src/ops/populate/arithmetic_self_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - -#include "src/ops/arithmetic_self.h" #include "src/common/log_adapter.h" -#include "src/ops/primitive_c.h" +#include "nnacl/arithmetic_self_parameter.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateArithmeticSelf(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateArithmeticSelf(const void *prim) { ArithmeticSelfParameter *arithmetic_self_param = reinterpret_cast<ArithmeticSelfParameter *>(malloc(sizeof(ArithmeticSelfParameter))); if (arithmetic_self_param == nullptr) { @@ -29,26 +27,25 @@ OpParameter *PopulateArithmeticSelf(const mindspore::lite::PrimitiveC *primitive return nullptr; } memset(arithmetic_self_param, 0, sizeof(ArithmeticSelfParameter)); - arithmetic_self_param->op_parameter_.type_ = primitive->Type(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + arithmetic_self_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(arithmetic_self_param); } -Registry AbsParameterRegistry(schema::PrimitiveType_Abs, PopulateArithmeticSelf); -Registry CosParameterRegistry(schema::PrimitiveType_Cos, PopulateArithmeticSelf); -Registry SinParameterRegistry(schema::PrimitiveType_Sin, PopulateArithmeticSelf); -Registry LogParameterRegistry(schema::PrimitiveType_Log, PopulateArithmeticSelf); -Registry NegParameterRegistry(schema::PrimitiveType_Neg, PopulateArithmeticSelf); -Registry NegGradParameterRegistry(schema::PrimitiveType_NegGrad, PopulateArithmeticSelf); -Registry LogGradParameterRegistry(schema::PrimitiveType_LogGrad, PopulateArithmeticSelf); -Registry AbsGradParameterRegistry(schema::PrimitiveType_AbsGrad, PopulateArithmeticSelf); -Registry SqrtParameterRegistry(schema::PrimitiveType_Sqrt, PopulateArithmeticSelf); -Registry SquareParameterRegistry(schema::PrimitiveType_Square, PopulateArithmeticSelf); -Registry RsqrtParameterRegistry(schema::PrimitiveType_Rsqrt, PopulateArithmeticSelf); -Registry LogicalNotParameterRegistry(schema::PrimitiveType_LogicalNot, PopulateArithmeticSelf); -Registry FloorParameterRegistry(schema::PrimitiveType_Floor, PopulateArithmeticSelf); -Registry CeilParameterRegistry(schema::PrimitiveType_Ceil, PopulateArithmeticSelf); -Registry RoundParameterRegistry(schema::PrimitiveType_Round, PopulateArithmeticSelf); -Registry ReciprocalParameterRegistry(schema::PrimitiveType_Reciprocal, PopulateArithmeticSelf); - +Registry g_absParameterRegistry(schema::PrimitiveType_Abs, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_cosParameterRegistry(schema::PrimitiveType_Cos, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_sinParameterRegistry(schema::PrimitiveType_Sin, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_logParameterRegistry(schema::PrimitiveType_Log, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_negParameterRegistry(schema::PrimitiveType_Neg, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_negGradParameterRegistry(schema::PrimitiveType_NegGrad, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_logGradParameterRegistry(schema::PrimitiveType_LogGrad, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_sqrtParameterRegistry(schema::PrimitiveType_Sqrt, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_squareParameterRegistry(schema::PrimitiveType_Square, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_rsqrtParameterRegistry(schema::PrimitiveType_Rsqrt, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_logicalNotParameterRegistry(schema::PrimitiveType_LogicalNot, PopulateArithmeticSelf, SCHEMA_CUR); 
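Each g_*ParameterRegistry object in this list (which continues below) is an instance of the static-registration idiom this patch applies everywhere: the constructor of a file-scope Registry runs during static initialization and inserts the creator function into a process-wide table keyed by primitive type and schema version (SCHEMA_CUR here, as opposed to the compatibility schema). A minimal self-contained sketch of that idiom follows; the names are illustrative only, not the actual populate_register.h API.

#include <map>
#include <utility>

struct OpParameter;  // opaque parameter header handed to the kernels

// A creator receives a flatbuffer primitive and mallocs a concrete parameter.
using ParameterGen = OpParameter *(*)(const void *prim);

class PopulateRegistry {
 public:
  static PopulateRegistry *GetInstance() {
    static PopulateRegistry instance;  // constructed on first use
    return &instance;
  }
  void Insert(int type, int version, ParameterGen creator) {
    creators_[{type, version}] = creator;
  }
  ParameterGen Get(int type, int version) const {
    auto iter = creators_.find({type, version});
    return iter == creators_.end() ? nullptr : iter->second;
  }

 private:
  // Keyed by (PrimitiveType, schema version), so SCHEMA_CUR and the legacy
  // schema can register different creators for the same primitive type.
  std::map<std::pair<int, int>, ParameterGen> creators_;
};

class Registry {
 public:
  // Runs before main(): each file-scope g_*Registry registers its creator.
  Registry(int type, ParameterGen creator, int version) {
    PopulateRegistry::GetInstance()->Insert(type, version, creator);
  }
};

Under a shape like this, the runtime resolves a creator with something along the lines of PopulateRegistry::GetInstance()->Get(primitive->value_type(), SCHEMA_CUR) and owns the malloc'ed OpParameter the creator returns, releasing it with free().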
+Registry g_floorParameterRegistry(schema::PrimitiveType_Floor, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_ceilParameterRegistry(schema::PrimitiveType_Ceil, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_roundParameterRegistry(schema::PrimitiveType_Round, PopulateArithmeticSelf, SCHEMA_CUR); +Registry g_reciprocalParameterRegistry(schema::PrimitiveType_Reciprocal, PopulateArithmeticSelf, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/assert_populate.cc b/mindspore/lite/src/ops/populate/assert_populate.cc index ef0f4e0b02..3a83b0f714 100644 --- a/mindspore/lite/src/ops/populate/assert_populate.cc +++ b/mindspore/lite/src/ops/populate/assert_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/assert_op.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateAssertParameter(const mindspore::lite::PrimitiveC *primitive) { + +OpParameter *PopulateAssertParameter(const void *prim) { OpParameter *assert_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (assert_parameter == nullptr) { MS_LOG(ERROR) << "malloc AssertParameter failed."; return nullptr; } memset(assert_parameter, 0, sizeof(OpParameter)); - assert_parameter->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + assert_parameter->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(assert_parameter); } -Registry AssertParameterRegistry(schema::PrimitiveType_Assert, PopulateAssertParameter); +Registry AssertParameterRegistry(schema::PrimitiveType_Assert, PopulateAssertParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/assign_add_populate.cc b/mindspore/lite/src/ops/populate/assign_add_populate.cc index 7169e07b24..3e601f6fb6 100644 --- a/mindspore/lite/src/ops/populate/assign_add_populate.cc +++ b/mindspore/lite/src/ops/populate/assign_add_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/assign_add.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateAssignAddParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateAssignAddParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc AssignAdd Parameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry AssignAddParameterRegistry(schema::PrimitiveType_AssignAdd, PopulateAssignAddParameter); +Registry AssignAddParameterRegistry(schema::PrimitiveType_AssignAdd, PopulateAssignAddParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/assign_populate.cc b/mindspore/lite/src/ops/populate/assign_populate.cc index 86710bfd44..191897af93 100644 --- a/mindspore/lite/src/ops/populate/assign_populate.cc +++ b/mindspore/lite/src/ops/populate/assign_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/assign.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateAssignParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateAssignParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc Assign Parameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry AssignParameterRegistry(schema::PrimitiveType_Assign, PopulateAssignParameter); +Registry AssignParameterRegistry(schema::PrimitiveType_Assign, PopulateAssignParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/audio_spectrogram_populate.cc b/mindspore/lite/src/ops/populate/audio_spectrogram_populate.cc new file mode 100644 index 0000000000..3c88e961c2 --- /dev/null +++ b/mindspore/lite/src/ops/populate/audio_spectrogram_populate.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2019-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/ops/populate/populate_register.h" +#include "nnacl/infer/audio_spectrogram_infer.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateAudioSpectrogramParameter(const void *prim) { + AudioSpectrogramParameter *arg_param = + reinterpret_cast<AudioSpectrogramParameter *>(malloc(sizeof(AudioSpectrogramParameter))); + if (arg_param == nullptr) { + MS_LOG(ERROR) << "malloc AudioSpectrogramParameter failed."; + return nullptr; + } + memset(arg_param, 0, sizeof(AudioSpectrogramParameter)); + auto *primitive = static_cast<const schema::Primitive *>(prim); + arg_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_AudioSpectrogram(); + arg_param->window_size_ = param->window_size(); + arg_param->stride_ = param->stride(); + return reinterpret_cast<OpParameter *>(arg_param); +} +} // namespace + +Registry g_audioSpectrogramParameterRegistry(schema::PrimitiveType_AudioSpectrogram, PopulateAudioSpectrogramParameter, + SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/batch_norm_populate.cc b/mindspore/lite/src/ops/populate/batch_norm_populate.cc index 3561572f49..77eb009a2e 100644 --- a/mindspore/lite/src/ops/populate/batch_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/batch_norm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/batch_norm.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/batchnorm_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateBatchNorm(const mindspore::lite::PrimitiveC *primitive) { - const auto param = - reinterpret_cast<mindspore::lite::BatchNorm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateBatchNorm(const void *prim) { BatchNormParameter *batch_norm_param = reinterpret_cast<BatchNormParameter *>(malloc(sizeof(BatchNormParameter))); if (batch_norm_param == nullptr) { MS_LOG(ERROR) << "malloc BatchNormParameter failed."; return nullptr; } memset(batch_norm_param, 0, sizeof(BatchNormParameter)); - batch_norm_param->op_parameter_.type_ = primitive->Type(); - batch_norm_param->epsilon_ = param->GetEpsilon(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + batch_norm_param->op_parameter_.type_ = primitive->value_type(); + auto prim_batchnorm = primitive->value_as_BatchNorm(); + batch_norm_param->epsilon_ = prim_batchnorm->epsilon(); batch_norm_param->fused_ = false; return reinterpret_cast<OpParameter *>(batch_norm_param); } +} // namespace -Registry BatchNormParameterRegistry(schema::PrimitiveType_BatchNorm, PopulateBatchNorm); +Registry g_batchNormParameterRegistry(schema::PrimitiveType_BatchNorm, PopulateBatchNorm, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/batch_to_space_populate.cc b/mindspore/lite/src/ops/populate/batch_to_space_populate.cc index a3ae90ac9b..f39744918f 100644 --- a/mindspore/lite/src/ops/populate/batch_to_space_populate.cc +++ b/mindspore/lite/src/ops/populate/batch_to_space_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 
Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/batch_to_space.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/batch_to_space.h" namespace mindspore { namespace lite { -OpParameter *PopulateBatchToSpaceParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateBatchToSpaceParameter(const void *prim) { BatchToSpaceParameter *batch_space_param = reinterpret_cast<BatchToSpaceParameter *>(malloc(sizeof(BatchToSpaceParameter))); if (batch_space_param == nullptr) { @@ -31,21 +27,25 @@ OpParameter *PopulateBatchToSpaceParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(batch_space_param, 0, sizeof(BatchToSpaceParameter)); - batch_space_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::BatchToSpace *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - auto block_shape = param->GetBlockShape(); - if (block_shape.empty()) { + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + batch_space_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_BatchToSpace(); + if (param->block_size() == nullptr) { return reinterpret_cast<OpParameter *>(batch_space_param); } + auto block_shape = std::vector<int64_t>(param->block_size()->begin(), param->block_size()->end()); if (block_shape.size() != BATCH_TO_SPACE_BLOCK_SHAPE_SIZE) { MS_LOG(ERROR) << "batch_to_space blockShape size should be " << BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; free(batch_space_param); return nullptr; } - auto crops = param->GetCrops(); - if (crops.empty()) { - return reinterpret_cast<OpParameter *>(batch_space_param); + auto fb_crops = param->crops()->data(); + std::vector<int64_t> crops; + for (auto iter = fb_crops->begin(); iter != fb_crops->end(); ++iter) { + auto crops_data = (*iter)->data(); + auto crops_vec = std::vector<int64_t>(crops_data->begin(), crops_data->end()); + crops.insert(crops.end(), crops_vec.begin(), crops_vec.end()); } if (crops.size() != COMM_SHAPE_SIZE) { MS_LOG(ERROR) << "batch_to_space crops size should be " << COMM_SHAPE_SIZE; @@ -54,19 +54,16 @@ OpParameter *PopulateBatchToSpaceParameter(const mindspore::lite::PrimitiveC *pr } for (int i = 0; i < BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; ++i) { - batch_space_param->block_shape_[i] = block_shape[i]; + batch_space_param->block_shape_[i] = static_cast<int>(block_shape[i]); } - batch_space_param->no_crop_ = true; for (int i = 0; i < COMM_SHAPE_SIZE; ++i) { - batch_space_param->crops_[i] = crops[i]; - if (batch_space_param->crops_[i] != 0) { - batch_space_param->no_crop_ = false; - } + batch_space_param->crops_[i] = static_cast<int>(crops[i]); } return reinterpret_cast<OpParameter *>(batch_space_param); } -Registry BatchToSpaceParameterRegistry(schema::PrimitiveType_BatchToSpace, PopulateBatchToSpaceParameter); -Registry BatchToSpaceNDParameterRegistry(schema::PrimitiveType_BatchToSpaceND, PopulateBatchToSpaceParameter); +} // namespace +Registry g_batchToSpaceRegistry(schema::PrimitiveType_BatchToSpace, PopulateBatchToSpaceParameter, SCHEMA_CUR); +Registry 
g_batchToSpaceNDRegistry(schema::PrimitiveType_BatchToSpaceND, PopulateBatchToSpaceParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/bias_add_populate.cc b/mindspore/lite/src/ops/populate/bias_add_populate.cc index f4875a5fd4..b58b222845 100644 --- a/mindspore/lite/src/ops/populate/bias_add_populate.cc +++ b/mindspore/lite/src/ops/populate/bias_add_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arithmetic.h" namespace mindspore { namespace lite { -OpParameter *PopulateBiasAddParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateBiasAddParameter(const void *prim) { ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; return nullptr; } memset(arithmetic_param, 0, sizeof(ArithmeticParameter)); - arithmetic_param->op_parameter_.type_ = primitive->Type(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + arithmetic_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(arithmetic_param); } -Registry BiasAddParameterRegistry(schema::PrimitiveType_BiasAdd, PopulateBiasAddParameter); +} // namespace +Registry g_biasAddParameterRegistry(schema::PrimitiveType_BiasAdd, PopulateBiasAddParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/bias_grad_populate.cc b/mindspore/lite/src/ops/populate/bias_grad_populate.cc index 0bb338d3e3..72cb7a9d82 100644 --- a/mindspore/lite/src/ops/populate/bias_grad_populate.cc +++ b/mindspore/lite/src/ops/populate/bias_grad_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arithmetic.h" namespace mindspore { namespace lite { -OpParameter *PopulateBiasGradParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateBiasAddGradParameter(const void *prim) { ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; return nullptr; } memset(arithmetic_param, 0, sizeof(ArithmeticParameter)); - arithmetic_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + arithmetic_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(arithmetic_param); } -Registry PopulateBiasGradParameterParameterRegistry(schema::PrimitiveType_BiasGrad, PopulateBiasGradParameter); +} // namespace +Registry g_biasAddGradParameterRegistry(schema::PrimitiveType_BiasAddGrad, PopulateBiasAddGradParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/binary_cross_entropy_grad_populate.cc b/mindspore/lite/src/ops/populate/binary_cross_entropy_grad_populate.cc index 0087432b08..910b055460 100644 --- a/mindspore/lite/src/ops/populate/binary_cross_entropy_grad_populate.cc +++ b/mindspore/lite/src/ops/populate/binary_cross_entropy_grad_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - -#include "src/ops/binary_cross_entropy_grad.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32_grad/binary_cross_entropy_grad.h" namespace mindspore { namespace lite { -OpParameter *PopulateBinaryCrossEntropyGradParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateBinaryCrossEntropyGradParameter(const void *prim) { BinaryCrossEntropyGradParameter *bce_param = reinterpret_cast<BinaryCrossEntropyGradParameter *>(malloc(sizeof(BinaryCrossEntropyGradParameter))); if (bce_param == nullptr) { @@ -29,14 +27,15 @@ OpParameter *PopulateBinaryCrossEntropyGradParameter(const mindspore::lite::Prim return nullptr; } memset(bce_param, 0, sizeof(BinaryCrossEntropyGradParameter)); - bce_param->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::BinaryCrossEntropyGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - bce_param->reduction = param->GetReduction(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + bce_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_BinaryCrossEntropyGrad(); + bce_param->reduction = param->reduction(); return reinterpret_cast<OpParameter *>(bce_param); } +} // namespace -Registry BinaryCrossEntropyGradParameterRegistry(schema::PrimitiveType_BinaryCrossEntropyGrad, - PopulateBinaryCrossEntropyGradParameter); +Registry g_binaryCrossEntropyGradParameterRegistry(schema::PrimitiveType_BinaryCrossEntropyGrad, + PopulateBinaryCrossEntropyGradParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/binary_cross_entropy_populate.cc b/mindspore/lite/src/ops/populate/binary_cross_entropy_populate.cc index 1e150a21fa..2ade2b29fc 100644 --- a/mindspore/lite/src/ops/populate/binary_cross_entropy_populate.cc +++ b/mindspore/lite/src/ops/populate/binary_cross_entropy_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/binary_cross_entropy.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32_grad/binary_cross_entropy.h" namespace mindspore { namespace lite { -OpParameter *PopulateBinaryCrossEntropyParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateBinaryCrossEntropyParameter(const void *prim) { BinaryCrossEntropyParameter *bce_param = reinterpret_cast<BinaryCrossEntropyParameter *>(malloc(sizeof(BinaryCrossEntropyParameter))); if (bce_param == nullptr) { @@ -29,14 +26,14 @@ OpParameter *PopulateBinaryCrossEntropyParameter(const mindspore::lite::Primitiv return nullptr; } memset(bce_param, 0, sizeof(BinaryCrossEntropyParameter)); - bce_param->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::BinaryCrossEntropy *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - bce_param->reduction = param->GetReduction(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_BinaryCrossEntropy(); + bce_param->op_parameter_.type_ = primitive->value_type(); + bce_param->reduction = value->reduction(); return reinterpret_cast<OpParameter *>(bce_param); } Registry BinaryCrossEntropyParameterRegistry(schema::PrimitiveType_BinaryCrossEntropy, - PopulateBinaryCrossEntropyParameter); + PopulateBinaryCrossEntropyParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/broadcast_to_populate.cc b/mindspore/lite/src/ops/populate/broadcast_to_populate.cc index b73188cbff..0bc8dc4203 100644 --- a/mindspore/lite/src/ops/populate/broadcast_to_populate.cc +++ b/mindspore/lite/src/ops/populate/broadcast_to_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/broadcast_to.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/broadcast_to_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateBroadcastToParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateBroadcastToParameter(const void *prim) { BroadcastToParameter *broadcast_param = reinterpret_cast<BroadcastToParameter *>(malloc(sizeof(BroadcastToParameter))); if (broadcast_param == nullptr) { @@ -29,16 +26,17 @@ OpParameter *PopulateBroadcastToParameter(const mindspore::lite::PrimitiveC *pri return nullptr; } memset(broadcast_param, 0, sizeof(BroadcastToParameter)); - auto param = reinterpret_cast<mindspore::lite::BroadcastTo *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - broadcast_param->op_parameter_.type_ = primitive->Type(); - auto dst_shape = param->GetDstShape(); - broadcast_param->shape_size_ = dst_shape.size(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_BroadcastTo(); + broadcast_param->op_parameter_.type_ = primitive->value_type(); + auto dst_shape = value->shape(); + broadcast_param->shape_size_ = dst_shape->size(); for (size_t i = 0; i < broadcast_param->shape_size_; ++i) { - broadcast_param->shape_[i] = dst_shape[i]; + broadcast_param->shape_[i] = dst_shape->Get(i); } return reinterpret_cast<OpParameter *>(broadcast_param); } -Registry BroadcastToParameterRegistry(schema::PrimitiveType_BroadcastTo, PopulateBroadcastToParameter); +Registry BroadcastToParameterRegistry(schema::PrimitiveType_BroadcastTo, PopulateBroadcastToParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/cast_populate.cc b/mindspore/lite/src/ops/populate/cast_populate.cc index 3543a2a53c..d30859aa66 100644 --- a/mindspore/lite/src/ops/populate/cast_populate.cc +++ b/mindspore/lite/src/ops/populate/cast_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/cast.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "nnacl/cast_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateCastParameter(const mindspore::lite::PrimitiveC *primitive) { - CastParameter *cast_param = reinterpret_cast<CastParameter *>(malloc(sizeof(CastParameter))); +namespace { +OpParameter *PopulateCastParameter(const void *prim) { + OpParameter *cast_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (cast_param == nullptr) { MS_LOG(ERROR) << "malloc CastParameter failed."; return nullptr; } - memset(cast_param, 0, sizeof(CastParameter)); - cast_param->op_parameter_.type_ = primitive->Type(); - - auto param = reinterpret_cast<mindspore::lite::Cast *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - cast_param->src_type_ = param->GetSrcT(); - cast_param->dst_type_ = param->GetDstT(); - + memset(cast_param, 0, sizeof(OpParameter)); + auto *primitive = static_cast<const schema::Primitive *>(prim); + cast_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(cast_param); } +} // namespace -Registry CastParameterRegistry(schema::PrimitiveType_Cast, PopulateCastParameter); +Registry g_castParameterRegistry(schema::PrimitiveType_Cast, PopulateCastParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/clip_populate.cc b/mindspore/lite/src/ops/populate/clip_populate.cc new file mode 100644 index 0000000000..6a2f2e2078 --- /dev/null +++ b/mindspore/lite/src/ops/populate/clip_populate.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2019-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateClipParameter(const void *prim) { + OpParameter *act_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (act_param == nullptr) { + MS_LOG(ERROR) << "malloc ClipParameter failed."; + return nullptr; + } + memset(act_param, 0, sizeof(OpParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + act_param->type_ = primitive->value_type(); + return reinterpret_cast<OpParameter *>(act_param); +} +} // namespace + +Registry g_clipParameterRegistry(schema::PrimitiveType_Clip, PopulateClipParameter, SCHEMA_CUR); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/common_populate.cc b/mindspore/lite/src/ops/populate/common_populate.cc index 3d7fe2ef97..0ddf8b0a3a 100644 --- a/mindspore/lite/src/ops/populate/common_populate.cc +++ b/mindspore/lite/src/ops/populate/common_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -13,26 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { - -OpParameter *PopulateCommonParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateCommonParameter(const void *prim) { auto *common_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (common_parameter == nullptr) { MS_LOG(ERROR) << "malloc OpParameter failed."; return nullptr; } memset(common_parameter, 0, sizeof(OpParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + common_parameter->type_ = primitive->value_type(); return common_parameter; } +} // namespace -Registry ZerosLikeParameterRegistry(schema::PrimitiveType_ZerosLike, PopulateCommonParameter); -Registry SizeParameterRegistry(schema::PrimitiveType_Size, PopulateCommonParameter); -Registry InvertPermutationParameterRegistry(schema::PrimitiveType_InvertPermutation, PopulateCommonParameter); - +Registry g_zerosLikeParameterRegistry(schema::PrimitiveType_ZerosLike, PopulateCommonParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/concat_populate.cc b/mindspore/lite/src/ops/populate/concat_populate.cc index e9be7786c9..10d0451675 100644 --- a/mindspore/lite/src/ops/populate/concat_populate.cc +++ b/mindspore/lite/src/ops/populate/concat_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,29 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/concat.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/concat_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateConcatParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateConcatParameter(const void *prim) { ConcatParameter *concat_param = reinterpret_cast<ConcatParameter *>(malloc(sizeof(ConcatParameter))); if (concat_param == nullptr) { MS_LOG(ERROR) << "malloc ConcatParameter failed."; return nullptr; } memset(concat_param, 0, sizeof(ConcatParameter)); - concat_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Concat *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - concat_param->axis_ = param->GetAxis(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + concat_param->op_parameter_.type_ = primitive->value_type(); + concat_param->axis_ = static_cast<int>(primitive->value_as_Concat()->axis()); return reinterpret_cast<OpParameter *>(concat_param); } +} // namespace -Registry ConcatParameterRegistry(schema::PrimitiveType_Concat, PopulateConcatParameter); +Registry g_concatParameterRegistry(schema::PrimitiveType_Concat, PopulateConcatParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc b/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc index 4a04ef0812..89997653c4 100644 --- a/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc +++ b/mindspore/lite/src/ops/populate/constant_of_shape_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,19 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/constant_of_shape.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/constant_of_shape.h" namespace mindspore::lite { namespace { -OpParameter *PopulateConstantOfShapeParameter(const mindspore::lite::PrimitiveC *primitive) { - auto attr = - reinterpret_cast<mindspore::lite::ConstantOfShape *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +OpParameter *PopulateConstantOfShapeParameter(const void *prim) { ConstantOfShapeParameter *param = reinterpret_cast<ConstantOfShapeParameter *>(malloc(sizeof(ConstantOfShapeParameter))); if (param == nullptr) { @@ -33,25 +26,28 @@ OpParameter *PopulateConstantOfShapeParameter(const mindspore::lite::PrimitiveC return nullptr; } memset(param, 0, sizeof(ConstantOfShapeParameter)); - param->op_parameter_.type_ = primitive->Type(); - param->data_type_ = attr->GetDataType(); - auto value = attr->GetValue(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); + auto attr = primitive->value_as_ConstantOfShape(); + auto value = std::vector<float>(attr->value()->begin(), attr->value()->end()); + param->data_type_ = static_cast<int>(attr->data_type()); if (value.empty() || value.size() > 1) { MS_LOG(ERROR) << "The value of constant of shape is empty or more than 1."; } else { switch (param->data_type_) { case kNumberTypeFloat32: - param->value_.f32_value_ = attr->GetValue().at(0); + param->value_.f32_value_ = *(attr->value()->begin()); break; case kNumberTypeInt32: - param->value_.int32_value_ = attr->GetValue().at(0); + param->value_.int32_value_ = *(attr->value()->begin()); break; default: MS_LOG(ERROR) << "The value of constant of shape is invalid"; } } return reinterpret_cast<OpParameter *>(param); -} -Registry ConstantOfShapeParameterRegistry(schema::PrimitiveType_ConstantOfShape, PopulateConstantOfShapeParameter); +} // namespace +Registry g_constantOfShapeParameterRegistry(schema::PrimitiveType_ConstantOfShape, PopulateConstantOfShapeParameter, + SCHEMA_CUR); } // namespace } // namespace mindspore::lite diff --git a/mindspore/lite/src/ops/populate/conv2d_populate.cc b/mindspore/lite/src/ops/populate/conv2d_populate.cc index 35f46c0288..4500cabbfb 100644 --- a/mindspore/lite/src/ops/populate/conv2d_populate.cc +++ b/mindspore/lite/src/ops/populate/conv2d_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,53 +14,53 @@ * limitations under the License. 
*/ -#include "src/ops/conv2d.h" -#include "src/common/log_adapter.h" #include "nnacl/conv_parameter.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateConvParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateConvParameter(const void *prim) { ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (conv_param == nullptr) { MS_LOG(ERROR) << "malloc ConvParameter failed."; return nullptr; } memset(conv_param, 0, sizeof(ConvParameter)); - conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = - reinterpret_cast<mindspore::lite::Conv2D *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - conv_param->kernel_h_ = conv_primitive->GetKernelH(); - conv_param->kernel_w_ = conv_primitive->GetKernelW(); - conv_param->group_ = conv_primitive->GetGroup(); - conv_param->stride_h_ = conv_primitive->GetStrideH(); - conv_param->stride_w_ = conv_primitive->GetStrideW(); - - auto conv2d_lite_primitive = (lite::Conv2D *)primitive; - conv_param->pad_u_ = conv2d_lite_primitive->PadUp(); - conv_param->pad_d_ = conv2d_lite_primitive->PadDown(); - conv_param->pad_l_ = conv2d_lite_primitive->PadLeft(); - conv_param->pad_r_ = conv2d_lite_primitive->PadRight(); - conv_param->dilation_h_ = conv_primitive->GetDilateH(); - conv_param->dilation_w_ = conv_primitive->GetDilateW(); - conv_param->input_channel_ = conv_primitive->GetChannelIn(); - conv_param->output_channel_ = conv_primitive->GetChannelOut(); - conv_param->group_ = conv_primitive->GetGroup(); - auto pad_mode = conv_primitive->GetPadMode(); - switch (pad_mode) { - case schema::PadMode_SAME_UPPER: - conv_param->pad_mode_ = Pad_Same; + auto primitive = static_cast<const schema::Primitive *>(prim); + conv_param->op_parameter_.type_ = primitive->value_type(); + auto conv_primitive = primitive->value_as_Conv2DFusion(); + conv_param->kernel_h_ = static_cast<int>(*(conv_primitive->kernel_size()->begin())); + conv_param->kernel_w_ = static_cast<int>(*(conv_primitive->kernel_size()->begin() + 1)); + conv_param->group_ = static_cast<int>(conv_primitive->group()); + conv_param->stride_h_ = static_cast<int>(*(conv_primitive->stride()->begin())); + conv_param->stride_w_ = static_cast<int>(*(conv_primitive->stride()->begin() + 1)); + switch (conv_primitive->pad_mode()) { + case schema::PadMode_SAME: + conv_param->pad_mode_ = Pad_same; break; case schema::PadMode_VALID: - conv_param->pad_mode_ = Pad_Valid; + conv_param->pad_mode_ = Pad_valid; break; default: - conv_param->pad_mode_ = Pad_No; - break; + conv_param->pad_mode_ = Pad_pad; } - auto act_type = conv_primitive->GetActivationType(); + if (conv_primitive->pad_list() == nullptr || conv_primitive->pad_list()->size() < 4) { + conv_param->pad_u_ = 0; + conv_param->pad_d_ = 0; + conv_param->pad_l_ = 0; + conv_param->pad_r_ = 0; + } else { + conv_param->pad_u_ = static_cast<int>(*(conv_primitive->pad_list()->begin())); + conv_param->pad_d_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 1)); + conv_param->pad_l_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 2)); + conv_param->pad_r_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 3)); + } + conv_param->dilation_h_ = static_cast<int>(*(conv_primitive->dilation()->begin())); + conv_param->dilation_w_ = static_cast<int>(*(conv_primitive->dilation()->begin() + 1)); + conv_param->input_channel_ = static_cast<int>(conv_primitive->in_channel()); + 
conv_param->output_channel_ = static_cast<int>(conv_primitive->out_channel()); + auto act_type = conv_primitive->activation_type(); switch (act_type) { case schema::ActivationType_RELU: conv_param->act_type_ = ActType_Relu; @@ -70,10 +70,10 @@ OpParameter *PopulateConvParameter(const mindspore::lite::PrimitiveC *primitive) break; default: conv_param->act_type_ = ActType_No; - break; } return reinterpret_cast<OpParameter *>(conv_param); } -Registry Conv2DParameterRegistry(schema::PrimitiveType_Conv2D, PopulateConvParameter); +} // namespace +Registry g_conv2DParameterRegistry(schema::PrimitiveType_Conv2DFusion, PopulateConvParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/crop_and_resize_populate.cc b/mindspore/lite/src/ops/populate/crop_and_resize_populate.cc index e87ef703fa..0908af070b 100644 --- a/mindspore/lite/src/ops/populate/crop_and_resize_populate.cc +++ b/mindspore/lite/src/ops/populate/crop_and_resize_populate.cc @@ -13,14 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/crop_and_resize.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/resize_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateCropAndResizeParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateCropAndResizeParameter(const void *prim) { CropAndResizeParameter *crop_resize_param = reinterpret_cast<CropAndResizeParameter *>(malloc(sizeof(CropAndResizeParameter))); if (crop_resize_param == nullptr) { @@ -28,13 +26,16 @@ OpParameter *PopulateCropAndResizeParameter(const mindspore::lite::PrimitiveC *p return nullptr; } memset(crop_resize_param, 0, sizeof(CropAndResizeParameter)); - crop_resize_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::CropAndResize *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - crop_resize_param->method_ = static_cast<int>(param->GetMethod()); - crop_resize_param->extrapolation_value_ = param->GetExtrapolationValue(); + auto primitive = static_cast<const schema::Primitive *>(prim); + crop_resize_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_CropAndResize(); + crop_resize_param->method_ = static_cast<int>(param->method()); + crop_resize_param->extrapolation_value_ = param->extrapolation_value(); return reinterpret_cast<OpParameter *>(crop_resize_param); } +} // namespace -Registry CropAndResizeParameterRegistry(schema::PrimitiveType_CropAndResize, PopulateCropAndResizeParameter); +Registry g_cropAndResizeParameterRegistry(schema::PrimitiveType_CropAndResize, PopulateCropAndResizeParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/crop_populate.cc b/mindspore/lite/src/ops/populate/crop_populate.cc index 0898fa283f..bdfb0ed193 100644 --- a/mindspore/lite/src/ops/populate/crop_populate.cc +++ b/mindspore/lite/src/ops/populate/crop_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,20 +13,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/crop.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/crop_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateCropParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = reinterpret_cast<mindspore::lite::Crop *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - auto param_offset = param->GetOffsets(); - if (param_offset.size() > COMM_SHAPE_SIZE) { - MS_LOG(ERROR) << "crop_param offset size(" << param_offset.size() << ") should <= " << COMM_SHAPE_SIZE; +namespace { +OpParameter *PopulateCropParameter(const void *prim) { + auto primitive = static_cast<const schema::Primitive *>(prim); + auto crop_prim = primitive->value_as_Crop(); + auto param_offset = crop_prim->offsets(); + if (param_offset->size() > COMM_SHAPE_SIZE) { + MS_LOG(ERROR) << "crop_param offset size(" << param_offset->size() << ") should <= " << COMM_SHAPE_SIZE; return nullptr; } CropParameter *crop_param = reinterpret_cast<CropParameter *>(malloc(sizeof(CropParameter))); @@ -35,15 +33,16 @@ OpParameter *PopulateCropParameter(const mindspore::lite::PrimitiveC *primitive) return nullptr; } memset(crop_param, 0, sizeof(CropParameter)); - crop_param->op_parameter_.type_ = primitive->Type(); - crop_param->axis_ = param->GetAxis(); - crop_param->offset_size_ = param_offset.size(); - for (size_t i = 0; i < param_offset.size(); ++i) { - crop_param->offset_[i] = param_offset[i]; + crop_param->op_parameter_.type_ = primitive->value_type(); + crop_param->axis_ = crop_prim->axis(); + crop_param->offset_size_ = param_offset->size(); + for (size_t i = 0; i < param_offset->size(); ++i) { + crop_param->offset_[i] = *(param_offset->begin() + i); } return reinterpret_cast<OpParameter *>(crop_param); } -Registry CropParameterRegistry(schema::PrimitiveType_Crop, PopulateCropParameter); +} // namespace +Registry g_cropParameterRegistry(schema::PrimitiveType_Crop, PopulateCropParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/custom_extract_features_populate.cc b/mindspore/lite/src/ops/populate/custom_extract_features_populate.cc index 9d755dd15c..d7ab450664 100644 --- a/mindspore/lite/src/ops/populate/custom_extract_features_populate.cc +++ b/mindspore/lite/src/ops/populate/custom_extract_features_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,26 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/custom_extract_features.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { - -OpParameter *PopulateExtractFeaturesParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateExtractFeaturesParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "new OpParameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry CustomExtractFeaturesParameterRegistry(schema::PrimitiveType_CustomExtractFeatures, - PopulateExtractFeaturesParameter); - +} // namespace +Registry g_customExtractFeaturesParameterRegistry(schema::PrimitiveType_CustomExtractFeatures, + PopulateExtractFeaturesParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/custom_normalize_populate.cc b/mindspore/lite/src/ops/populate/custom_normalize_populate.cc index 2a127670da..94fa7975ea 100644 --- a/mindspore/lite/src/ops/populate/custom_normalize_populate.cc +++ b/mindspore/lite/src/ops/populate/custom_normalize_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/custom_normalize.h" -#include "src/common/string_util.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateCustomNormalizeParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateCustomNormalizeParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "new OpParameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry CustomNormalizeParameterRegistry(schema::PrimitiveType_CustomNormalize, PopulateCustomNormalizeParameter); +Registry CustomNormalizeParameterRegistry(schema::PrimitiveType_CustomNormalize, PopulateCustomNormalizeParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/custom_predict_populate.cc b/mindspore/lite/src/ops/populate/custom_predict_populate.cc index bf00613e81..d0084f246c 100644 --- a/mindspore/lite/src/ops/populate/custom_predict_populate.cc +++ b/mindspore/lite/src/ops/populate/custom_predict_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,28 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "src/ops/custom_predict.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/predict_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateCustomPredictParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateCustomPredictParameter(const void *prim) { PredictParameter *param = reinterpret_cast<PredictParameter *>(malloc(sizeof(PredictParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc param failed."; return nullptr; } memset(param, 0, sizeof(PredictParameter)); - param->op_parameter_.type_ = primitive->Type(); - auto prim = reinterpret_cast<mindspore::lite::CustomPredict *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->output_num = prim->GetOutputNum(); - param->weight_threshold = prim->GetWeightThreshold(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_CustomPredict(); + param->op_parameter_.type_ = primitive->value_type(); + param->output_num = value->output_num(); + param->weight_threshold = value->weight_threshold(); return reinterpret_cast<OpParameter *>(param); } -Registry CustomPredictParameterRegistry(schema::PrimitiveType_CustomPredict, PopulateCustomPredictParameter); +Registry CustomPredictParameterRegistry(schema::PrimitiveType_CustomPredict, PopulateCustomPredictParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/deconv2d_populate.cc b/mindspore/lite/src/ops/populate/deconv2d_populate.cc index 96ffb97e9c..f3ed7da114 100644 --- a/mindspore/lite/src/ops/populate/deconv2d_populate.cc +++ b/mindspore/lite/src/ops/populate/deconv2d_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,41 +13,54 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/deconv2d.h" #include "src/common/log_adapter.h" - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/conv_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateDeconvParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateDeconvParameter(const void *prim) { ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (conv_param == nullptr) { MS_LOG(ERROR) << "malloc ConvParameter failed."; return nullptr; } memset(conv_param, 0, sizeof(ConvParameter)); - conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = - reinterpret_cast<mindspore::lite::DeConv2D *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - conv_param->kernel_h_ = conv_primitive->GetKernelH(); - conv_param->kernel_w_ = conv_primitive->GetKernelW(); - conv_param->stride_h_ = conv_primitive->GetStrideH(); - conv_param->stride_w_ = conv_primitive->GetStrideW(); - - auto deconv_lite_primitive = (lite::DeConv2D *)primitive; - conv_param->pad_u_ = deconv_lite_primitive->PadUp(); - conv_param->pad_d_ = deconv_lite_primitive->PadDown(); - conv_param->pad_l_ = deconv_lite_primitive->PadLeft(); - conv_param->pad_r_ = deconv_lite_primitive->PadRight(); - conv_param->dilation_h_ = conv_primitive->GetDilateH(); - conv_param->dilation_w_ = conv_primitive->GetDilateW(); - conv_param->group_ = conv_primitive->GetGroup(); - auto act_type = conv_primitive->GetActivationType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + conv_param->op_parameter_.type_ = primitive->value_type(); + auto conv_primitive = primitive->value_as_Conv2dTransposeFusion(); + conv_param->kernel_h_ = static_cast<int>(*(conv_primitive->kernel_size()->begin())); + conv_param->kernel_w_ = static_cast<int>(*(conv_primitive->kernel_size()->begin() + 1)); + conv_param->group_ = static_cast<int>(conv_primitive->group()); + conv_param->stride_h_ = static_cast<int>(*(conv_primitive->stride()->begin())); + conv_param->stride_w_ = static_cast<int>(*(conv_primitive->stride()->begin() + 1)); + switch (conv_primitive->pad_mode()) { + case schema::PadMode_SAME: + conv_param->pad_mode_ = Pad_same; + break; + case schema::PadMode_VALID: + conv_param->pad_mode_ = Pad_valid; + break; + default: + conv_param->pad_mode_ = Pad_pad; + } + if (conv_primitive->pad_list() == nullptr || conv_primitive->pad_list()->size() < 4) { + conv_param->pad_u_ = 0; + conv_param->pad_d_ = 0; + conv_param->pad_l_ = 0; + conv_param->pad_r_ = 0; + } else { + conv_param->pad_u_ = static_cast<int>(*(conv_primitive->pad_list()->begin())); + conv_param->pad_d_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 1)); + conv_param->pad_l_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 2)); + conv_param->pad_r_ = static_cast<int>(*(conv_primitive->pad_list()->begin() + 3)); + } + conv_param->dilation_h_ = static_cast<int>(*(conv_primitive->dilation()->begin())); + conv_param->dilation_w_ = static_cast<int>(*(conv_primitive->dilation()->begin() + 1)); + conv_param->input_channel_ = static_cast<int>(conv_primitive->in_channel()); + conv_param->output_channel_ = static_cast<int>(conv_primitive->out_channel()); + auto act_type = conv_primitive->activation_type(); switch (act_type) { case schema::ActivationType_RELU: conv_param->act_type_ = ActType_Relu; @@ -62,7 +75,6 @@ OpParameter *PopulateDeconvParameter(const mindspore::lite::PrimitiveC *primitiv return reinterpret_cast<OpParameter *>(conv_param); } 
-Registry DeConv2DParameterRegistry(schema::PrimitiveType_DeConv2D, PopulateDeconvParameter); - +Registry g_Deconv2DParameterRegistry(schema::PrimitiveType_Conv2dTransposeFusion, PopulateDeconvParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/dedepthwise_conv2d_populate.cc b/mindspore/lite/src/ops/populate/dedepthwise_conv2d_populate.cc index 732c26cac3..332f4582c9 100644 --- a/mindspore/lite/src/ops/populate/dedepthwise_conv2d_populate.cc +++ b/mindspore/lite/src/ops/populate/dedepthwise_conv2d_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,15 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/dedepthwise_conv2d.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/conv_parameter.h" namespace mindspore { namespace lite { - +/* OpParameter *PopulateDeconvDwParameter(const mindspore::lite::PrimitiveC *primitive) { ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (conv_param == nullptr) { @@ -59,7 +56,6 @@ OpParameter *PopulateDeconvDwParameter(const mindspore::lite::PrimitiveC *primit return reinterpret_cast<OpParameter *>(conv_param); } -Registry DeDepthwiseConv2DParameterRegistry(schema::PrimitiveType_DeDepthwiseConv2D, PopulateDeconvDwParameter); - +*/ } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/default_populate.cc b/mindspore/lite/src/ops/populate/default_populate.cc new file mode 100644 index 0000000000..faac1397ca --- /dev/null +++ b/mindspore/lite/src/ops/populate/default_populate.cc @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/ops/populate/default_populate.h" +#include <cstdlib> +#include <cstring> +#include "schema/model_generated.h" +#include "src/common/log_adapter.h" + +namespace mindspore { +namespace lite { +OpParameter *DefaultPopulateParameter(const void *prim) { + OpParameter *param = static_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "Malloc OpParameter failed."; + return nullptr; + } + memset(param, 0, sizeof(OpParameter)); + auto *primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); + return param; +} +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/default_populate.h b/mindspore/lite/src/ops/populate/default_populate.h new file mode 100644 index 0000000000..fe215824af --- /dev/null +++ b/mindspore/lite/src/ops/populate/default_populate.h @@ -0,0 +1,26 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_DEFAULT_POPULATE_H_ +#define MINDSPORE_LITE_SRC_OPS_POPULATE_DEFAULT_POPULATE_H_ + +#include "nnacl/op_base.h" + +namespace mindspore { +namespace lite { +OpParameter *DefaultPopulateParameter(const void *prim); +} // namespace lite +} // namespace mindspore +#endif // MINDSPORE_LITE_SRC_OPS_POPULATE_DEFAULT_POPULATE_H_ diff --git a/mindspore/lite/src/ops/populate/depth_to_space_populate.cc b/mindspore/lite/src/ops/populate/depth_to_space_populate.cc index 4b4227a886..fe6b92a785 100644 --- a/mindspore/lite/src/ops/populate/depth_to_space_populate.cc +++ b/mindspore/lite/src/ops/populate/depth_to_space_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/depth_to_space.h" -#include "src/common/common.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/depth_to_space_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateDepthToSpaceParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateDepthToSpaceParameter(const void *prim) { DepthToSpaceParameter *depth_space_param = reinterpret_cast<DepthToSpaceParameter *>(malloc(sizeof(DepthToSpaceParameter))); if (depth_space_param == nullptr) { @@ -31,14 +27,14 @@ OpParameter *PopulateDepthToSpaceParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(depth_space_param, 0, sizeof(DepthToSpaceParameter)); - auto param = reinterpret_cast<mindspore::lite::DepthToSpace *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - depth_space_param->op_parameter_.type_ = primitive->Type(); - depth_space_param->block_size_ = param->GetBlockSize(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto param = primitive->value_as_DepthToSpace(); + depth_space_param->op_parameter_.type_ = primitive->value_type(); + depth_space_param->block_size_ = param->block_size(); return reinterpret_cast<OpParameter *>(depth_space_param); } +} // namespace -Registry DepthToSpaceParameterRegistry(schema::PrimitiveType_DepthToSpace, PopulateDepthToSpaceParameter); - +Registry g_depthToSpaceParamRegistry(schema::PrimitiveType_DepthToSpace, PopulateDepthToSpaceParameter, SCHEMA_CUR); } // namespace lite - } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/depthwise_conv2d_populate.cc b/mindspore/lite/src/ops/populate/depthwise_conv2d_populate.cc index b59536e950..678ff5a685 100644 --- a/mindspore/lite/src/ops/populate/depthwise_conv2d_populate.cc +++ b/mindspore/lite/src/ops/populate/depthwise_conv2d_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,67 +13,61 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/conv_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateConvDwParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateConvDwParameter(const void *primitive) { ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (conv_param == nullptr) { MS_LOG(ERROR) << "malloc ConvParameter failed."; return nullptr; } memset(conv_param, 0, sizeof(ConvParameter)); - conv_param->op_parameter_.type_ = primitive->Type(); + // conv_param->op_parameter_.type_ = primitive->Type(); - auto conv_primitive = - reinterpret_cast<mindspore::lite::DepthwiseConv2D *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - conv_param->kernel_h_ = conv_primitive->GetKernelH(); - conv_param->kernel_w_ = conv_primitive->GetKernelW(); - conv_param->stride_h_ = conv_primitive->GetStrideH(); - conv_param->stride_w_ = conv_primitive->GetStrideW(); + // auto conv_primitive = + // reinterpret_cast<mindspore::lite::DepthwiseConv2D *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); + // conv_param->kernel_h_ = conv_primitive->GetKernelH(); + // conv_param->kernel_w_ = conv_primitive->GetKernelW(); + // conv_param->stride_h_ = conv_primitive->GetStrideH(); + // conv_param->stride_w_ = conv_primitive->GetStrideW(); - auto convdw_lite_primitive = (lite::DepthwiseConv2D *)primitive; - conv_param->pad_u_ = convdw_lite_primitive->PadUp(); - conv_param->pad_d_ = convdw_lite_primitive->PadDown(); - conv_param->pad_l_ = convdw_lite_primitive->PadLeft(); - conv_param->pad_r_ = convdw_lite_primitive->PadRight(); - conv_param->input_channel_ = convdw_lite_primitive->GetInputChannel(); - conv_param->dilation_h_ = conv_primitive->GetDilateH(); - conv_param->dilation_w_ = conv_primitive->GetDilateW(); - auto pad_mode = conv_primitive->GetPadMode(); - switch (pad_mode) { - case schema::PadMode_SAME_UPPER: - conv_param->pad_mode_ = Pad_Same; - break; - case schema::PadMode_VALID: - conv_param->pad_mode_ = Pad_Valid; - break; - default: - conv_param->pad_mode_ = Pad_No; - break; - } - auto act_type = conv_primitive->GetActivationType(); - switch (act_type) { - case schema::ActivationType_RELU: - conv_param->act_type_ = ActType_Relu; - break; - case schema::ActivationType_RELU6: - conv_param->act_type_ = ActType_Relu6; - break; - default: - conv_param->act_type_ = ActType_No; - break; - } + // auto convdw_lite_primitive = (lite::DepthwiseConv2D *)primitive; + // conv_param->pad_u_ = convdw_lite_primitive->PadUp(); + // conv_param->pad_d_ = convdw_lite_primitive->PadDown(); + // conv_param->pad_l_ = convdw_lite_primitive->PadLeft(); + // conv_param->pad_r_ = convdw_lite_primitive->PadRight(); + // conv_param->input_channel_ = convdw_lite_primitive->GetInputChannel(); + // conv_param->dilation_h_ = conv_primitive->GetDilateH(); + // conv_param->dilation_w_ = conv_primitive->GetDilateW(); + // auto pad_mode = conv_primitive->GetPadMode(); + // switch (pad_mode) { + // case schema::PadMode_SAME_UPPER: + // conv_param->pad_mode_ = Pad_Same; + // break; + // case schema::PadMode_VALID: + // conv_param->pad_mode_ = Pad_Valid; + // break; + // default: + // conv_param->pad_mode_ = Pad_No; + // break; + // } + // auto act_type = conv_primitive->GetActivationType(); + // switch (act_type) { + // case schema::ActivationType_RELU: + // conv_param->act_type_ = ActType_Relu; + // break; + // case schema::ActivationType_RELU6: + 
// conv_param->act_type_ = ActType_Relu6; + // break; + // default: + // conv_param->act_type_ = ActType_No; + // break; + // } return reinterpret_cast<OpParameter *>(conv_param); } - -Registry DepthwiseConv2DParameterRegistry(schema::PrimitiveType_DepthwiseConv2D, PopulateConvDwParameter); - } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/detection_post_process_populate.cc b/mindspore/lite/src/ops/populate/detection_post_process_populate.cc index 51895495cd..4e66d25374 100644 --- a/mindspore/lite/src/ops/populate/detection_post_process_populate.cc +++ b/mindspore/lite/src/ops/populate/detection_post_process_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,16 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/detection_post_process.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/detection_post_process_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateDetectionPostProcessParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateDetectionPostProcessParameter(const void *prim) { DetectionPostProcessParameter *detection_post_process_parameter = reinterpret_cast<DetectionPostProcessParameter *>(malloc(sizeof(DetectionPostProcessParameter))); if (detection_post_process_parameter == nullptr) { @@ -30,24 +27,25 @@ OpParameter *PopulateDetectionPostProcessParameter(const mindspore::lite::Primit return nullptr; } memset(detection_post_process_parameter, 0, sizeof(DetectionPostProcessParameter)); - detection_post_process_parameter->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::DetectionPostProcess *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - detection_post_process_parameter->h_scale_ = param->GetHScale(); - detection_post_process_parameter->w_scale_ = param->GetWScale(); - detection_post_process_parameter->x_scale_ = param->GetXScale(); - detection_post_process_parameter->y_scale_ = param->GetYScale(); - detection_post_process_parameter->nms_iou_threshold_ = param->GetNmsIouThreshold(); - detection_post_process_parameter->nms_score_threshold_ = param->GetNmsScoreThreshold(); - detection_post_process_parameter->max_detections_ = param->GetMaxDetections(); - detection_post_process_parameter->detections_per_class_ = param->GetDetectionsPerClass(); - detection_post_process_parameter->max_classes_per_detection_ = param->GetMaxClassesPerDetection(); - detection_post_process_parameter->num_classes_ = param->GetNumClasses(); - detection_post_process_parameter->use_regular_nms_ = param->GetUseRegularNms(); + auto primitive = static_cast<const schema::Primitive *>(prim); + detection_post_process_parameter->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_DetectionPostProcess(); + detection_post_process_parameter->h_scale_ = *(param->scale()->begin()); + detection_post_process_parameter->w_scale_ = *(param->scale()->begin() + 1); + detection_post_process_parameter->x_scale_ = *(param->scale()->begin() + 2); + detection_post_process_parameter->y_scale_ = *(param->scale()->begin() + 3); + detection_post_process_parameter->nms_iou_threshold_ = 
param->nms_iou_threshold(); + detection_post_process_parameter->nms_score_threshold_ = param->nms_score_threshold(); + detection_post_process_parameter->max_detections_ = param->max_detections(); + detection_post_process_parameter->detections_per_class_ = param->detections_per_class(); + detection_post_process_parameter->max_classes_per_detection_ = param->max_classes_per_detection(); + detection_post_process_parameter->num_classes_ = param->num_classes(); + detection_post_process_parameter->use_regular_nms_ = param->use_regular_nms(); return reinterpret_cast<OpParameter *>(detection_post_process_parameter); } -Registry DetectionPostProcessParameterRegistry(schema::PrimitiveType_DetectionPostProcess, - PopulateDetectionPostProcessParameter); +} // namespace +Registry g_detectionPostProcessParameterRegistry(schema::PrimitiveType_DetectionPostProcess, + PopulateDetectionPostProcessParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/div_populate.cc b/mindspore/lite/src/ops/populate/div_populate.cc index 78af04ef10..647ef86947 100644 --- a/mindspore/lite/src/ops/populate/div_populate.cc +++ b/mindspore/lite/src/ops/populate/div_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/div.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "src/ops/populate/arithmetic_populate.h" namespace mindspore { namespace lite { -OpParameter *PopulateDivParameter(const mindspore::lite::PrimitiveC *primitive) { - ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); +OpParameter *PopulateDivParameter(const void *prim) { + ArithmeticParameter *param = PopulateArithmeticCommonPara(prim); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; return nullptr; } - param->activation_type_ = reinterpret_cast<const mindspore::lite::Div *>(primitive)->GetActivationType(); return reinterpret_cast<OpParameter *>(param); } -Registry DivParameterRegistry(schema::PrimitiveType_Div, PopulateDivParameter); +Registry g_divParameterRegistry(schema::PrimitiveType_DivFusion, PopulateDivParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/eltwise_populate.cc b/mindspore/lite/src/ops/populate/eltwise_populate.cc index b3efabb77f..b58ed8f865 100644 --- a/mindspore/lite/src/ops/populate/eltwise_populate.cc +++ b/mindspore/lite/src/ops/populate/eltwise_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,40 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/div.h" -#include "src/ops/eltwise.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "src/ops/populate/arithmetic_populate.h" + namespace mindspore { namespace lite { - -OpParameter *PopulateEltwiseParameter(const mindspore::lite::PrimitiveC *primitive) { - ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); +namespace { +OpParameter *PopulateEltwiseParameter(const void *prim) { + ArithmeticParameter *param = PopulateArithmeticCommonPara(prim); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; return nullptr; } - auto eltwise = reinterpret_cast<const mindspore::lite::Eltwise *>(primitive); - switch (eltwise->GetMode()) { - case schema::EltwiseMode_PROD: - param->op_parameter_.type_ = schema::PrimitiveType_Mul; - break; - case schema::EltwiseMode_SUM: - param->op_parameter_.type_ = schema::PrimitiveType_Add; - break; - case schema::EltwiseMode_MAXIMUM: - param->op_parameter_.type_ = schema::PrimitiveType_Maximum; - break; - default: - free(param); - return nullptr; - } + auto primitive = static_cast<const schema::Primitive *>(prim); + param->eltwise_mode_ = primitive->value_as_Eltwise()->mode(); return reinterpret_cast<OpParameter *>(param); } +} // namespace -Registry EltwiseParameterRegistry(schema::PrimitiveType_Eltwise, PopulateEltwiseParameter); - +Registry g_eltwiseParameterRegistry(schema::PrimitiveType_Eltwise, PopulateEltwiseParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/elu_populate.cc b/mindspore/lite/src/ops/populate/elu_populate.cc index 95821b6481..b5854f3b6e 100644 --- a/mindspore/lite/src/ops/populate/elu_populate.cc +++ b/mindspore/lite/src/ops/populate/elu_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/elu.h" #include "nnacl/fp32/elu_fp32.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { - -OpParameter *PopulateEluParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateEluParameter(const void *prim) { EluParameter *elu_parameter = reinterpret_cast<EluParameter *>(malloc(sizeof(EluParameter))); if (elu_parameter == nullptr) { MS_LOG(ERROR) << "malloc EluParameter failed."; return nullptr; } memset(elu_parameter, 0, sizeof(EluParameter)); - elu_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Elu *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - elu_parameter->alpha_ = param->GetAlpha(); + auto primitive = static_cast<const schema::Primitive *>(prim); + elu_parameter->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_Elu(); + elu_parameter->alpha_ = param->alpha(); return reinterpret_cast<OpParameter *>(elu_parameter); } -Registry EluParameterRegistry(schema::PrimitiveType_Elu, PopulateEluParameter); +} // namespace +Registry g_eluParameterRegistry(schema::PrimitiveType_Elu, PopulateEluParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/embedding_lookup_populate.cc b/mindspore/lite/src/ops/populate/embedding_lookup_populate.cc index 907aa261f9..3c0881d1bc 100644 --- a/mindspore/lite/src/ops/populate/embedding_lookup_populate.cc +++ b/mindspore/lite/src/ops/populate/embedding_lookup_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,37 +13,35 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/embedding_lookup.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/embedding_lookup_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateEmbeddingLookupParameter(const mindspore::lite::PrimitiveC *primitive) { - EmbeddingLookupParameter *embedding_lookup_parameter = +OpParameter *PopulateEmbeddingLookupParameter(const void *prim) { + EmbeddingLookupParameter *param = reinterpret_cast<EmbeddingLookupParameter *>(malloc(sizeof(EmbeddingLookupParameter))); - if (embedding_lookup_parameter == nullptr) { + if (param == nullptr) { MS_LOG(ERROR) << "malloc EmbeddingLookupParameter failed."; return nullptr; } - memset(embedding_lookup_parameter, 0, sizeof(EmbeddingLookupParameter)); - embedding_lookup_parameter->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::EmbeddingLookup *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - embedding_lookup_parameter->max_norm_ = param->GetMaxNorm(); - if (embedding_lookup_parameter->max_norm_ < 0) { - MS_LOG(ERROR) << "Embedding lookup max norm should be positive number, got " - << embedding_lookup_parameter->max_norm_; - free(embedding_lookup_parameter); + memset(param, 0, sizeof(EmbeddingLookupParameter)); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_EmbeddingLookupFusion(); + param->op_parameter_.type_ = primitive->value_type(); + param->max_norm_ = value->max_norm(); + if (param->max_norm_ < 0) { + MS_LOG(ERROR) << "Embedding lookup max norm should be positive number, got " << param->max_norm_; + free(param); return nullptr; } - return reinterpret_cast<OpParameter *>(embedding_lookup_parameter); + return reinterpret_cast<OpParameter *>(param); } -Registry EmbeddingLookupParameterRegistry(schema::PrimitiveType_EmbeddingLookup, PopulateEmbeddingLookupParameter); +Registry EmbeddingLookupParameterRegistry(schema::PrimitiveType_EmbeddingLookupFusion, PopulateEmbeddingLookupParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/erf_populate.cc b/mindspore/lite/src/ops/populate/erf_populate.cc new file mode 100644 index 0000000000..ea7657b118 --- /dev/null +++ b/mindspore/lite/src/ops/populate/erf_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_erfParameterRegistry(schema::PrimitiveType_Erf, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/exp_populate.cc b/mindspore/lite/src/ops/populate/exp_populate.cc index 4535413fe7..122dc3c84a 100644 --- a/mindspore/lite/src/ops/populate/exp_populate.cc +++ b/mindspore/lite/src/ops/populate/exp_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,26 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/exp.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/exp_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateExpParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateExpParameter(const void *prim) { ExpParameter *exp_parameter = reinterpret_cast<ExpParameter *>(malloc(sizeof(ExpParameter))); if (exp_parameter == nullptr) { MS_LOG(ERROR) << "malloc ExpParameter failed."; return nullptr; } memset(exp_parameter, 0, sizeof(ExpParameter)); - exp_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Exp *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - exp_parameter->base_ = param->GetBase(); - exp_parameter->scale_ = param->GetScale(); - exp_parameter->shift_ = param->GetShift(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ExpFusion(); + exp_parameter->op_parameter_.type_ = primitive->value_type(); + exp_parameter->base_ = value->base(); + exp_parameter->scale_ = value->scale(); + exp_parameter->shift_ = value->shift(); if (exp_parameter->base_ != -1 && exp_parameter->base_ <= 0) { MS_LOG(ERROR) << "Exp base must be strictly positive, got " << exp_parameter->base_; free(exp_parameter); @@ -41,6 +40,6 @@ OpParameter *PopulateExpParameter(const mindspore::lite::PrimitiveC *primitive) return reinterpret_cast<OpParameter *>(exp_parameter); } -Registry ExpParameterRegistry(schema::PrimitiveType_Exp, PopulateExpParameter); +Registry ExpParameterRegistry(schema::PrimitiveType_ExpFusion, PopulateExpParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/expand_dims_populate.cc b/mindspore/lite/src/ops/populate/expand_dims_populate.cc index bb62cc477c..47351d005f 100644 --- a/mindspore/lite/src/ops/populate/expand_dims_populate.cc +++ b/mindspore/lite/src/ops/populate/expand_dims_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { - -OpParameter *PopulateExpandDimsParameter(const mindspore::lite::PrimitiveC *primitive) { - OpParameter *expand_dims_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); - if (expand_dims_param == nullptr) { +namespace { +OpParameter *PopulateExpandDimsParameter(const void *prim) { + OpParameter *expand_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (expand_param == nullptr) { MS_LOG(ERROR) << "malloc ExpandDimsParameter failed."; return nullptr; } - expand_dims_param->type_ = primitive->Type(); - memset(expand_dims_param, 0, sizeof(OpParameter)); - return reinterpret_cast<OpParameter *>(expand_dims_param); + memset(expand_param, 0, sizeof(OpParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + expand_param->type_ = primitive->value_type(); + return reinterpret_cast<OpParameter *>(expand_param); } +} // namespace -Registry ExpandDimsParameterRegistry(schema::PrimitiveType_ExpandDims, PopulateExpandDimsParameter); - +Registry g_expandDimsParameterRegistry(schema::PrimitiveType_ExpandDims, PopulateExpandDimsParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/fill_populate.cc b/mindspore/lite/src/ops/populate/fill_populate.cc index c68d457fce..b48f4b24cf 100644 --- a/mindspore/lite/src/ops/populate/fill_populate.cc +++ b/mindspore/lite/src/ops/populate/fill_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,34 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/fill.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "nnacl/fill_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateFillParameter(const mindspore::lite::PrimitiveC *primitive) { - const auto param = reinterpret_cast<mindspore::lite::Fill *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - FillParameter *fill_param = reinterpret_cast<FillParameter *>(malloc(sizeof(FillParameter))); +namespace { +OpParameter *PopulateFillParameter(const void *prim) { + OpParameter *fill_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (fill_param == nullptr) { MS_LOG(ERROR) << "malloc FillParameter failed."; return nullptr; } - memset(fill_param, 0, sizeof(FillParameter)); - fill_param->op_parameter_.type_ = primitive->Type(); - auto flatDims = param->GetDims(); - fill_param->num_dims_ = flatDims.size(); - int i = 0; - for (auto iter = flatDims.begin(); iter != flatDims.end(); iter++) { - fill_param->dims_[i++] = *iter; - } + memset(fill_param, 0, sizeof(OpParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + fill_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(fill_param); } +} // namespace -Registry FillParameterRegistry(schema::PrimitiveType_Fill, PopulateFillParameter); +Registry g_fillParameterRegistry(schema::PrimitiveType_Fill, PopulateFillParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/flatten_populate.cc b/mindspore/lite/src/ops/populate/flatten_populate.cc index 6905ad3176..09cbab37cd 100644 --- a/mindspore/lite/src/ops/populate/flatten_populate.cc +++ b/mindspore/lite/src/ops/populate/flatten_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateFlattenParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateFlattenParameter(const void *prim) { OpParameter *flatten_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (flatten_param == nullptr) { MS_LOG(ERROR) << "malloc FlattenParameter failed."; return nullptr; } memset(flatten_param, 0, sizeof(OpParameter)); - flatten_param->type_ = primitive->Type(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + flatten_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(flatten_param); } -Registry FlattenParameterRegistry(schema::PrimitiveType_Flatten, PopulateFlattenParameter); +Registry FlattenParameterRegistry(schema::PrimitiveType_Flatten, PopulateFlattenParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/full_connection_populate.cc b/mindspore/lite/src/ops/populate/full_connection_populate.cc index fafc985b64..e60c48a222 100644 --- a/mindspore/lite/src/ops/populate/full_connection_populate.cc +++ b/mindspore/lite/src/ops/populate/full_connection_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,40 +13,38 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/full_connection.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/matmul_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateFullconnectionParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = - reinterpret_cast<mindspore::lite::FullConnection *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateFullconnectionParameter(const void *prim) { MatMulParameter *matmul_param = reinterpret_cast<MatMulParameter *>(malloc(sizeof(MatMulParameter))); if (matmul_param == nullptr) { MS_LOG(ERROR) << "malloc MatMulParameter failed."; return nullptr; } memset(matmul_param, 0, sizeof(MatMulParameter)); - matmul_param->op_parameter_.type_ = primitive->Type(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + matmul_param->op_parameter_.type_ = primitive->value_type(); + auto full_conn_prim = primitive->value_as_FullConnection(); matmul_param->b_transpose_ = true; matmul_param->a_transpose_ = false; - matmul_param->has_bias_ = param->GetHasBias(); - if (param->GetActivationType() == schema::ActivationType_RELU) { + matmul_param->has_bias_ = full_conn_prim->has_bias(); + if (full_conn_prim->activation_type() == schema::ActivationType_RELU) { matmul_param->act_type_ = ActType_Relu; - } else if (param->GetActivationType() == schema::ActivationType_RELU6) { + } else if (full_conn_prim->activation_type() == schema::ActivationType_RELU6) { matmul_param->act_type_ = ActType_Relu6; } else { matmul_param->act_type_ = ActType_No; } - + matmul_param->axis_ = full_conn_prim->axis(); + matmul_param->use_axis_ = full_conn_prim->use_axis(); return reinterpret_cast<OpParameter *>(matmul_param); } +} // namespace -Registry FullConnectionParameterRegistry(schema::PrimitiveType_FullConnection, PopulateFullconnectionParameter); 
- +Registry g_fullConnRegistry(schema::PrimitiveType_FullConnection, PopulateFullconnectionParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/fused_batchnorm_populate.cc b/mindspore/lite/src/ops/populate/fused_batchnorm_populate.cc index c7825066f0..1457eeea6a 100644 --- a/mindspore/lite/src/ops/populate/fused_batchnorm_populate.cc +++ b/mindspore/lite/src/ops/populate/fused_batchnorm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,32 +13,29 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/fused_batchnorm.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/batchnorm_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateFusedBatchNorm(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateFusedBatchNorm(const void *prim) { BatchNormParameter *batch_norm_param = reinterpret_cast<BatchNormParameter *>(malloc(sizeof(BatchNormParameter))); if (batch_norm_param == nullptr) { MS_LOG(ERROR) << "malloc BatchNormParameter failed."; return nullptr; } memset(batch_norm_param, 0, sizeof(BatchNormParameter)); - batch_norm_param->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::FusedBatchNorm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - batch_norm_param->epsilon_ = param->GetEpsilon(); - batch_norm_param->momentum_ = param->GetMomentum(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_FusedBatchNorm(); + batch_norm_param->op_parameter_.type_ = primitive->value_type(); + batch_norm_param->epsilon_ = value->epsilon(); + batch_norm_param->momentum_ = value->momentum(); batch_norm_param->fused_ = true; return reinterpret_cast<OpParameter *>(batch_norm_param); } -Registry FusedBatchNormParameterRegistry(schema::PrimitiveType_FusedBatchNorm, PopulateFusedBatchNorm); +Registry FusedBatchNormParameterRegistry(schema::PrimitiveType_FusedBatchNorm, PopulateFusedBatchNorm, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/gather_nd_populate.cc b/mindspore/lite/src/ops/populate/gather_nd_populate.cc index efadd2a69c..953cfe1b37 100644 --- a/mindspore/lite/src/ops/populate/gather_nd_populate.cc +++ b/mindspore/lite/src/ops/populate/gather_nd_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/gather_nd.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/gatherNd_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateGatherNdParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateGatherNdParameter(const void *prim) { GatherNdParameter *gather_nd_param = reinterpret_cast<GatherNdParameter *>(malloc(sizeof(GatherNdParameter))); if (gather_nd_param == nullptr) { MS_LOG(ERROR) << "malloc GatherNdParameter failed."; return nullptr; } memset(gather_nd_param, 0, sizeof(GatherNdParameter)); - gather_nd_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + gather_nd_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(gather_nd_param); } +} // namespace -Registry GatherNdParameterRegistry(schema::PrimitiveType_GatherNd, PopulateGatherNdParameter); - +Registry g_gatherNdParameterRegistry(schema::PrimitiveType_GatherNd, PopulateGatherNdParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/gather_populate.cc b/mindspore/lite/src/ops/populate/gather_populate.cc index bec392473a..0da4594501 100644 --- a/mindspore/lite/src/ops/populate/gather_populate.cc +++ b/mindspore/lite/src/ops/populate/gather_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,36 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/gather.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/gather_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateGatherParameter(const mindspore::lite::PrimitiveC *primitive) { - auto gather_attr = reinterpret_cast<mindspore::lite::Gather *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateGatherParameter(const void *prim) { GatherParameter *gather_param = reinterpret_cast<GatherParameter *>(malloc(sizeof(GatherParameter))); if (gather_param == nullptr) { MS_LOG(ERROR) << "malloc GatherParameter failed."; return nullptr; } memset(gather_param, 0, sizeof(GatherParameter)); - gather_param->op_parameter_.type_ = primitive->Type(); - if (gather_attr->GetAxis() < 0) { - MS_LOG(ERROR) << "axis should be >= 0."; - free(gather_param); - return nullptr; - } - gather_param->axis_ = gather_attr->GetAxis(); - gather_param->batchDims_ = gather_attr->GetBatchDims(); + auto primitive = static_cast<const schema::Primitive *>(prim); + gather_param->op_parameter_.type_ = primitive->value_type(); + return reinterpret_cast<OpParameter *>(gather_param); } -Registry GatherParameterRegistry(schema::PrimitiveType_Gather, PopulateGatherParameter); +} // namespace +Registry g_gatherParameterRegistry(schema::PrimitiveType_Gather, PopulateGatherParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/gru_populate.cc b/mindspore/lite/src/ops/populate/gru_populate.cc index 1e57855d30..4e492aca98 100644 --- a/mindspore/lite/src/ops/populate/gru_populate.cc +++ b/mindspore/lite/src/ops/populate/gru_populate.cc @@ -13,30 +13,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "src/ops/gru.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/gru_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateGruParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateGruParameter(const void *prim) { GruParameter *gru_param = reinterpret_cast<GruParameter *>(malloc(sizeof(GruParameter))); if (gru_param == nullptr) { MS_LOG(ERROR) << "malloc GruParameter failed."; return nullptr; } memset(gru_param, 0, sizeof(GruParameter)); - gru_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Gru *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); + auto *primitive = static_cast<const schema::Primitive *>(prim); + gru_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_GRU(); if (param == nullptr) { free(gru_param); MS_LOG(ERROR) << "get Gru param nullptr."; return nullptr; } - gru_param->bidirectional_ = param->GetBidirection(); + gru_param->bidirectional_ = param->bidirectional(); return reinterpret_cast<OpParameter *>(gru_param); } -Registry GruParameterRegistry(schema::PrimitiveType_Gru, PopulateGruParameter); +} // namespace + +Registry g_gruParameterRegistry(schema::PrimitiveType_GRU, PopulateGruParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/hashtable_lookup_populate.cc b/mindspore/lite/src/ops/populate/hashtable_lookup_populate.cc index 3b97fa9f5f..d4ff5ac11e 100644 --- a/mindspore/lite/src/ops/populate/hashtable_lookup_populate.cc +++ b/mindspore/lite/src/ops/populate/hashtable_lookup_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/hashtable_lookup.h" -#include "src/common/string_util.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateHashtableLookupParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateHashtableLookupParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "new OpParameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry HashtableLookupParameterRegistry(schema::PrimitiveType_HashtableLookup, PopulateHashtableLookupParameter); +Registry HashtableLookupParameterRegistry(schema::PrimitiveType_HashtableLookup, PopulateHashtableLookupParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/if_populate.cc b/mindspore/lite/src/ops/populate/if_populate.cc new file mode 100644 index 0000000000..57b4aa8018 --- /dev/null +++ b/mindspore/lite/src/ops/populate/if_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_ifParameterRegistry(schema::PrimitiveType_If, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/instance_norm_populate.cc b/mindspore/lite/src/ops/populate/instance_norm_populate.cc index 13d33fd8f8..ca4bf66b59 100644 --- a/mindspore/lite/src/ops/populate/instance_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/instance_norm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "src/ops/instance_norm.h" #include "nnacl/instance_norm_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateInstanceNormParameter(const mindspore::lite::PrimitiveC *primitive) { - const auto param = - reinterpret_cast<mindspore::lite::InstanceNorm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +OpParameter *PopulateInstanceNormParameter(const void *prim) { InstanceNormParameter *instance_norm_param = reinterpret_cast<InstanceNormParameter *>(malloc(sizeof(InstanceNormParameter))); if (instance_norm_param == nullptr) { @@ -31,11 +26,14 @@ OpParameter *PopulateInstanceNormParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(instance_norm_param, 0, sizeof(InstanceNormParameter)); - instance_norm_param->op_parameter_.type_ = primitive->Type(); - instance_norm_param->epsilon_ = param->GetEpsilon(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_InstanceNorm(); + instance_norm_param->op_parameter_.type_ = primitive->value_type(); + instance_norm_param->epsilon_ = value->epsilon(); return reinterpret_cast<OpParameter *>(instance_norm_param); } -Registry InstanceNormParameterRegistry(schema::PrimitiveType_InstanceNorm, PopulateInstanceNormParameter); +Registry InstanceNormParameterRegistry(schema::PrimitiveType_InstanceNorm, PopulateInstanceNormParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/invert_permutation_populate.cc b/mindspore/lite/src/ops/populate/invert_permutation_populate.cc new file mode 100644 index 0000000000..4268621d4d --- /dev/null +++ b/mindspore/lite/src/ops/populate/invert_permutation_populate.cc @@ -0,0 +1,24 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_invertPermutationParameterRegistry(schema::PrimitiveType_InvertPermutation, DefaultPopulateParameter, + SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/isfinite_populate.cc b/mindspore/lite/src/ops/populate/isfinite_populate.cc new file mode 100644 index 0000000000..9148ecb8b2 --- /dev/null +++ b/mindspore/lite/src/ops/populate/isfinite_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_isFiniteParameterRegistry(schema::PrimitiveType_IsFinite, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/l2_norm_populate.cc b/mindspore/lite/src/ops/populate/l2_norm_populate.cc index cfcd249873..1bfe2ef74e 100644 --- a/mindspore/lite/src/ops/populate/l2_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/l2_norm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,52 +13,47 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/l2_norm.h" #include <cstdint> -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/l2_norm_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateL2NormParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateL2NormParameter(const void *prim) { L2NormParameter *l2_norm_parameter = reinterpret_cast<L2NormParameter *>(malloc(sizeof(L2NormParameter))); if (l2_norm_parameter == nullptr) { MS_LOG(ERROR) << "malloc L2NormParameter failed."; return nullptr; } memset(l2_norm_parameter, 0, sizeof(L2NormParameter)); - l2_norm_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::L2Norm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - MS_ASSERT(param); - auto axis_vec = param->GetAxis(); - l2_norm_parameter->axis_num_ = axis_vec.size(); - if (axis_vec.size() > SIZE_MAX / sizeof(int)) { - MS_LOG(ERROR) << "axis_vec size too big"; - free(l2_norm_parameter); - return nullptr; - } - MS_ASSERT(axis_vec.size() < 8); - for (size_t i = 0; i < axis_vec.size(); i++) { - l2_norm_parameter->axis_[i] = axis_vec[i]; + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_L2NormalizeFusion(); + l2_norm_parameter->op_parameter_.type_ = primitive->value_type(); + + auto axis_vec = value->axis(); + l2_norm_parameter->axis_num_ = axis_vec->size(); + + MS_ASSERT(axis_vec->size() < 8); + for (size_t i = 0; i < axis_vec->size(); i++) { + l2_norm_parameter->axis_[i] = static_cast<int>(axis_vec->Get(i)); } - if (param->GetEpsilon() < 1e-6) { + if (value->epsilon() < 1e-6) { l2_norm_parameter->epsilon_ = 1e-6; } else { - l2_norm_parameter->epsilon_ = param->GetEpsilon(); + l2_norm_parameter->epsilon_ = value->epsilon(); } - if (param->GetActivationType() == static_cast<int>(schema::ActivationType_RELU)) { + if (value->activation_type() == static_cast<int>(schema::ActivationType_RELU)) { l2_norm_parameter->act_type_ = ActType_Relu; - } else if (param->GetActivationType() == static_cast<int>(schema::ActivationType_RELU6)) { + } else if (value->activation_type() == static_cast<int>(schema::ActivationType_RELU6)) { l2_norm_parameter->act_type_ = ActType_Relu6; } else { l2_norm_parameter->act_type_ = ActType_No; } return reinterpret_cast<OpParameter *>(l2_norm_parameter); } -Registry L2NormParameterRegistry(schema::PrimitiveType_L2Norm, PopulateL2NormParameter); +Registry L2NormParameterRegistry(schema::PrimitiveType_L2NormalizeFusion, PopulateL2NormParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/layer_norm_populate.cc b/mindspore/lite/src/ops/populate/layer_norm_populate.cc index e87edc40d4..b99bb95a16 100644 --- a/mindspore/lite/src/ops/populate/layer_norm_populate.cc +++ b/mindspore/lite/src/ops/populate/layer_norm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,35 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - #include "nnacl/layer_norm_parameter.h" -#include "src/ops/layer_norm.h" -#include "src/ops/primitive_c.h" +#include <cstdint> #include "src/ops/populate/populate_register.h" - namespace mindspore { namespace lite { -OpParameter *PopulateLayerNormParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateLayerNormParameter(const void *prim) { auto layer_norm_parameter = reinterpret_cast<LayerNormParameter *>(malloc(sizeof(LayerNormParameter))); if (layer_norm_parameter == nullptr) { MS_LOG(ERROR) << "malloc LayerNormParameter failed."; return nullptr; } memset(layer_norm_parameter, 0, sizeof(LayerNormParameter)); - layer_norm_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::LayerNorm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - auto normalized_shape = param->GetNormlizedShape(); - layer_norm_parameter->normalized_dims_ = normalized_shape.size(); - MS_ASSERT(normalized_shape.size() < 8); - for (size_t i = 0; i < normalized_shape.size(); i++) { - layer_norm_parameter->normalized_shape_[i] = normalized_shape[i]; - } - layer_norm_parameter->epsilon_ = param->GetEpsilon(); - layer_norm_parameter->begin_norm_axis_ = param->GetBeginNormAxis(); - layer_norm_parameter->begin_params_axis_ = param->GetBeginParamsAxis(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + layer_norm_parameter->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_LayerNormFusion(); + layer_norm_parameter->epsilon_ = param->epsilon(); + layer_norm_parameter->elementwise_affine_ = param->elementwise_affine(); + layer_norm_parameter->begin_norm_axis_ = static_cast<int>(param->begin_norm_axis()); + layer_norm_parameter->begin_params_axis_ = static_cast<int>(param->begin_params_axis()); return reinterpret_cast<OpParameter *>(layer_norm_parameter); } -Registry LayerNormParameterRegistry(schema::PrimitiveType_LayerNorm, PopulateLayerNormParameter); +Registry g_layerNormParameterRegistry(schema::PrimitiveType_LayerNormFusion, PopulateLayerNormParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/lin_space_populate.cc b/mindspore/lite/src/ops/populate/lin_space_populate.cc new file mode 100644 index 0000000000..fdc4f1a1db --- /dev/null +++ b/mindspore/lite/src/ops/populate/lin_space_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_linSpaceParameterRegistry(schema::PrimitiveType_LinSpace, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/local_response_normalization_populate.cc b/mindspore/lite/src/ops/populate/local_response_normalization_populate.cc index 36fc15ce04..a208a78a34 100644 --- a/mindspore/lite/src/ops/populate/local_response_normalization_populate.cc +++ b/mindspore/lite/src/ops/populate/local_response_normalization_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,18 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/local_response_normalization.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/local_response_norm_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateLocalResponseNormParameter(const mindspore::lite::PrimitiveC *primitive) { - auto local_response_norm_attr = reinterpret_cast<mindspore::lite::LocalResponseNormalization *>( - const_cast<mindspore::lite::PrimitiveC *>(primitive)); +OpParameter *PopulateLocalResponseNormParameter(const void *prim) { LocalResponseNormParameter *lrn_param = reinterpret_cast<LocalResponseNormParameter *>(malloc(sizeof(LocalResponseNormParameter))); if (lrn_param == nullptr) { @@ -32,16 +27,18 @@ OpParameter *PopulateLocalResponseNormParameter(const mindspore::lite::Primitive return nullptr; } memset(lrn_param, 0, sizeof(LocalResponseNormParameter)); - lrn_param->op_parameter_.type_ = primitive->Type(); - lrn_param->depth_radius_ = local_response_norm_attr->GetDepthRadius(); - lrn_param->bias_ = local_response_norm_attr->GetBias(); - lrn_param->alpha_ = local_response_norm_attr->GetAlpha(); - lrn_param->beta_ = local_response_norm_attr->GetBeta(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_LRN(); + lrn_param->op_parameter_.type_ = primitive->value_type(); + lrn_param->depth_radius_ = value->depth_radius(); + lrn_param->bias_ = value->bias(); + lrn_param->alpha_ = value->alpha(); + lrn_param->beta_ = value->beta(); return reinterpret_cast<OpParameter *>(lrn_param); } -Registry LocalResponseNormalizationParameterRegistry(schema::PrimitiveType_LocalResponseNormalization, - PopulateLocalResponseNormParameter); +Registry LocalResponseNormalizationParameterRegistry(schema::PrimitiveType_LRN, PopulateLocalResponseNormParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/lsh_projection_populate.cc b/mindspore/lite/src/ops/populate/lsh_projection_populate.cc index 70fccf14da..f3b59f7aba 100644 --- a/mindspore/lite/src/ops/populate/lsh_projection_populate.cc +++ b/mindspore/lite/src/ops/populate/lsh_projection_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -13,15 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/lsh_projection.h" #include "nnacl/lsh_projection_parameter.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateLshProjectionParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateLshProjectionParameter(const void *prim) { LshProjectionParameter *lsh_project_param = reinterpret_cast<LshProjectionParameter *>(malloc(sizeof(LshProjectionParameter))); if (lsh_project_param == nullptr) { @@ -29,12 +27,15 @@ OpParameter *PopulateLshProjectionParameter(const mindspore::lite::PrimitiveC *p return nullptr; } memset(lsh_project_param, 0, sizeof(LshProjectionParameter)); - lsh_project_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::LshProjection *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - lsh_project_param->lsh_type_ = param->GetLshType(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_LshProjection(); + lsh_project_param->op_parameter_.type_ = primitive->value_type(); + lsh_project_param->lsh_type_ = value->type(); return reinterpret_cast<OpParameter *>(lsh_project_param); } -Registry LshProjectionParameterRegistry(schema::PrimitiveType_LshProjection, PopulateLshProjectionParameter); +Registry LshProjectionParameterRegistry(schema::PrimitiveType_LshProjection, PopulateLshProjectionParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/lstm_populate.cc b/mindspore/lite/src/ops/populate/lstm_populate.cc index 95642daec0..e94dcceeaa 100644 --- a/mindspore/lite/src/ops/populate/lstm_populate.cc +++ b/mindspore/lite/src/ops/populate/lstm_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,32 +13,34 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/lstm.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/lstm_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateLstmParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateLstmParameter(const void *prim) { LstmParameter *lstm_param = reinterpret_cast<LstmParameter *>(malloc(sizeof(LstmParameter))); if (lstm_param == nullptr) { MS_LOG(ERROR) << "malloc LstmParameter failed."; return nullptr; } memset(lstm_param, 0, sizeof(LstmParameter)); - lstm_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Lstm *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); + auto primitive = static_cast<const schema::Primitive *>(prim); + lstm_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_LSTM(); if (param == nullptr) { free(lstm_param); MS_LOG(ERROR) << "get Lstm param nullptr."; return nullptr; } - lstm_param->bidirectional_ = param->GetBidirection(); - lstm_param->smooth_ = param->GetSmooth(); + + lstm_param->bidirectional_ = param->bidirectional(); + lstm_param->zoneout_cell_ = param->zoneout_cell(); + lstm_param->zoneout_hidden_ = param->zoneout_hidden(); return reinterpret_cast<OpParameter *>(lstm_param); } -Registry LstmParameterRegistry(schema::PrimitiveType_Lstm, PopulateLstmParameter); +} // namespace +Registry g_lstmParameterRegistry(schema::PrimitiveType_LSTM, PopulateLstmParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/matmul_populate.cc b/mindspore/lite/src/ops/populate/matmul_populate.cc index 3c824202dc..f4bb92c109 100644 --- a/mindspore/lite/src/ops/populate/matmul_populate.cc +++ b/mindspore/lite/src/ops/populate/matmul_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,31 +13,29 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/matmul.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/matmul_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateMatMulParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = reinterpret_cast<mindspore::lite::MatMul *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +OpParameter *PopulateMatMulParameter(const void *prim) { MatMulParameter *matmul_param = reinterpret_cast<MatMulParameter *>(malloc(sizeof(MatMulParameter))); if (matmul_param == nullptr) { MS_LOG(ERROR) << "malloc MatMulParameter failed."; return nullptr; } memset(matmul_param, 0, sizeof(MatMulParameter)); - matmul_param->op_parameter_.type_ = primitive->Type(); - matmul_param->b_transpose_ = param->GetTransposeB(); - matmul_param->a_transpose_ = param->GetTransposeA(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_MatMul(); + matmul_param->op_parameter_.type_ = primitive->value_type(); + matmul_param->b_transpose_ = value->transpose_b(); + matmul_param->a_transpose_ = value->transpose_a(); matmul_param->has_bias_ = false; matmul_param->act_type_ = ActType_No; return reinterpret_cast<OpParameter *>(matmul_param); } -Registry MatMulParameterRegistry(schema::PrimitiveType_MatMul, PopulateMatMulParameter); +Registry MatMulParameterRegistry(schema::PrimitiveType_MatMul, PopulateMatMulParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/merge_populate.cc b/mindspore/lite/src/ops/populate/merge_populate.cc index 1945864f05..8ab485a46d 100644 --- a/mindspore/lite/src/ops/populate/merge_populate.cc +++ b/mindspore/lite/src/ops/populate/merge_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,22 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateMergeParameter(const mindspore::lite::PrimitiveC *primitive) { + +OpParameter *PopulateMergeParameter(const void *prim) { OpParameter *merge_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (merge_parameter == nullptr) { MS_LOG(ERROR) << "malloc Merge parameter failed."; return nullptr; } memset(merge_parameter, 0, sizeof(OpParameter)); - merge_parameter->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + merge_parameter->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(merge_parameter); } -Registry MergeParameterRegistry(schema::PrimitiveType_Merge, PopulateMergeParameter); +Registry MergeParameterRegistry(schema::PrimitiveType_Merge, PopulateMergeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/mfcc_populate.cc b/mindspore/lite/src/ops/populate/mfcc_populate.cc new file mode 100644 index 0000000000..22b2cea316 --- /dev/null +++ b/mindspore/lite/src/ops/populate/mfcc_populate.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2019-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "nnacl/infer/mfcc_infer.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateMfccParameter(const void *prim) { + MfccParameter *arg_param = reinterpret_cast<MfccParameter *>(malloc(sizeof(MfccParameter))); + if (arg_param == nullptr) { + MS_LOG(ERROR) << "malloc MfccParameter failed."; + return nullptr; + } + memset(arg_param, 0, sizeof(MfccParameter)); + auto *primitive = static_cast<const schema::Primitive *>(prim); + arg_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_Mfcc(); + arg_param->dct_coeff_num_ = param->dct_coeff_num(); + return reinterpret_cast<OpParameter *>(arg_param); +} +} // namespace + +Registry g_mfccParameterRegistry(schema::PrimitiveType_Mfcc, PopulateMfccParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/mul_populate.cc b/mindspore/lite/src/ops/populate/mul_populate.cc index 1d7b709eda..b56957d39b 100644 --- a/mindspore/lite/src/ops/populate/mul_populate.cc +++ b/mindspore/lite/src/ops/populate/mul_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */ - -#include "src/ops/mul.h" #include "nnacl/arithmetic.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "src/ops/populate/arithmetic_populate.h" namespace mindspore { namespace lite { - -OpParameter *PopulateMulParameter(const mindspore::lite::PrimitiveC *primitive) { - ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); +namespace { +OpParameter *PopulateMulParameter(const void *prim) { + ArithmeticParameter *param = PopulateArithmeticCommonPara(prim); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; return nullptr; } - param->activation_type_ = reinterpret_cast<const mindspore::lite::Mul *>(primitive)->GetActivationType(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); + auto mul_prim = primitive->value_as_MulFusion(); + param->activation_type_ = mul_prim->activation_type(); return reinterpret_cast<OpParameter *>(param); } +} // namespace -Registry MulParameterRegistry(schema::PrimitiveType_Mul, PopulateMulParameter); - +Registry g_mulParameterRegistry(schema::PrimitiveType_MulFusion, PopulateMulParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/nchw2nhwc_populate.cc b/mindspore/lite/src/ops/populate/nchw2nhwc_populate.cc deleted file mode 100644 index 47ba44e401..0000000000 --- a/mindspore/lite/src/ops/populate/nchw2nhwc_populate.cc +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/primitive_c.h" -#include "src/ops/populate/populate_register.h" -#include "src/common/common.h" -#include "nnacl/transpose.h" - -namespace mindspore { -namespace lite { - -OpParameter *PopulateNchw2NhwcParameter(const mindspore::lite::PrimitiveC *primitive) { - TransposeParameter *parameter = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter))); - if (parameter == nullptr) { - MS_LOG(ERROR) << "malloc OpParameter failed."; - return nullptr; - } - memset(parameter, 0, sizeof(OpParameter)); - parameter->op_parameter_.type_ = primitive->Type(); - parameter->num_axes_ = 4; - parameter->perm_[0] = 0; - parameter->perm_[1] = 2; - parameter->perm_[2] = 3; - parameter->perm_[3] = 1; - return reinterpret_cast<OpParameter *>(parameter); -} -Registry Nchw2NhwcParameterRegistry(schema::PrimitiveType_Nchw2Nhwc, PopulateNchw2NhwcParameter); - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/nhwc2nchw_populate.cc b/mindspore/lite/src/ops/populate/nhwc2nchw_populate.cc deleted file mode 100644 index 5156fa0b25..0000000000 --- a/mindspore/lite/src/ops/populate/nhwc2nchw_populate.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/primitive_c.h" -#include "src/ops/populate/populate_register.h" -#include "src/common/common.h" -#include "nnacl/transpose.h" - -namespace mindspore { -namespace lite { - -OpParameter *PopulateNhwc2NchwParameter(const mindspore::lite::PrimitiveC *primitive) { - TransposeParameter *parameter = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter))); - if (parameter == nullptr) { - MS_LOG(ERROR) << "malloc OpParameter failed."; - return nullptr; - } - memset(parameter, 0, sizeof(OpParameter)); - parameter->op_parameter_.type_ = primitive->Type(); - parameter->num_axes_ = 4; - parameter->perm_[0] = 0; - parameter->perm_[1] = 3; - parameter->perm_[2] = 1; - parameter->perm_[3] = 2; - return reinterpret_cast<OpParameter *>(parameter); -} - -Registry Nhwc2NchwParameterRegistry(schema::PrimitiveType_Nhwc2Nchw, PopulateNhwc2NchwParameter); - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/non_max_suppression_populate.cc b/mindspore/lite/src/ops/populate/non_max_suppression_populate.cc index 3fc60b09ca..95a49ca95f 100644 --- a/mindspore/lite/src/ops/populate/non_max_suppression_populate.cc +++ b/mindspore/lite/src/ops/populate/non_max_suppression_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/non_max_suppression.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/non_max_suppression_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateNonMaxSuppressionParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateNonMaxSuppressionParameter(const void *prim) { NMSParameter *param = reinterpret_cast<NMSParameter *>(malloc(sizeof(NMSParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc param failed."; return nullptr; } memset(param, 0, sizeof(NMSParameter)); - param->op_parameter_.type_ = primitive->Type(); - auto prim = - reinterpret_cast<mindspore::lite::NonMaxSuppression *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->center_point_box_ = prim->GetCenterPointBox(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_NonMaxSuppression(); + param->op_parameter_.type_ = primitive->value_type(); + param->center_point_box_ = value->center_point_box(); return reinterpret_cast<OpParameter *>(param); } -Registry NonMaxSuppressionParameterRegistry(schema::PrimitiveType_NonMaxSuppression, - PopulateNonMaxSuppressionParameter); +Registry NonMaxSuppressionParameterRegistry(schema::PrimitiveType_NonMaxSuppression, PopulateNonMaxSuppressionParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/nonzero_populate.cc b/mindspore/lite/src/ops/populate/nonzero_populate.cc index 7436c1965b..dfa5e2d22d 100644 --- a/mindspore/lite/src/ops/populate/nonzero_populate.cc +++ b/mindspore/lite/src/ops/populate/nonzero_populate.cc @@ -13,22 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" namespace mindspore { namespace lite { - -OpParameter *PopulateNonZeroParameter(const mindspore::lite::PrimitiveC *primitive) { - auto nonzero_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); - if (nonzero_parameter == nullptr) { - MS_LOG(ERROR) << "malloc Where parameter failed."; - return nullptr; - } - memset(nonzero_parameter, 0, sizeof(OpParameter)); - nonzero_parameter->type_ = primitive->Type(); - return nonzero_parameter; -} -Registry NonZeroParameterRegistry(schema::PrimitiveType_NonZero, PopulateNonZeroParameter); +Registry g_nonZeroParameterRegistry(schema::PrimitiveType_NonZero, DefaultPopulateParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/one_hot_populate.cc b/mindspore/lite/src/ops/populate/one_hot_populate.cc index 2964637be0..1343c3d68f 100644 --- a/mindspore/lite/src/ops/populate/one_hot_populate.cc +++ b/mindspore/lite/src/ops/populate/one_hot_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,33 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/one_hot.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/one_hot_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateOneHotParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateOneHotParameter(const void *prim) { OneHotParameter *one_hot_param = reinterpret_cast<OneHotParameter *>(malloc(sizeof(OneHotParameter))); if (one_hot_param == nullptr) { MS_LOG(ERROR) << "malloc OneHotParameter failed."; return nullptr; } memset(one_hot_param, 0, sizeof(OneHotParameter)); - one_hot_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::OneHot *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - if (param == nullptr) { - free(one_hot_param); - MS_LOG(ERROR) << "get OneHot param nullptr."; - return nullptr; - } - one_hot_param->axis_ = param->GetAxis(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_OneHot(); + one_hot_param->op_parameter_.type_ = primitive->value_type(); + one_hot_param->axis_ = value->axis(); return reinterpret_cast<OpParameter *>(one_hot_param); } -Registry OneHotParameterRegistry(schema::PrimitiveType_OneHot, PopulateOneHotParameter); +Registry OneHotParameterRegistry(schema::PrimitiveType_OneHot, PopulateOneHotParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/oneslike_populate.cc b/mindspore/lite/src/ops/populate/oneslike_populate.cc index 71b5a05a62..2882d8df26 100644 --- a/mindspore/lite/src/ops/populate/oneslike_populate.cc +++ b/mindspore/lite/src/ops/populate/oneslike_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/oneslike.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateOnesLikeParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateOnesLikeParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc OnesLike Parameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } -Registry OnesLikeParameterRegistry(schema::PrimitiveType_OnesLike, PopulateOnesLikeParameter); +Registry OnesLikeParameterRegistry(schema::PrimitiveType_OnesLike, PopulateOnesLikeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/p_relu_populate.cc b/mindspore/lite/src/ops/populate/p_relu_populate.cc index d666069659..18a3274142 100644 --- a/mindspore/lite/src/ops/populate/p_relu_populate.cc +++ b/mindspore/lite/src/ops/populate/p_relu_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -13,28 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/p_relu.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/prelu_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulatePReLUParameter(const mindspore::lite::PrimitiveC *primitive) { - auto param = reinterpret_cast<mindspore::lite::PReLU *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - PReluParameter *prelu_param = reinterpret_cast<PReluParameter *>(malloc(sizeof(PReluParameter))); - if (prelu_param == nullptr) { +OpParameter *PopulatePReLUParameter(const void *prim) { + PReluParameter *param = reinterpret_cast<PReluParameter *>(malloc(sizeof(PReluParameter))); + if (param == nullptr) { MS_LOG(ERROR) << "malloc PReluParameter failed."; return nullptr; } - memset(prelu_param, 0, sizeof(PReluParameter)); - prelu_param->op_parameter_.type_ = primitive->Type(); - prelu_param->channelShared = param->GetChannelShared(); - return reinterpret_cast<OpParameter *>(prelu_param); + memset(param, 0, sizeof(PReluParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_PReLUFusion(); + param->op_parameter_.type_ = primitive->value_type(); + param->channelShared = value->channel_shared(); + return reinterpret_cast<OpParameter *>(param); } -Registry PReLUParameterRegistry(schema::PrimitiveType_PReLU, PopulatePReLUParameter); - +Registry PReLUParameterRegistry(schema::PrimitiveType_PReLUFusion, PopulatePReLUParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/pad_populate.cc b/mindspore/lite/src/ops/populate/pad_populate.cc index d9f19f20e2..58a3945711 100644 --- a/mindspore/lite/src/ops/populate/pad_populate.cc +++ b/mindspore/lite/src/ops/populate/pad_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,44 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/pad.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/pad_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulatePadParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulatePadParameter(const void *prim) { PadParameter *pad_param = reinterpret_cast<PadParameter *>(malloc(sizeof(PadParameter))); if (pad_param == nullptr) { MS_LOG(ERROR) << "malloc PadParameter failed."; return nullptr; } memset(pad_param, 0, sizeof(PadParameter)); - pad_param->op_parameter_.type_ = primitive->Type(); - auto pad_node = reinterpret_cast<mindspore::lite::Pad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - pad_param->pad_mode_ = pad_node->GetPaddingMode(); - pad_param->constant_value_ = pad_node->GetConstantValue(); - auto size = pad_node->GetPaddings().size(); - if (size > MAX_PAD_SIZE) { - MS_LOG(ERROR) << "Invalid padding size: " << size; - free(pad_param); - return nullptr; - } - - for (size_t i = 0; i < MAX_PAD_SIZE - size; ++i) { - pad_param->paddings_[i] = 0; - } - for (size_t i = 0; i < size; i++) { - pad_param->paddings_[MAX_PAD_SIZE - size + i] = pad_node->GetPaddings()[i]; - } - pad_param->padding_length = MAX_PAD_SIZE; - + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_PadFusion(); + pad_param->op_parameter_.type_ = primitive->value_type(); + pad_param->pad_mode_ = value->padding_mode(); + pad_param->constant_value_ = value->constant_value(); return reinterpret_cast<OpParameter *>(pad_param); } -Registry PadParameterRegistry(schema::PrimitiveType_Pad, PopulatePadParameter); +Registry PadParameterRegistry(schema::PrimitiveType_PadFusion, PopulatePadParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/partial_populate.cc b/mindspore/lite/src/ops/populate/partial_populate.cc index 300f5e2827..f808a530af 100644 --- a/mindspore/lite/src/ops/populate/partial_populate.cc +++ b/mindspore/lite/src/ops/populate/partial_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,9 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/partial.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { @@ -25,20 +22,20 @@ typedef struct PartialParameter { int sub_graph_index_; } PartialParameter; -OpParameter *PopulatePartialParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulatePartialParameter(const void *prim) { PartialParameter *partial_parameter = reinterpret_cast<PartialParameter *>(malloc(sizeof(PartialParameter))); if (partial_parameter == nullptr) { MS_LOG(ERROR) << "malloc partial parameter failed."; return nullptr; } memset(partial_parameter, 0, sizeof(PartialParameter)); - partial_parameter->op_parameter_.type_ = primitive->Type(); - - auto param = reinterpret_cast<mindspore::lite::Partial *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - partial_parameter->sub_graph_index_ = param->GetSubGraphIndex(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_PartialFusion(); + partial_parameter->op_parameter_.type_ = primitive->value_type(); + partial_parameter->sub_graph_index_ = value->sub_graph_index(); return reinterpret_cast<OpParameter *>(partial_parameter); } -Registry PartialParameterRegistry(schema::PrimitiveType_Partial, PopulatePartialParameter); +Registry PartialParameterRegistry(schema::PrimitiveType_PartialFusion, PopulatePartialParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/pooling_populate.cc b/mindspore/lite/src/ops/populate/pooling_populate.cc index 4943ff9275..af368282bb 100644 --- a/mindspore/lite/src/ops/populate/pooling_populate.cc +++ b/mindspore/lite/src/ops/populate/pooling_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,65 +13,98 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/pooling.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/pooling_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primitive) { - auto pooling_primitive = - reinterpret_cast<mindspore::lite::Pooling *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateAvgPoolParameter(const void *primitive) { PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter))); if (pooling_param == nullptr) { MS_LOG(ERROR) << "malloc PoolingParameter failed."; return nullptr; } memset(pooling_param, 0, sizeof(PoolingParameter)); - pooling_param->op_parameter_.type_ = primitive->Type(); - pooling_param->global_ = pooling_primitive->GetGlobal(); - pooling_param->window_w_ = pooling_primitive->GetWindowW(); - pooling_param->window_h_ = pooling_primitive->GetWindowH(); - auto pooling_lite_primitive = (lite::Pooling *)primitive; - pooling_param->pad_u_ = pooling_lite_primitive->PadUp(); - pooling_param->pad_d_ = pooling_lite_primitive->PadDown(); - pooling_param->pad_l_ = pooling_lite_primitive->PadLeft(); - pooling_param->pad_r_ = pooling_lite_primitive->PadRight(); - pooling_param->stride_w_ = pooling_primitive->GetStrideW(); - pooling_param->stride_h_ = pooling_primitive->GetStrideH(); - pooling_param->avg_mode_ = pooling_primitive->GetAvgMode(); - auto pad_mode = pooling_primitive->GetPadMode(); - switch (pad_mode) { - case schema::PadMode_SAME_UPPER: - pooling_param->pad_mode_ = Pad_Same; + auto pooling_prim = static_cast<const schema::Primitive *>(primitive); + pooling_param->op_parameter_.type_ = pooling_prim->value_type(); + auto pooling_primitive = pooling_prim->value_as_AvgPoolFusion(); + pooling_param->pool_mode_ = PoolMode_AvgPool; + pooling_param->global_ = pooling_primitive->global(); + pooling_param->stride_w_ = static_cast<int>(*(pooling_primitive->strides()->begin() + 1)); + pooling_param->stride_h_ = static_cast<int>(*(pooling_primitive->strides()->begin())); + if (pooling_primitive->pad() != nullptr) { + pooling_param->pad_u_ = static_cast<int>(*(pooling_primitive->pad()->begin())); + pooling_param->pad_d_ = static_cast<int>(*(pooling_primitive->pad()->begin() + 1)); + pooling_param->pad_l_ = static_cast<int>(*(pooling_primitive->pad()->begin() + 2)); + pooling_param->pad_r_ = static_cast<int>(*(pooling_primitive->pad()->begin() + 3)); + } + if (!pooling_param->global_) { + pooling_param->window_w_ = static_cast<int>(*(pooling_primitive->kernel_size()->begin() + 1)); + pooling_param->window_h_ = static_cast<int>(*(pooling_primitive->kernel_size()->begin())); + } + + auto round_mode = pooling_primitive->round_mode(); + switch (round_mode) { + case schema::RoundMode_FLOOR: + pooling_param->round_mode_ = RoundMode_Floor; break; - case schema::PadMode_VALID: - pooling_param->pad_mode_ = Pad_Valid; + case schema::RoundMode_CEIL: + pooling_param->round_mode_ = RoundMode_Ceil; break; default: - pooling_param->pad_mode_ = Pad_No; + pooling_param->round_mode_ = RoundMode_No; break; } - auto is_global = pooling_primitive->GetGlobal(); - pooling_param->global_ = is_global; - auto pool_mode = pooling_primitive->GetPoolingMode(); - switch (pool_mode) { - case schema::PoolMode_MAX_POOLING: - pooling_param->pool_mode_ = PoolMode_MaxPool; + if (pooling_primitive->activation_type() == schema::ActivationType_RELU) { + pooling_param->act_type_ = ActType_Relu; + } else if 
(pooling_primitive->activation_type() == schema::ActivationType_RELU6) { + pooling_param->act_type_ = ActType_Relu6; + } else { + pooling_param->act_type_ = ActType_No; + } + + switch (pooling_primitive->pad_mode()) { + case schema::PadMode_SAME: + pooling_param->pad_mode_ = Pad_same; break; - case schema::PoolMode_MEAN_POOLING: - pooling_param->pool_mode_ = PoolMode_AvgPool; + case schema::PadMode_VALID: + pooling_param->pad_mode_ = Pad_valid; break; default: - pooling_param->pool_mode_ = PoolMode_No; + pooling_param->pad_mode_ = Pad_pad; break; } + return reinterpret_cast<OpParameter *>(pooling_param); +} + +OpParameter *PopulateMaxPoolParameter(const void *primitive) { + PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter))); + if (pooling_param == nullptr) { + MS_LOG(ERROR) << "malloc PoolingParameter failed."; + return nullptr; + } + memset(pooling_param, 0, sizeof(PoolingParameter)); + auto pooling_prim = static_cast<const schema::Primitive *>(primitive); + pooling_param->op_parameter_.type_ = pooling_prim->value_type(); + auto max_pool_prim = pooling_prim->value_as_MaxPoolFusion(); + pooling_param->pool_mode_ = PoolMode_MaxPool; + pooling_param->global_ = max_pool_prim->global(); + if (!pooling_param->global_) { + pooling_param->window_w_ = static_cast<int>(*(max_pool_prim->kernel_size()->begin() + 1)); + pooling_param->window_h_ = static_cast<int>(*(max_pool_prim->kernel_size()->begin())); + pooling_param->stride_w_ = static_cast<int>(*(max_pool_prim->strides()->begin() + 1)); + pooling_param->stride_h_ = static_cast<int>(*(max_pool_prim->strides()->begin())); + if (max_pool_prim->pad() != nullptr) { + pooling_param->pad_u_ = static_cast<int>(*(max_pool_prim->pad()->begin())); + pooling_param->pad_d_ = static_cast<int>(*(max_pool_prim->pad()->begin() + 1)); + pooling_param->pad_l_ = static_cast<int>(*(max_pool_prim->pad()->begin() + 2)); + pooling_param->pad_r_ = static_cast<int>(*(max_pool_prim->pad()->begin() + 3)); + } + } - auto round_mode = pooling_primitive->GetRoundMode(); + auto round_mode = max_pool_prim->round_mode(); switch (round_mode) { case schema::RoundMode_FLOOR: pooling_param->round_mode_ = RoundMode_Floor; @@ -84,17 +117,30 @@ OpParameter *PopulatePoolingParameter(const mindspore::lite::PrimitiveC *primiti break; } - if (pooling_primitive->GetActivationType() == schema::ActivationType_RELU) { + if (max_pool_prim->activation_type() == schema::ActivationType_RELU) { pooling_param->act_type_ = ActType_Relu; - } else if (pooling_primitive->GetActivationType() == schema::ActivationType_RELU6) { + } else if (max_pool_prim->activation_type() == schema::ActivationType_RELU6) { pooling_param->act_type_ = ActType_Relu6; } else { pooling_param->act_type_ = ActType_No; } + + switch (max_pool_prim->pad_mode()) { + case schema::PadMode_SAME: + pooling_param->pad_mode_ = Pad_same; + break; + case schema::PadMode_VALID: + pooling_param->pad_mode_ = Pad_valid; + break; + default: + pooling_param->pad_mode_ = Pad_pad; + break; + } return reinterpret_cast<OpParameter *>(pooling_param); } +} // namespace -Registry PoolingParameterRegistry(schema::PrimitiveType_Pooling, PopulatePoolingParameter); - +Registry g_avgPoolParameterRegistry(schema::PrimitiveType_AvgPoolFusion, PopulateAvgPoolParameter, SCHEMA_CUR); +Registry g_maxPoolParameterRegistry(schema::PrimitiveType_MaxPoolFusion, PopulateMaxPoolParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/populate_register.h 
b/mindspore/lite/src/ops/populate/populate_register.h index 9e80d30c41..3811edf741 100644 --- a/mindspore/lite/src/ops/populate/populate_register.h +++ b/mindspore/lite/src/ops/populate/populate_register.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,11 +18,14 @@ #define LITE_MINDSPORE_LITE_C_OPS_OP_POPULATE_REGISTER_H #include <map> -#include "src/ops/primitive_c.h" +#include "schema/model_generated.h" +#include "nnacl/op_base.h" +#include "src/common/common.h" +#include "src/common/prim_util.h" namespace mindspore { namespace lite { - +typedef OpParameter *(*ParameterGen)(const void *prim); class PopulateRegistry { public: static PopulateRegistry *GetInstance() { @@ -30,25 +33,30 @@ class PopulateRegistry { return &registry; } - void InsertParameterMap(schema::PrimitiveType type, ParameterCreator creator) { parameter_creators[type] = creator; } + void InsertParameterMap(int type, ParameterGen creator, int version) { + parameters_[GenPrimVersionKey(type, version)] = creator; + } - ParameterCreator GetParameterCreator(schema::PrimitiveType type) { - if (parameter_creators.find(type) != parameter_creators.end()) { - return parameter_creators[type]; - } else { - MS_LOG(ERROR) << "Unsupported parameter type in Create : " << schema::EnumNamePrimitiveType(type); + ParameterGen GetParameterCreator(int type, int version) { + ParameterGen param_creator = nullptr; + auto iter = parameters_.find(GenPrimVersionKey(type, version)); + if (iter == parameters_.end()) { + MS_LOG(ERROR) << "Unsupported parameter type in Create : " << type; return nullptr; } + param_creator = iter->second; + return param_creator; } protected: - std::map<schema::PrimitiveType, ParameterCreator> parameter_creators; + // key:type * 1000 + schema_version + std::map<int, ParameterGen> parameters_; }; class Registry { public: - Registry(schema::PrimitiveType primitive_type, ParameterCreator creator) { - PopulateRegistry::GetInstance()->InsertParameterMap(primitive_type, creator); + Registry(int primitive_type, ParameterGen creator, int version) { + PopulateRegistry::GetInstance()->InsertParameterMap(primitive_type, creator, version); } ~Registry() = default; }; diff --git a/mindspore/lite/src/ops/populate/power_populate.cc b/mindspore/lite/src/ops/populate/power_populate.cc index a2e805c086..26b5bfdcc9 100644 --- a/mindspore/lite/src/ops/populate/power_populate.cc +++ b/mindspore/lite/src/ops/populate/power_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,31 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/power.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/power_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulatePowerParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulatePowerParameter(const void *prim) { PowerParameter *power_param = reinterpret_cast<PowerParameter *>(malloc(sizeof(PowerParameter))); if (power_param == nullptr) { MS_LOG(ERROR) << "malloc PowerParameter failed."; return nullptr; } memset(power_param, 0, sizeof(PowerParameter)); - power_param->op_parameter_.type_ = primitive->Type(); - auto power = reinterpret_cast<mindspore::lite::Power *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - power_param->power_ = power->GetPower(); - power_param->scale_ = power->GetScale(); - power_param->shift_ = power->GetShift(); + auto primitive = static_cast<const schema::Primitive *>(prim); + power_param->op_parameter_.type_ = primitive->value_type(); + auto power_prim = primitive->value_as_PowFusion(); + power_param->scale_ = power_prim->scale(); + power_param->shift_ = power_prim->shift(); return reinterpret_cast<OpParameter *>(power_param); } +} // namespace -Registry PowerParameterRegistry(schema::PrimitiveType_Power, PopulatePowerParameter); - +Registry g_powerParameterRegistry(schema::PrimitiveType_PowFusion, PopulatePowerParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/prior_box_populate.cc b/mindspore/lite/src/ops/populate/prior_box_populate.cc index b9c20f5bb4..611b594dcf 100644 --- a/mindspore/lite/src/ops/populate/prior_box_populate.cc +++ b/mindspore/lite/src/ops/populate/prior_box_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,71 +13,63 @@ * See the License for the specific language governing permissions and * limitations under the License. 
 */ - -#include "src/ops/prior_box.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "mindspore/lite/nnacl/prior_box_parameter.h" +#include "nnacl/prior_box_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulatePriorBoxParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulatePriorBoxParameter(const void *prim) { PriorBoxParameter *prior_box_param = reinterpret_cast<PriorBoxParameter *>(malloc(sizeof(PriorBoxParameter))); if (prior_box_param == nullptr) { MS_LOG(ERROR) << "malloc PriorBoxParameter failed."; return nullptr; } memset(prior_box_param, 0, sizeof(PriorBoxParameter)); - prior_box_param->op_parameter_.type_ = primitive->Type(); - auto prior_box_attr = - reinterpret_cast<mindspore::lite::PriorBox *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - if (prior_box_attr->GetMinSizes().size() > MAX_SHAPE_SIZE) { - MS_LOG(ERROR) << "PriorBox min_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got " - << prior_box_attr->GetMinSizes(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_PriorBox(); + prior_box_param->op_parameter_.type_ = primitive->value_type(); + if (value->min_sizes()->size() > MAX_SHAPE_SIZE) { + MS_LOG(ERROR) << "PriorBox min_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got " << value->min_sizes()->size(); free(prior_box_param); return nullptr; } - prior_box_param->min_sizes_size = prior_box_attr->GetMinSizes().size(); - if (prior_box_attr->GetMaxSizes().size() > MAX_SHAPE_SIZE) { - MS_LOG(ERROR) << "PriorBox max_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got " - << prior_box_attr->GetMaxSizes(); + prior_box_param->min_sizes_size = value->min_sizes()->size(); + if (value->max_sizes()->size() > MAX_SHAPE_SIZE) { + MS_LOG(ERROR) << "PriorBox max_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got " << value->max_sizes()->size(); free(prior_box_param); return nullptr; } - prior_box_param->max_sizes_size = prior_box_attr->GetMaxSizes().size(); - memcpy(prior_box_param->max_sizes, prior_box_attr->GetMaxSizes().data(), - prior_box_attr->GetMaxSizes().size() * sizeof(int32_t)); - memcpy(prior_box_param->min_sizes, prior_box_attr->GetMinSizes().data(), - prior_box_attr->GetMinSizes().size() * sizeof(int32_t)); + prior_box_param->max_sizes_size = value->max_sizes()->size(); + memcpy(prior_box_param->max_sizes, value->max_sizes()->data(), value->max_sizes()->size() * sizeof(int32_t)); + memcpy(prior_box_param->min_sizes, value->min_sizes()->data(), value->min_sizes()->size() * sizeof(int32_t)); - if (prior_box_attr->GetAspectRatios().size() > MAX_SHAPE_SIZE) { + if (value->aspect_ratios()->size() > MAX_SHAPE_SIZE) { MS_LOG(ERROR) << "PriorBox aspect_ratios size exceeds max num " << MAX_SHAPE_SIZE << ", got " - << prior_box_attr->GetAspectRatios(); + << value->aspect_ratios()->size(); free(prior_box_param); return nullptr; } - prior_box_param->aspect_ratios_size = prior_box_attr->GetAspectRatios().size(); - memcpy(prior_box_param->aspect_ratios, prior_box_attr->GetAspectRatios().data(), - prior_box_attr->GetAspectRatios().size() * sizeof(float)); + prior_box_param->aspect_ratios_size = value->aspect_ratios()->size(); + memcpy(prior_box_param->aspect_ratios, value->aspect_ratios()->data(), + value->aspect_ratios()->size() * sizeof(float)); + 
if (value->variances()->size() != COMM_SHAPE_SIZE) { + MS_LOG(ERROR) << "PriorBox variances size should be " << COMM_SHAPE_SIZE << ", got " << value->variances()->size(); free(prior_box_param); return nullptr; } - memcpy(prior_box_param->variances, prior_box_attr->GetVariances().data(), COMM_SHAPE_SIZE * sizeof(float)); - prior_box_param->flip = prior_box_attr->GetFlip(); - prior_box_param->clip = prior_box_attr->GetClip(); - prior_box_param->offset = prior_box_attr->GetOffset(); - prior_box_param->image_size_h = prior_box_attr->GetImageSizeH(); - prior_box_param->image_size_w = prior_box_attr->GetImageSizeW(); - prior_box_param->step_h = prior_box_attr->GetStepH(); - prior_box_param->step_w = prior_box_attr->GetStepW(); + memcpy(prior_box_param->variances, value->variances()->data(), COMM_SHAPE_SIZE * sizeof(float)); + prior_box_param->flip = value->flip(); + prior_box_param->clip = value->clip(); + prior_box_param->offset = value->offset(); + prior_box_param->image_size_h = value->image_size_h(); + prior_box_param->image_size_w = value->image_size_w(); + prior_box_param->step_h = value->step_h(); + prior_box_param->step_w = value->step_w(); return reinterpret_cast<OpParameter *>(prior_box_param); } -Registry PriorBoxParameterRegistry(schema::PrimitiveType_PriorBox, PopulatePriorBoxParameter); +Registry PriorBoxParameterRegistry(schema::PrimitiveType_PriorBox, PopulatePriorBoxParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/quant_dtype_cast_populate.cc b/mindspore/lite/src/ops/populate/quant_dtype_cast_populate.cc index b91238ea39..d49640110a 100644 --- a/mindspore/lite/src/ops/populate/quant_dtype_cast_populate.cc +++ b/mindspore/lite/src/ops/populate/quant_dtype_cast_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,16 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/quant_dtype_cast.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/int8/quant_dtype_cast_int8.h" namespace mindspore { namespace lite { -OpParameter *PopulateQuantDTypeCastParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateQuantDTypeCastParameter(const void *prim) { QuantDTypeCastParameter *parameter = reinterpret_cast<QuantDTypeCastParameter *>(malloc(sizeof(QuantDTypeCastParameter))); if (parameter == nullptr) { @@ -30,14 +27,15 @@ OpParameter *PopulateQuantDTypeCastParameter(const mindspore::lite::PrimitiveC * return nullptr; } memset(parameter, 0, sizeof(QuantDTypeCastParameter)); - parameter->op_parameter_.type_ = primitive->Type(); - auto quant_dtype_cast_param = - reinterpret_cast<mindspore::lite::QuantDTypeCast *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - parameter->srcT = quant_dtype_cast_param->GetSrcT(); - parameter->dstT = quant_dtype_cast_param->GetDstT(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_QuantDTypeCast(); + parameter->op_parameter_.type_ = primitive->value_type(); + parameter->srcT = value->src_t(); + parameter->dstT = value->dst_t(); return reinterpret_cast<OpParameter *>(parameter); } -Registry QuantDTypeCastParameterRegistry(schema::PrimitiveType_QuantDTypeCast, PopulateQuantDTypeCastParameter); +Registry QuantDTypeCastParameterRegistry(schema::PrimitiveType_QuantDTypeCast, PopulateQuantDTypeCastParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/random_standard_normal_populate.cc b/mindspore/lite/src/ops/populate/random_standard_normal_populate.cc index 89fddd46d5..df22a0575d 100644 --- a/mindspore/lite/src/ops/populate/random_standard_normal_populate.cc +++ b/mindspore/lite/src/ops/populate/random_standard_normal_populate.cc @@ -14,14 +14,13 @@ * limitations under the License. 
*/ -#include "src/ops/random_standard_normal.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/random_standard_normal_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateRandomStandardNormalParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateRandomStandardNormalParameter(const void *prim) { RandomStandardNormalParam *random_parameter = reinterpret_cast<RandomStandardNormalParam *>(malloc(sizeof(RandomStandardNormalParam))); if (random_parameter == nullptr) { @@ -29,14 +28,15 @@ OpParameter *PopulateRandomStandardNormalParameter(const mindspore::lite::Primit return nullptr; } memset(random_parameter, 0, sizeof(RandomStandardNormalParam)); - random_parameter->op_parameter_.type_ = primitive->Type(); - auto param = - reinterpret_cast<mindspore::lite::RandomStandardNormal *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - random_parameter->seed_ = param->GetSeed(); - random_parameter->seed2_ = param->GetSeed2(); + auto *primitive = static_cast<const schema::Primitive *>(prim); + random_parameter->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_RandomStandardNormal(); + random_parameter->seed_ = param->seed(); + random_parameter->seed2_ = param->seed2(); return reinterpret_cast<OpParameter *>(random_parameter); } -Registry RandomStandardNormalParameterRegistry(schema::PrimitiveType_RandomStandardNormal, - PopulateRandomStandardNormalParameter); +} // namespace +Registry g_randomStandardNormalParameterRegistry(schema::PrimitiveType_RandomStandardNormal, + PopulateRandomStandardNormalParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/range_populate.cc b/mindspore/lite/src/ops/populate/range_populate.cc index 71baee7067..2f30e10d92 100644 --- a/mindspore/lite/src/ops/populate/range_populate.cc +++ b/mindspore/lite/src/ops/populate/range_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,31 +13,30 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/range.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/range_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateRangeParameter(const mindspore::lite::PrimitiveC *primitive) { - auto range_attr = reinterpret_cast<mindspore::lite::Range *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateRangeParameter(const void *prim) { RangeParameter *range_param = reinterpret_cast<RangeParameter *>(malloc(sizeof(RangeParameter))); if (range_param == nullptr) { MS_LOG(ERROR) << "malloc RangeParameter failed."; return nullptr; } memset(range_param, 0, sizeof(RangeParameter)); - range_param->op_parameter_.type_ = primitive->Type(); - range_param->start_ = range_attr->GetStart(); - range_param->limit_ = range_attr->GetLimit(); - range_param->delta_ = range_attr->GetDelta(); - range_param->dType_ = range_attr->GetDType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + range_param->op_parameter_.type_ = primitive->value_type(); + auto range_prim = primitive->value_as_Range(); + range_param->start_ = range_prim->start(); + range_param->limit_ = range_prim->limit(); + range_param->delta_ = range_prim->delta(); + range_param->dType_ = range_prim->d_type(); return reinterpret_cast<OpParameter *>(range_param); } -Registry RangeParameterRegistry(schema::PrimitiveType_Range, PopulateRangeParameter); +} // namespace +Registry g_rangeParameterRegistry(schema::PrimitiveType_Range, PopulateRangeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/rank_populate.cc b/mindspore/lite/src/ops/populate/rank_populate.cc new file mode 100644 index 0000000000..f9dd3373fa --- /dev/null +++ b/mindspore/lite/src/ops/populate/rank_populate.cc @@ -0,0 +1,36 @@ +/** + * Copyright 2019-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateRankParameter(const void *prim) { + OpParameter *rank_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (rank_param == nullptr) { + MS_LOG(ERROR) << "malloc RankParameter failed."; + return nullptr; + } + memset(rank_param, 0, sizeof(OpParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + rank_param->type_ = primitive->value_type(); + return reinterpret_cast<OpParameter *>(rank_param); +} +} // namespace + +Registry g_rankParameterRegistry(schema::PrimitiveType_Rank, PopulateRankParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/reduce_populate.cc b/mindspore/lite/src/ops/populate/reduce_populate.cc index b881fbd479..84c478a5c9 100644 --- a/mindspore/lite/src/ops/populate/reduce_populate.cc +++ b/mindspore/lite/src/ops/populate/reduce_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,44 +13,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/reduce.h" #include <memory> -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/reduce_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateReduceParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateReduceParameter(const void *prim) { ReduceParameter *reduce_param = reinterpret_cast<ReduceParameter *>(malloc(sizeof(ReduceParameter))); if (reduce_param == nullptr) { MS_LOG(ERROR) << "malloc ReduceParameter failed."; return nullptr; } memset(reduce_param, 0, sizeof(ReduceParameter)); - reduce_param->op_parameter_.type_ = primitive->Type(); - auto reduce = reinterpret_cast<mindspore::lite::Reduce *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - reduce_param->keep_dims_ = reduce->GetKeepDims(); - reduce_param->reduce_to_end_ = reduce->GetReduceToEnd(); - reduce_param->coeff = reduce->GetCoeff(); - auto axisVector = reduce->GetAxes(); - if (axisVector.size() > MAX_SHAPE_SIZE) { - MS_LOG(ERROR) << "Reduce axes size " << axisVector.size() << " exceed limit " << MAX_SHAPE_SIZE; - free(reduce_param); - return nullptr; - } - reduce_param->num_axes_ = static_cast<int>(axisVector.size()); - int i = 0; - for (auto iter = axisVector.begin(); iter != axisVector.end(); iter++) { - reduce_param->axes_[i++] = *iter; - } - reduce_param->mode_ = static_cast<int>(reduce->GetMode()); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ReduceFusion(); + reduce_param->op_parameter_.type_ = primitive->value_type(); + reduce_param->keep_dims_ = value->keep_dims(); + reduce_param->reduce_to_end_ = value->reduce_to_end(); + reduce_param->coeff = value->coeff(); + reduce_param->mode_ = static_cast<int>(value->mode()); return reinterpret_cast<OpParameter *>(reduce_param); } -Registry ReduceParameterRegistry(schema::PrimitiveType_Reduce, PopulateReduceParameter); +Registry ReduceParameterRegistry(schema::PrimitiveType_ReduceFusion, PopulateReduceParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/reshape_populate.cc 
b/mindspore/lite/src/ops/populate/reshape_populate.cc index 556ba8d306..a4685326d1 100644 --- a/mindspore/lite/src/ops/populate/reshape_populate.cc +++ b/mindspore/lite/src/ops/populate/reshape_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,36 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" #include "nnacl/reshape_parameter.h" -#include "src/ops/reshape.h" namespace mindspore { namespace lite { - -OpParameter *PopulateReshapeParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateReshapeParameter(const void *prim) { ReshapeParameter *reshape_param = reinterpret_cast<ReshapeParameter *>(malloc(sizeof(ReshapeParameter))); if (reshape_param == nullptr) { MS_LOG(ERROR) << "malloc ReshapeParameter failed."; return nullptr; } memset(reshape_param, 0, sizeof(ReshapeParameter)); - reshape_param->op_parameter_.type_ = primitive->Type(); - auto reshape_lite_primitive = (lite::Reshape *)primitive; - auto shape = reshape_lite_primitive->GetShape(); - reshape_param->shape_dim_ = shape.size(); - int i = 0; - for (auto iter = shape.begin(); iter != shape.end(); iter++) { - reshape_param->shape_[i++] = *iter; - } + auto *primitive = static_cast<const schema::Primitive *>(prim); + reshape_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(reshape_param); } +} // namespace -Registry ReshapeParameterRegistry(schema::PrimitiveType_Reshape, PopulateReshapeParameter); - +Registry g_reshapeParameterRegistry(schema::PrimitiveType_Reshape, PopulateReshapeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/resize_populate.cc b/mindspore/lite/src/ops/populate/resize_populate.cc index af6be62d77..a67cacacab 100644 --- a/mindspore/lite/src/ops/populate/resize_populate.cc +++ b/mindspore/lite/src/ops/populate/resize_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020-2021 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,34 +13,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/resize.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/resize_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateResizeParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateResizeParameter(const void *prim) { ResizeParameter *resize_param = reinterpret_cast<ResizeParameter *>(malloc(sizeof(ResizeParameter))); if (resize_param == nullptr) { MS_LOG(ERROR) << "malloc ResizeParameter failed."; return nullptr; } memset(resize_param, 0, sizeof(ResizeParameter)); - resize_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Resize *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - resize_param->method_ = static_cast<int>(param->GetMethod()); - resize_param->new_height_ = param->new_height(); - resize_param->new_width_ = param->new_width(); - resize_param->coordinate_transform_mode_ = param->GetCoordinateTransformMode(); - resize_param->preserve_aspect_ratio_ = param->GetPreserveAspectRatio(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Resize(); + resize_param->op_parameter_.type_ = primitive->value_type(); + + resize_param->method_ = static_cast<int>(value->method()); + resize_param->new_height_ = value->new_height(); + resize_param->new_width_ = value->new_width(); + resize_param->coordinate_transform_mode_ = value->coordinate_transform_mode(); + resize_param->preserve_aspect_ratio_ = value->preserve_aspect_ratio(); return reinterpret_cast<OpParameter *>(resize_param); } -Registry ResizeParameterRegistry(schema::PrimitiveType_Resize, PopulateResizeParameter); - +Registry ResizeParameterRegistry(schema::PrimitiveType_Resize, PopulateResizeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/reverse_populate.cc b/mindspore/lite/src/ops/populate/reverse_populate.cc index 08a4c989c6..04beec67fa 100644 --- a/mindspore/lite/src/ops/populate/reverse_populate.cc +++ b/mindspore/lite/src/ops/populate/reverse_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,35 +13,33 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/reverse.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/reverse_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateReverseParameter(const mindspore::lite::PrimitiveC *primitive) { - auto reverse_attr = - reinterpret_cast<mindspore::lite::Reverse *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +OpParameter *PopulateReverseParameter(const void *prim) { ReverseParameter *reverse_param = reinterpret_cast<ReverseParameter *>(malloc(sizeof(ReverseParameter))); if (reverse_param == nullptr) { MS_LOG(ERROR) << "malloc ReverseParameter failed."; return nullptr; } memset(reverse_param, 0, sizeof(ReverseParameter)); - reverse_param->op_parameter_.type_ = primitive->Type(); - auto flatAxis = reverse_attr->GetAxis(); - reverse_param->num_axis_ = flatAxis.size(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ReverseV2(); + reverse_param->op_parameter_.type_ = primitive->value_type(); + + auto flatAxis = value->axis(); + reverse_param->num_axis_ = flatAxis->size(); int i = 0; - for (auto iter = flatAxis.begin(); iter != flatAxis.end(); iter++) { + for (auto iter = flatAxis->begin(); iter != flatAxis->end(); iter++) { reverse_param->axis_[i++] = *iter; } return reinterpret_cast<OpParameter *>(reverse_param); } -Registry ReverseParameterRegistry(schema::PrimitiveType_Reverse, PopulateReverseParameter); +Registry ReverseParameterRegistry(schema::PrimitiveType_ReverseV2, PopulateReverseParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/reverse_sequence_populate.cc b/mindspore/lite/src/ops/populate/reverse_sequence_populate.cc index d566a37a31..86eaa35c05 100644 --- a/mindspore/lite/src/ops/populate/reverse_sequence_populate.cc +++ b/mindspore/lite/src/ops/populate/reverse_sequence_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,16 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/reverse_sequence.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "mindspore/lite/nnacl/fp32/reverse_sequence_fp32.h" +#include "nnacl/reverse_sequence_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateReverseSequenceParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateReverseSequenceParameter(const void *prim) { ReverseSequenceParameter *reverse_sequence_param = reinterpret_cast<ReverseSequenceParameter *>(malloc(sizeof(ReverseSequenceParameter))); if (reverse_sequence_param == nullptr) { @@ -30,14 +27,17 @@ OpParameter *PopulateReverseSequenceParameter(const mindspore::lite::PrimitiveC return nullptr; } memset(reverse_sequence_param, 0, sizeof(ReverseSequenceParameter)); - auto param = - reinterpret_cast<mindspore::lite::ReverseSequence *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - reverse_sequence_param->op_parameter_.type_ = primitive->Type(); - reverse_sequence_param->seq_axis_ = param->GetSeqAxis(); - reverse_sequence_param->batch_axis_ = param->GetBatchAxis(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto param = primitive->value_as_ReverseSequence(); + reverse_sequence_param->op_parameter_.type_ = primitive->value_type(); + reverse_sequence_param->seq_axis_ = static_cast<int>(param->seq_dim()); + reverse_sequence_param->batch_axis_ = static_cast<int>(param->batch_dim()); return reinterpret_cast<OpParameter *>(reverse_sequence_param); } -Registry ReverseSequenceParameterRegistry(schema::PrimitiveType_ReverseSequence, PopulateReverseSequenceParameter); +} // namespace + +Registry ReverseSequenceParameterRegistry(schema::PrimitiveType_ReverseSequence, PopulateReverseSequenceParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/roi_pooling_populate.cc b/mindspore/lite/src/ops/populate/roi_pooling_populate.cc index cd3aa3f085..5867a0701f 100644 --- a/mindspore/lite/src/ops/populate/roi_pooling_populate.cc +++ b/mindspore/lite/src/ops/populate/roi_pooling_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,32 +13,30 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/roi_pooling.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/roi_pooling_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateROIPoolingParameter(const mindspore::lite::PrimitiveC *primitive) { - const auto param = - reinterpret_cast<mindspore::lite::ROIPooling *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - ROIPoolingParameter *roi_pooling_param = reinterpret_cast<ROIPoolingParameter *>(malloc(sizeof(ROIPoolingParameter))); - if (roi_pooling_param == nullptr) { +namespace { +OpParameter *PopulateROIPoolingParameter(const void *prim) { + ROIPoolingParameter *roi_param = reinterpret_cast<ROIPoolingParameter *>(malloc(sizeof(ROIPoolingParameter))); + if (roi_param == nullptr) { MS_LOG(ERROR) << "malloc ROIPoolingParameter failed."; return nullptr; } - memset(roi_pooling_param, 0, sizeof(ROIPoolingParameter)); - roi_pooling_param->op_parameter_.type_ = primitive->Type(); - roi_pooling_param->pooledH_ = param->GetPooledH(); - roi_pooling_param->pooledW_ = param->GetPooledW(); - roi_pooling_param->scale_ = param->GetScale(); - return reinterpret_cast<OpParameter *>(roi_pooling_param); -} -Registry ROIPoolingParameterRegistry(schema::PrimitiveType_ROIPooling, PopulateROIPoolingParameter); + memset(roi_param, 0, sizeof(ROIPoolingParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + roi_param->op_parameter_.type_ = primitive->value_type(); + auto roi_prim = primitive->value_as_ROIPooling(); + roi_param->pooledH_ = roi_prim->pooled_h(); + roi_param->pooledW_ = roi_prim->pooled_w(); + roi_param->scale_ = roi_prim->scale(); + return reinterpret_cast<OpParameter *>(roi_param); +} +} // namespace +Registry g_ROIPoolingParameterRegistry(schema::PrimitiveType_ROIPooling, PopulateROIPoolingParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/scale_populate.cc b/mindspore/lite/src/ops/populate/scale_populate.cc index f71294cc55..923fbcbf8f 100644 --- a/mindspore/lite/src/ops/populate/scale_populate.cc +++ b/mindspore/lite/src/ops/populate/scale_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,33 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/scale.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/scale.h" namespace mindspore { namespace lite { - -OpParameter *PopulateScaleParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "input primitive is nullptr"; - return nullptr; - } +namespace { +OpParameter *PopulateScaleParameter(const void *prim) { ScaleParameter *scale_param = reinterpret_cast<ScaleParameter *>(malloc(sizeof(ScaleParameter))); if (scale_param == nullptr) { MS_LOG(ERROR) << "malloc ScaleParameter failed."; return nullptr; } memset(scale_param, 0, sizeof(ScaleParameter)); - scale_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Scale *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - scale_param->axis_ = param->GetAxis(); - scale_param->activation_type_ = param->GetActivationType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ScaleFusion(); + scale_param->op_parameter_.type_ = primitive->value_type(); + scale_param->axis_ = value->axis(); + scale_param->activation_type_ = value->activation_type(); return reinterpret_cast<OpParameter *>(scale_param); } -Registry ScaleParameterRegistry(schema::PrimitiveType_Scale, PopulateScaleParameter); +} // namespace +Registry g_scaleParameterRegistry(schema::PrimitiveType_ScaleFusion, PopulateScaleParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/scatter_nd_populate.cc b/mindspore/lite/src/ops/populate/scatter_nd_populate.cc index 46b3fb22ee..4a8dfa4b17 100644 --- a/mindspore/lite/src/ops/populate/scatter_nd_populate.cc +++ b/mindspore/lite/src/ops/populate/scatter_nd_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/scatter_nd.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateScatterNDParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateScatterNDParameter(const void *prim) { OpParameter *scatter_nd_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (scatter_nd_param == nullptr) { MS_LOG(ERROR) << "malloc ScatterNDParameter failed."; return nullptr; } memset(scatter_nd_param, 0, sizeof(OpParameter)); - scatter_nd_param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + scatter_nd_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(scatter_nd_param); } -Registry ScatterNDParameterRegistry(schema::PrimitiveType_ScatterND, PopulateScatterNDParameter); +} // namespace +Registry g_scatterNDParameterRegistry(schema::PrimitiveType_ScatterNd, PopulateScatterNDParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/select_populate.cc b/mindspore/lite/src/ops/populate/select_populate.cc index efee92d035..2ed6cd9d2d 100644 --- a/mindspore/lite/src/ops/populate/select_populate.cc +++ b/mindspore/lite/src/ops/populate/select_populate.cc @@ -13,24 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/select.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" namespace mindspore { namespace lite { -OpParameter *PopulateSelectParameter(const mindspore::lite::PrimitiveC *primitive) { - OpParameter *select_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); - if (select_parameter == nullptr) { - MS_LOG(ERROR) << "malloc SelectParameter failed."; - return nullptr; - } - memset(select_parameter, 0, sizeof(OpParameter)); - select_parameter->type_ = primitive->Type(); - - return reinterpret_cast<OpParameter *>(select_parameter); -} -Registry SelectParameterRegistry(schema::PrimitiveType_Select, PopulateSelectParameter); +Registry g_selectParameterRegistry(schema::PrimitiveType_Select, DefaultPopulateParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/shape_populate.cc b/mindspore/lite/src/ops/populate/shape_populate.cc index d6f392b102..eab41e1144 100644 --- a/mindspore/lite/src/ops/populate/shape_populate.cc +++ b/mindspore/lite/src/ops/populate/shape_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "src/common/log_adapter.h" #include "src/tensor.h" namespace mindspore { namespace lite { -OpParameter *PopulateShapeParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateShapeParameter(const void *prim) { OpParameter *shape_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (shape_param == nullptr) { MS_LOG(ERROR) << "malloc ShapeParameter failed."; return nullptr; } memset(shape_param, 0, sizeof(OpParameter)); - shape_param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + shape_param->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(shape_param); } +} // namespace -Registry ShapeParameterRegistry(schema::PrimitiveType_Shape, PopulateShapeParameter); +Registry g_shapeParameterRegistry(schema::PrimitiveType_Shape, PopulateShapeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/size_populate.cc b/mindspore/lite/src/ops/populate/size_populate.cc new file mode 100644 index 0000000000..7c35d9e7c4 --- /dev/null +++ b/mindspore/lite/src/ops/populate/size_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_sizeParameterRegistry(schema::PrimitiveType_Size, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/skip_gram_populate.cc b/mindspore/lite/src/ops/populate/skip_gram_populate.cc index 4760edcdf4..bfe59511d9 100644 --- a/mindspore/lite/src/ops/populate/skip_gram_populate.cc +++ b/mindspore/lite/src/ops/populate/skip_gram_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/skip_gram.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "mindspore/lite/nnacl/skip_gram_parameter.h" +#include "nnacl/skip_gram_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateSkipGramParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateSkipGramParameter(const void *prim) { SkipGramParameter *skipGramParameter = reinterpret_cast<SkipGramParameter *>(malloc(sizeof(SkipGramParameter))); if (skipGramParameter == nullptr) { MS_LOG(ERROR) << "malloc SkipGramParameter failed."; return nullptr; } memset(skipGramParameter, 0, sizeof(SkipGramParameter)); - skipGramParameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::SkipGram *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - skipGramParameter->ngram_size = param->GetNgramSize(); - skipGramParameter->max_skip_size = param->GetMaxSkipSize(); - skipGramParameter->include_all_ngrams = param->GetIncludeAllNgrams(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SkipGram(); + skipGramParameter->op_parameter_.type_ = primitive->value_type(); + skipGramParameter->ngram_size = value->ngram_size(); + skipGramParameter->max_skip_size = value->max_skip_size(); + skipGramParameter->include_all_ngrams = value->include_all_grams(); return reinterpret_cast<OpParameter *>(skipGramParameter); } -Registry SkipGramParameterRegistry(schema::PrimitiveType_SkipGram, PopulateSkipGramParameter); +Registry SkipGramParameterRegistry(schema::PrimitiveType_SkipGram, PopulateSkipGramParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/slice_populate.cc b/mindspore/lite/src/ops/populate/slice_populate.cc index 0873836cbc..fa224d2cd4 100644 --- a/mindspore/lite/src/ops/populate/slice_populate.cc +++ b/mindspore/lite/src/ops/populate/slice_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,40 +13,28 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/slice.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/slice_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateSliceParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateSliceParameter(const void *prim) { SliceParameter *slice_param = reinterpret_cast<SliceParameter *>(malloc(sizeof(SliceParameter))); if (slice_param == nullptr) { MS_LOG(ERROR) << "malloc SliceParameter failed."; return nullptr; } memset(slice_param, 0, sizeof(SliceParameter)); - auto param = reinterpret_cast<mindspore::lite::Slice *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - slice_param->op_parameter_.type_ = primitive->Type(); - auto param_begin = param->GetPostProcessBegin(); - auto param_size = param->GetPostProcessSize(); - if (param_begin.size() != param_size.size()) { - free(slice_param); - return nullptr; - } - slice_param->param_length_ = static_cast<int32_t>(param_begin.size()); - for (int32_t i = 0; i < slice_param->param_length_; ++i) { - slice_param->begin_[i] = param_begin.at(i); - slice_param->size_[i] = param_size.at(i); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SliceFusion(); + slice_param->op_parameter_.type_ = primitive->value_type(); + for (size_t i = 0; i < value->axes()->size(); ++i) { + slice_param->axis_[i] = value->axes()->Get(i); } return reinterpret_cast<OpParameter *>(slice_param); } -Registry SliceParameterRegistry(schema::PrimitiveType_Slice, PopulateSliceParameter); +Registry SliceParameterRegistry(schema::PrimitiveType_SliceFusion, PopulateSliceParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/softmax_populate.cc b/mindspore/lite/src/ops/populate/softmax_populate.cc index fa29b6eaaa..8e1aeaee93 100644 --- a/mindspore/lite/src/ops/populate/softmax_populate.cc +++ b/mindspore/lite/src/ops/populate/softmax_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,30 +13,31 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/softmax.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/softmax_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateSoftmaxParameter(const mindspore::lite::PrimitiveC *primitive) { - auto softmax_primitive = - reinterpret_cast<mindspore::lite::SoftMax *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateSoftmaxParameter(const void *prim) { SoftmaxParameter *softmax_param = reinterpret_cast<SoftmaxParameter *>(malloc(sizeof(SoftmaxParameter))); if (softmax_param == nullptr) { MS_LOG(ERROR) << "malloc SoftmaxParameter failed."; return nullptr; } memset(softmax_param, 0, sizeof(SoftmaxParameter)); - softmax_param->op_parameter_.type_ = primitive->Type(); - softmax_param->axis_ = softmax_primitive->GetAxis(); + auto primitive = static_cast<const schema::Primitive *>(prim); + softmax_param->op_parameter_.type_ = primitive->value_type(); + auto prim_softmax = primitive->value_as_Softmax(); + if (prim_softmax->axis()->size() != 1) { + MS_LOG(ERROR) << "axis number invalid!number: " << prim_softmax->axis()->size(); + return nullptr; + } + softmax_param->axis_ = prim_softmax->axis()->data()[0]; return reinterpret_cast<OpParameter *>(softmax_param); } +} // namespace -Registry SoftMaxParameterRegistry(schema::PrimitiveType_SoftMax, PopulateSoftmaxParameter); - +Registry g_softmaxParameterRegistry(schema::PrimitiveType_Softmax, PopulateSoftmaxParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc b/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc index 0682c9fe31..af71de033f 100644 --- a/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc +++ b/mindspore/lite/src/ops/populate/space_to_batch_nd_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,47 +13,60 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/space_to_batch_nd.h" -#include "src/common/common.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/space_to_batch_fp32.h" namespace mindspore { namespace lite { -OpParameter *PopulateSpaceToBatchNDParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateSpaceToBatchNDParameter(const void *prim) { auto *space_batch_param_nd = reinterpret_cast<SpaceToBatchParameter *>(malloc(sizeof(SpaceToBatchParameter))); if (space_batch_param_nd == nullptr) { MS_LOG(ERROR) << "malloc SpaceToBatchParameter failed."; return nullptr; } - - space_batch_param_nd->op_parameter_.type_ = primitive->Type(); - auto block_sizes = ((mindspore::lite::SpaceToBatchND *)primitive)->GetBlockShape(); - if (block_sizes.empty()) { + memset(space_batch_param_nd, 0, sizeof(SpaceToBatchParameter)); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + space_batch_param_nd->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_SpaceToBatchND(); + if (param->block_shape() == nullptr) { return reinterpret_cast<OpParameter *>(space_batch_param_nd); } - space_batch_param_nd->m_ = block_sizes.size(); - if (block_sizes.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of block_sizes.size() is too big"; + auto block_shapes = std::vector<int64_t>(param->block_shape()->begin(), param->block_shape()->end()); + if (block_shapes.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { + MS_LOG(ERROR) << "The value of block_shapes.size() is too big"; free(space_batch_param_nd); return nullptr; } - memcpy(space_batch_param_nd->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int)); - auto paddings = ((mindspore::lite::SpaceToBatchND *)primitive)->GetPaddings(); - if (paddings.empty()) { - return reinterpret_cast<OpParameter *>(space_batch_param_nd); - } - if (paddings.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of paddings.size() is too big"; + space_batch_param_nd->m_ = block_shapes.size(); + + auto fb_paddings = param->paddings()->data(); + if (fb_paddings->size() == 0 || + static_cast<uint64_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) > + std::numeric_limits<size_t>::max() / sizeof(int64_t)) { + MS_LOG(ERROR) << "The value of paddings.size() is zero or too big"; free(space_batch_param_nd); return nullptr; } - memcpy(space_batch_param_nd->paddings_, (paddings.data()), paddings.size() * sizeof(int)); + std::vector<int64_t> paddings; + for (auto iter = fb_paddings->begin(); iter != fb_paddings->end(); ++iter) { + auto paddings_data = (*iter)->data(); + auto paddings_vec = std::vector<int64_t>(paddings_data->begin(), paddings_data->end()); + paddings.insert(paddings.end(), paddings_vec.begin(), paddings_vec.end()); + } + + for (size_t i = 0; i < block_shapes.size(); ++i) { + space_batch_param_nd->block_sizes_[i] = static_cast<int>(block_shapes[i]); + } + + space_batch_param_nd->m_ = block_shapes.size(); + + for (size_t i = 0; i < paddings.size(); ++i) { + space_batch_param_nd->paddings_[i] = static_cast<int>(paddings[i]); + } return reinterpret_cast<OpParameter *>(space_batch_param_nd); } -Registry SpaceToBatchNDParameterRegistry(schema::PrimitiveType_SpaceToBatchND, PopulateSpaceToBatchNDParameter); - +} // namespace +Registry g_spaceToBatchNDRegistry(schema::PrimitiveType_SpaceToBatchND, PopulateSpaceToBatchNDParameter, SCHEMA_CUR); } // 
namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/space_to_batch_populate.cc b/mindspore/lite/src/ops/populate/space_to_batch_populate.cc index d4d803f3b9..75077c72bd 100644 --- a/mindspore/lite/src/ops/populate/space_to_batch_populate.cc +++ b/mindspore/lite/src/ops/populate/space_to_batch_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/space_to_batch.h" -#include "src/common/common.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/space_to_batch_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateSpaceToBatchParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateSpaceToBatchParameter(const void *prim) { SpaceToBatchParameter *space_batch_param = reinterpret_cast<SpaceToBatchParameter *>(malloc(sizeof(SpaceToBatchParameter))); if (space_batch_param == nullptr) { @@ -31,25 +27,42 @@ OpParameter *PopulateSpaceToBatchParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(space_batch_param, 0, sizeof(SpaceToBatchParameter)); - space_batch_param->op_parameter_.type_ = primitive->Type(); - auto block_sizes = ((mindspore::lite::SpaceToBatch *)primitive)->BlockSizes(); - space_batch_param->m_ = block_sizes.size(); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + space_batch_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_SpaceToBatch(); + auto block_sizes = std::vector<int64_t>(param->block_size()->begin(), param->block_size()->end()); if (block_sizes.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { MS_LOG(ERROR) << "The value of block_sizes.size() is too big"; free(space_batch_param); return nullptr; } - memcpy(space_batch_param->block_sizes_, (block_sizes.data()), block_sizes.size() * sizeof(int)); - auto paddings = ((mindspore::lite::SpaceToBatch *)primitive)->Paddings(); - if (paddings.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of paddings.size() is too big"; + space_batch_param->m_ = block_sizes.size(); + + auto fb_paddings = param->paddings()->data(); + if (fb_paddings->size() == 0 || + static_cast<uint64_t>(fb_paddings->size() * (*(fb_paddings->begin()))->data()->size()) > + std::numeric_limits<size_t>::max() / sizeof(int64_t)) { + MS_LOG(ERROR) << "The value of paddings.size() is zero or too big"; free(space_batch_param); return nullptr; } - memcpy(space_batch_param->paddings_, (paddings.data()), paddings.size() * sizeof(int)); + std::vector<int64_t> paddings; + for (auto iter = fb_paddings->begin(); iter != fb_paddings->end(); ++iter) { + auto paddings_data = (*iter)->data(); + auto paddings_vec = std::vector<int64_t>(paddings_data->begin(), paddings_data->end()); + paddings.insert(paddings.end(), paddings_vec.begin(), paddings_vec.end()); + } + + for (size_t i = 0; i < block_sizes.size(); ++i) { + space_batch_param->block_sizes_[i] = static_cast<int>(block_sizes[i]); + } + + for (size_t i = 0; i < paddings.size(); ++i) { + space_batch_param->paddings_[i] = static_cast<int>(paddings[i]); + } return 
reinterpret_cast<OpParameter *>(space_batch_param); } -Registry SpaceToBatchParameterRegistry(schema::PrimitiveType_SpaceToBatch, PopulateSpaceToBatchParameter); - +} // namespace +Registry g_spaceToBatchRegistry(schema::PrimitiveType_SpaceToBatch, PopulateSpaceToBatchParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/space_to_depth_populate.cc b/mindspore/lite/src/ops/populate/space_to_depth_populate.cc index 2207470d3d..c78e23382d 100644 --- a/mindspore/lite/src/ops/populate/space_to_depth_populate.cc +++ b/mindspore/lite/src/ops/populate/space_to_depth_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,16 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/space_to_depth.h" -#include "src/common/common.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/space_to_depth_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateSpaceToDepthParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateSpaceToDepthParameter(const void *prim) { SpaceToDepthParameter *space_depth_param = reinterpret_cast<SpaceToDepthParameter *>(malloc(sizeof(SpaceToDepthParameter))); if (space_depth_param == nullptr) { @@ -30,17 +26,17 @@ OpParameter *PopulateSpaceToDepthParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(space_depth_param, 0, sizeof(SpaceToDepthParameter)); - space_depth_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::SpaceToDepth *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - space_depth_param->op_parameter_.type_ = primitive->Type(); - space_depth_param->block_size_ = param->GetBlockSize(); - if (param->GetFormat() != schema::Format::Format_NHWC) { + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SpaceToDepth(); + space_depth_param->op_parameter_.type_ = primitive->value_type(); + space_depth_param->block_size_ = value->block_size(); + if (value->format() != schema::Format::Format_NHWC) { MS_LOG(ERROR) << "Currently only NHWC format is supported."; free(space_depth_param); return nullptr; } return reinterpret_cast<OpParameter *>(space_depth_param); } -Registry SpaceToDepthParameterRegistry(schema::PrimitiveType_SpaceToDepth, PopulateSpaceToDepthParameter); +Registry SpaceToDepthParameterRegistry(schema::PrimitiveType_SpaceToDepth, PopulateSpaceToDepthParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/sparse_to_dense_populate.cc b/mindspore/lite/src/ops/populate/sparse_to_dense_populate.cc index 85f759eee7..578824279d 100644 --- a/mindspore/lite/src/ops/populate/sparse_to_dense_populate.cc +++ b/mindspore/lite/src/ops/populate/sparse_to_dense_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,26 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
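// ---- Editorial note (not part of the patch) ----
// Worked example for the two SpaceToBatch hunks above: a primitive with
// block_shape = [2, 2] and paddings = [[0, 0], [1, 1]] (a Vec2D of two Vec
// rows in the schema) produces
//   block_sizes_ = {2, 2}, m_ = 2, paddings_ = {0, 0, 1, 1},
// i.e. the nested int64 vectors are flattened row by row and narrowed to int
// in SpaceToBatchParameter.
// ---- end editorial note ----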
*/ - -#include "src/ops/sparse_to_dense.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/sparse_to_dense_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateSparseToDenseParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateSparseToDenseParameter(const void *prim) { auto *sparse_to_dense_param = reinterpret_cast<SparseToDenseParameter *>(malloc(sizeof(SparseToDenseParameter))); if (sparse_to_dense_param == nullptr) { MS_LOG(ERROR) << "malloc SparseToDenseParameter failed."; return nullptr; } memset(sparse_to_dense_param, 0, sizeof(SparseToDenseParameter)); - sparse_to_dense_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + sparse_to_dense_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(sparse_to_dense_param); } +} // namespace -Registry SparseToDenseParameterRegistry(schema::PrimitiveType_SparseToDense, PopulateSparseToDenseParameter); +Registry g_sparseToDenseParameterRegistry(schema::PrimitiveType_SparseToDense, PopulateSparseToDenseParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/split_populate.cc b/mindspore/lite/src/ops/populate/split_populate.cc index 9fcd931506..74c32b024d 100644 --- a/mindspore/lite/src/ops/populate/split_populate.cc +++ b/mindspore/lite/src/ops/populate/split_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/split.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/split_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateSplitParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateSplitParameter(const void *prim) { auto *split_param = reinterpret_cast<SplitParameter *>(malloc(sizeof(SplitParameter))); if (split_param == nullptr) { MS_LOG(ERROR) << "malloc SplitParameter failed."; return nullptr; } memset(split_param, 0, sizeof(SplitParameter)); - auto param = reinterpret_cast<mindspore::lite::Split *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - split_param->op_parameter_.type_ = primitive->Type(); - split_param->num_split_ = param->num_split(); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Split(); + split_param->op_parameter_.type_ = primitive->value_type(); + split_param->num_split_ = value->output_num(); if (split_param->num_split_ > std::numeric_limits<int>::max() / static_cast<int>(sizeof(int))) { MS_LOG(ERROR) << "The value of split_param->num_split_ is too big"; free(split_param); @@ -46,15 +45,20 @@ OpParameter *PopulateSplitParameter(const mindspore::lite::PrimitiveC *primitive return nullptr; } memset(split_param->split_sizes_, 0, split_param->num_split_ * sizeof(int)); - - auto split_sizes_vector_ = param->size_splits(); - for (size_t i = 0; i < split_sizes_vector_.size(); i++) { - split_param->split_sizes_[i] = split_sizes_vector_[i]; + auto split_sizes_vector_ = value->size_splits(); + if (split_sizes_vector_ != NULL) { + int i = 0; + for (auto iter : *split_sizes_vector_) { + split_param->split_sizes_[i++] = iter; + } + split_param->split_count_ = split_param->num_split_; + } else { + split_param->split_count_ = 0; } - - split_param->split_dim_ = param->GetSplitDim(); + split_param->split_dim_ = value->axis(); return reinterpret_cast<OpParameter *>(split_param); } -Registry SplitParameterRegistry(schema::PrimitiveType_Split, PopulateSplitParameter); +} // namespace +Registry g_splitParameterRegistry(schema::PrimitiveType_Split, PopulateSplitParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/squeeze_populate.cc b/mindspore/lite/src/ops/populate/squeeze_populate.cc index c589483e44..0b0e91bc69 100644 --- a/mindspore/lite/src/ops/populate/squeeze_populate.cc +++ b/mindspore/lite/src/ops/populate/squeeze_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,22 +13,35 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" +#include "nnacl/squeeze_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateSqueezeParameter(const mindspore::lite::PrimitiveC *primitive) { - OpParameter *squeeze_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); +namespace { +OpParameter *PopulateSqueezeParameter(const void *prim) { + SqueezeParameter *squeeze_param = reinterpret_cast<SqueezeParameter *>(malloc(sizeof(SqueezeParameter))); if (squeeze_param == nullptr) { MS_LOG(ERROR) << "malloc SqueezeParameter failed."; return nullptr; } - memset(squeeze_param, 0, sizeof(OpParameter)); - squeeze_param->type_ = primitive->Type(); + memset(squeeze_param, 0, sizeof(SqueezeParameter)); + const schema::Primitive *primitive = static_cast<const schema::Primitive *>(prim); + squeeze_param->op_parameter_.type_ = primitive->value_type(); + + auto squeeze_prim = primitive->value_as_Squeeze(); + if (squeeze_prim->axis() != nullptr) { + squeeze_param->axis_size_ = squeeze_prim->axis()->size(); + for (size_t i = 0; i < squeeze_param->axis_size_; i++) { + squeeze_param->axis_[i] = *(squeeze_prim->axis()->begin() + i); + } + } else { + squeeze_param->axis_size_ = 0; + } + return reinterpret_cast<OpParameter *>(squeeze_param); } -Registry SqueezeParameterRegistry(schema::PrimitiveType_Squeeze, PopulateSqueezeParameter); +} // namespace +Registry g_squeezeParameterRegistry(schema::PrimitiveType_Squeeze, PopulateSqueezeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/stack_populate.cc b/mindspore/lite/src/ops/populate/stack_populate.cc index 728b197688..7eede89858 100644 --- a/mindspore/lite/src/ops/populate/stack_populate.cc +++ b/mindspore/lite/src/ops/populate/stack_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/stack.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/stack_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateStackParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateStackParameter(const void *prim) { StackParameter *stack_param = reinterpret_cast<StackParameter *>(malloc(sizeof(StackParameter))); if (stack_param == nullptr) { MS_LOG(ERROR) << "malloc StackParameter failed."; return nullptr; } memset(stack_param, 0, sizeof(StackParameter)); - auto param = reinterpret_cast<mindspore::lite::Stack *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - stack_param->op_parameter_.type_ = primitive->Type(); - stack_param->axis_ = param->GetAxis(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Stack(); + stack_param->op_parameter_.type_ = primitive->value_type(); + stack_param->axis_ = static_cast<int>(value->axis()); return reinterpret_cast<OpParameter *>(stack_param); } -Registry StackParameterRegistry(schema::PrimitiveType_Stack, PopulateStackParameter); +} // namespace +Registry g_stackParameterRegistry(schema::PrimitiveType_Stack, PopulateStackParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/strided_slice_populate.cc b/mindspore/lite/src/ops/populate/strided_slice_populate.cc index fdf29bda75..894a6b121e 100644 --- a/mindspore/lite/src/ops/populate/strided_slice_populate.cc +++ b/mindspore/lite/src/ops/populate/strided_slice_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,17 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - #include "src/ops/populate/strided_slice_populate.h" -#include <limits> -#include "src/ops/strided_slice.h" -#include "src/ops/primitive_c.h" -#include "src/ops/populate/populate_register.h" -#include "nnacl/strided_slice_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateStridedSliceParameter(const void *prim) { StridedSliceParameter *strided_slice_param = reinterpret_cast<StridedSliceParameter *>(malloc(sizeof(StridedSliceParameter))); if (strided_slice_param == nullptr) { @@ -31,42 +25,20 @@ OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *pr return nullptr; } memset(strided_slice_param, 0, sizeof(StridedSliceParameter)); - strided_slice_param->op_parameter_.type_ = primitive->Type(); - auto n_dims = ((lite::StridedSlice *)primitive)->NDims(); - strided_slice_param->num_axes_ = n_dims; - auto begin = ((lite::StridedSlice *)primitive)->GetBegins(); - if (begin.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of begin.size() is too big"; - free(strided_slice_param); - return nullptr; - } - memcpy(strided_slice_param->begins_, (begin.data()), begin.size() * sizeof(int)); - auto end = ((lite::StridedSlice *)primitive)->GetEnds(); - if (end.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of end.size() is too big"; - free(strided_slice_param); - return nullptr; - } - memcpy(strided_slice_param->ends_, (end.data()), end.size() * sizeof(int)); - auto stride = ((lite::StridedSlice *)primitive)->GetStrides(); - if (stride.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of stride.size() is too big"; - free(strided_slice_param); - return nullptr; - } - memcpy(strided_slice_param->strides_, (stride.data()), stride.size() * sizeof(int)); - auto in_shape = ((lite::StridedSlice *)primitive)->GetInShape(); - if (in_shape.size() > std::numeric_limits<size_t>::max() / sizeof(int)) { - MS_LOG(ERROR) << "The value of in_shape.size() is too big"; - free(strided_slice_param); - return nullptr; - } - memcpy(strided_slice_param->in_shape_, (in_shape.data()), in_shape.size() * sizeof(int)); - strided_slice_param->in_shape_length_ = static_cast<int>(in_shape.size()); + + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_StridedSlice(); + strided_slice_param->op_parameter_.type_ = primitive->value_type(); + + strided_slice_param->begins_mask_ = value->begin_mask(); + strided_slice_param->ends_mask_ = value->end_mask(); + strided_slice_param->ellipsisMask_ = value->ellipsis_mask(); + strided_slice_param->newAxisMask_ = value->new_axis_mask(); + strided_slice_param->shrinkAxisMask_ = value->shrink_axis_mask(); return reinterpret_cast<OpParameter *>(strided_slice_param); } -Registry StridedSliceParameterRegistry(schema::PrimitiveType_StridedSlice, PopulateStridedSliceParameter); +Registry StridedSliceParameterRegistry(schema::PrimitiveType_StridedSlice, PopulateStridedSliceParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/strided_slice_populate.h b/mindspore/lite/src/ops/populate/strided_slice_populate.h index d7efaae086..bebc3ad647 100644 --- a/mindspore/lite/src/ops/populate/strided_slice_populate.h +++ b/mindspore/lite/src/ops/populate/strided_slice_populate.h @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd 
+ * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,16 +13,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + #ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_ #define MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_ -#include "src/ops/arithmetic.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/strided_slice_parameter.h" namespace mindspore { namespace lite { - -OpParameter *PopulateStridedSliceParameter(const mindspore::lite::PrimitiveC *primitive); - +OpParameter *PopulateStridedSliceParameter(const void *prim); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_ diff --git a/mindspore/lite/src/ops/populate/sub_populate.cc b/mindspore/lite/src/ops/populate/sub_populate.cc index 5685851953..78c8d5186d 100644 --- a/mindspore/lite/src/ops/populate/sub_populate.cc +++ b/mindspore/lite/src/ops/populate/sub_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,9 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/sub.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/arithmetic.h" #include "src/ops/populate/arithmetic_populate.h" @@ -23,17 +20,18 @@ namespace mindspore { namespace lite { -OpParameter *PopulateSubParameter(const mindspore::lite::PrimitiveC *primitive) { - ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive); +OpParameter *PopulateSubParameter(const void *prim) { + ArithmeticParameter *param = PopulateArithmeticCommonPara(prim); if (param == nullptr) { MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; return nullptr; } - param->activation_type_ = reinterpret_cast<const mindspore::lite::Sub *>(primitive)->GetActivationType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(param); } -Registry SubParameterRegistry(schema::PrimitiveType_Sub, PopulateSubParameter); +Registry g_subParameterRegistry(schema::PrimitiveType_SubFusion, PopulateSubParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/switch_populate.cc b/mindspore/lite/src/ops/populate/switch_populate.cc index c895b9ae6c..b06e0de518 100644 --- a/mindspore/lite/src/ops/populate/switch_populate.cc +++ b/mindspore/lite/src/ops/populate/switch_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,24 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/switch.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateSwitchParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateSwitchParameter(const void *prim) { OpParameter *switch_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (switch_parameter == nullptr) { MS_LOG(ERROR) << "malloc SwitchParameter failed."; return nullptr; } memset(switch_parameter, 0, sizeof(OpParameter)); - switch_parameter->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + switch_parameter->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(switch_parameter); } -Registry SwitchParameterRegistry(schema::PrimitiveType_Switch, PopulateSwitchParameter); +Registry SwitchParameterRegistry(schema::PrimitiveType_Switch, PopulateSwitchParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc b/mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc index 3c7f157d30..f80b76263b 100644 --- a/mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc +++ b/mindspore/lite/src/ops/populate/tensorlistfromtensor_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,28 +15,26 @@ */ #include "nnacl/tensorlist_parameter.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "src/ops/tensorlist_fromtensor.h" namespace mindspore { namespace lite { -OpParameter *PopulateTensorListFromTensorParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTensorListFromTensorParameter(const void *prim) { TensorListParameter *TensorList_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); if (TensorList_param == nullptr) { MS_LOG(ERROR) << "malloc TensorListParameter failed."; return nullptr; } memset(TensorList_param, 0, sizeof(TensorListParameter)); - TensorList_param->op_parameter_.type_ = primitive->Type(); - auto tensorList = - reinterpret_cast<mindspore::lite::TensorListFromTensor *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - TensorList_param->shape_type_ = (TypeId)(tensorList->GetShapeType()); - TensorList_param->element_dtype_ = (TypeId)(tensorList->GetElementDType()); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TensorListFromTensor(); + TensorList_param->op_parameter_.type_ = primitive->value_type(); + TensorList_param->shape_type_ = value->shape_type(); + TensorList_param->element_dtype_ = value->element_dtype(); return reinterpret_cast<OpParameter *>(TensorList_param); } Registry TensorListFromTensorParameterRegistry(schema::PrimitiveType_TensorListFromTensor, - PopulateTensorListFromTensorParameter); + PopulateTensorListFromTensorParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc b/mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc index 18c8b3508a..40db0cde97 100644 --- a/mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc +++ b/mindspore/lite/src/ops/populate/tensorlistgetitem_populate.cc @@ -1,5 +1,5 @@ /** - 
* Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,29 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/tensorlist_getitem.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/tensorlist_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateTensorListGetItemParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTensorListGetItemParameter(const void *prim) { TensorListParameter *getItem_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); if (getItem_param == nullptr) { MS_LOG(ERROR) << "malloc TensorListParameter failed."; return nullptr; } memset(getItem_param, 0, sizeof(TensorListParameter)); - getItem_param->op_parameter_.type_ = primitive->Type(); - auto getItem = - reinterpret_cast<mindspore::lite::TensorListGetItem *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - getItem_param->element_dtype_ = getItem->GetElementDType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TensorListGetItem(); + getItem_param->op_parameter_.type_ = primitive->value_type(); + getItem_param->element_dtype_ = value->element_dtype(); return reinterpret_cast<OpParameter *>(getItem_param); } -Registry TensorListGetItemParameterRegistry(schema::PrimitiveType_TensorListGetItem, - PopulateTensorListGetItemParameter); +Registry TensorListGetItemParameterRegistry(schema::PrimitiveType_TensorListGetItem, PopulateTensorListGetItemParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc b/mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc index 76a007cd02..dadcb9f799 100644 --- a/mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc +++ b/mindspore/lite/src/ops/populate/tensorlistreserve_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,29 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/tensorlist_reserve.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/tensorlist_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateTensorListReserveParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTensorListReserveParameter(const void *prim) { TensorListParameter *reserve_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); if (reserve_param == nullptr) { MS_LOG(ERROR) << "malloc TensorListParameter failed."; return nullptr; } memset(reserve_param, 0, sizeof(TensorListParameter)); - reserve_param->op_parameter_.type_ = primitive->Type(); - auto reserve = - reinterpret_cast<mindspore::lite::TensorListReserve *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - reserve_param->element_dtype_ = reserve->GetElementDType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TensorListReserve(); + reserve_param->op_parameter_.type_ = primitive->value_type(); + reserve_param->element_dtype_ = value->element_dtype(); return reinterpret_cast<OpParameter *>(reserve_param); } -Registry TensorListReserveParameterRegistry(schema::PrimitiveType_TensorListReserve, - PopulateTensorListReserveParameter); +Registry TensorListReserveParameterRegistry(schema::PrimitiveType_TensorListReserve, PopulateTensorListReserveParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tensorlistsetlitem_populate.cc b/mindspore/lite/src/ops/populate/tensorlistsetlitem_populate.cc index 73b463e67b..36a0788b29 100644 --- a/mindspore/lite/src/ops/populate/tensorlistsetlitem_populate.cc +++ b/mindspore/lite/src/ops/populate/tensorlistsetlitem_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,28 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/tensorlist_setitem.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/tensorlist_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateTensorListSetItemParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTensorListSetItemParameter(const void *prim) { TensorListParameter *setItem_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); if (setItem_param == nullptr) { MS_LOG(ERROR) << "malloc TensorListParameter failed."; return nullptr; } memset(setItem_param, 0, sizeof(TensorListParameter)); - setItem_param->op_parameter_.type_ = primitive->Type(); - auto setItem = - reinterpret_cast<mindspore::lite::TensorListSetItem *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - setItem_param->element_dtype_ = setItem->GetElementDType(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TensorListSetItem(); + setItem_param->op_parameter_.type_ = primitive->value_type(); + setItem_param->element_dtype_ = value->element_dtype(); return reinterpret_cast<OpParameter *>(setItem_param); } -Registry TensorListSetItemParameterRegistry(schema::PrimitiveType_TensorListSetItem, - PopulateTensorListSetItemParameter); +Registry TensorListSetItemParameterRegistry(schema::PrimitiveType_TensorListSetItem, PopulateTensorListSetItemParameter, + SCHEMA_CUR); + } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tensorliststack_populate.cc b/mindspore/lite/src/ops/populate/tensorliststack_populate.cc index a06638ca24..615c142a75 100644 --- a/mindspore/lite/src/ops/populate/tensorliststack_populate.cc +++ b/mindspore/lite/src/ops/populate/tensorliststack_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,29 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/tensorlist_stack.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/tensorlist_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateTensorListStackParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTensorListStackParameter(const void *prim) { TensorListParameter *stack_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); if (stack_param == nullptr) { MS_LOG(ERROR) << "malloc TensorListParameter failed."; return nullptr; } memset(stack_param, 0, sizeof(TensorListParameter)); - stack_param->op_parameter_.type_ = primitive->Type(); - auto stack = - reinterpret_cast<mindspore::lite::TensorListStack *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - stack_param->element_dtype_ = stack->GetElementDType(); - stack_param->num_element_ = stack->GetNumElements(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TensorListStack(); + stack_param->op_parameter_.type_ = primitive->value_type(); + stack_param->element_dtype_ = value->element_dtype(); + stack_param->num_element_ = value->num_elements(); return reinterpret_cast<OpParameter *>(stack_param); } -Registry TensorListStackParameterRegistry(schema::PrimitiveType_TensorListStack, PopulateTensorListStackParameter); +Registry TensorListStackParameterRegistry(schema::PrimitiveType_TensorListStack, PopulateTensorListStackParameter, + SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/tile_populate.cc b/mindspore/lite/src/ops/populate/tile_populate.cc index 09721b5354..73eafa0b6a 100644 --- a/mindspore/lite/src/ops/populate/tile_populate.cc +++ b/mindspore/lite/src/ops/populate/tile_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,46 +13,33 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/tile.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/base/tile_base.h" namespace mindspore { namespace lite { -OpParameter *PopulateTileParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateTileParameter(const void *prim) { TileParameter *tile_param = reinterpret_cast<TileParameter *>(malloc(sizeof(TileParameter))); if (tile_param == nullptr) { MS_LOG(ERROR) << "malloc TileParameter failed."; return nullptr; } memset(tile_param, 0, sizeof(TileParameter)); - tile_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Tile *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); -#ifdef SUPPORT_TRAIN - auto multiples = param->GetMultiples(); - tile_param->in_dim_ = multiples.size(); - for (int i = 0; i < tile_param->in_dim_; ++i) { - tile_param->multiples_[i] = multiples[i]; - } -#else - auto dims = param->GetDims(); - auto multiples = param->GetMultiples(); - for (size_t i = 0; i < kQuadrupleNum; ++i) { - tile_param->multiples_[i] = 1; - } - if (!dims.empty() && !multiples.empty()) { - for (size_t i = 0; i < dims.size(); ++i) { - tile_param->multiples_[dims[i]] = multiples[i]; + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_TileFusion(); + tile_param->op_parameter_.type_ = primitive->value_type(); + auto dims = value->dims(); + if (dims != nullptr) { + for (size_t i = 0; i < dims->size(); ++i) { + tile_param->dims_[i] = static_cast<int>(dims->Get(i)); } + tile_param->dims_size_ = dims->size(); } -#endif return reinterpret_cast<OpParameter *>(tile_param); } -Registry TileParameterRegistry(schema::PrimitiveType_Tile, PopulateTileParameter); +Registry TileParameterRegistry(schema::PrimitiveType_TileFusion, PopulateTileParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/topk_populate.cc b/mindspore/lite/src/ops/populate/topk_populate.cc index bafb35493e..17b03004c8 100644 --- a/mindspore/lite/src/ops/populate/topk_populate.cc +++ b/mindspore/lite/src/ops/populate/topk_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,29 +13,27 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/topk.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/topk_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateTopKParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateTopKParameter(const void *prim) { TopkParameter *topk_param = reinterpret_cast<TopkParameter *>(malloc(sizeof(TopkParameter))); if (topk_param == nullptr) { MS_LOG(ERROR) << "malloc TopkParameter failed."; return nullptr; } memset(topk_param, 0, sizeof(TopkParameter)); - topk_param->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::TopK *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - topk_param->k_ = param->GetK(); - topk_param->sorted_ = param->GetSorted(); + auto primitive = static_cast<const schema::Primitive *>(prim); + topk_param->op_parameter_.type_ = primitive->value_type(); + auto param = primitive->value_as_TopKFusion(); + topk_param->sorted_ = param->sorted(); return reinterpret_cast<OpParameter *>(topk_param); } -Registry TopKParameterRegistry(schema::PrimitiveType_TopK, PopulateTopKParameter); +} // namespace +Registry g_topKParameterRegistry(schema::PrimitiveType_TopKFusion, PopulateTopKParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/transpose_populate.cc b/mindspore/lite/src/ops/populate/transpose_populate.cc index ecd2686b01..8c647dbddb 100644 --- a/mindspore/lite/src/ops/populate/transpose_populate.cc +++ b/mindspore/lite/src/ops/populate/transpose_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,36 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/transpose.h" -#include <memory> -#include "src/common/log_adapter.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/transpose.h" namespace mindspore { namespace lite { - -OpParameter *PopulateTransposeParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateTransposeParameter(const void *prim) { TransposeParameter *transpose_param = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter))); if (transpose_param == nullptr) { MS_LOG(ERROR) << "malloc TransposeParameter failed."; return nullptr; } memset(transpose_param, 0, sizeof(TransposeParameter)); - auto param = reinterpret_cast<mindspore::lite::Transpose *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - transpose_param->op_parameter_.type_ = primitive->Type(); - auto perm_vector_ = param->GetPerm(); - int i = 0; - for (auto iter = perm_vector_.begin(); iter != perm_vector_.end(); iter++) { - transpose_param->perm_[i++] = *iter; - } - transpose_param->num_axes_ = i; + auto primitive = static_cast<const schema::Primitive *>(prim); + transpose_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(transpose_param); } +} // namespace -Registry TransposeParameterRegistry(schema::PrimitiveType_Transpose, PopulateTransposeParameter); +Registry g_transposeParameterRegistry(schema::PrimitiveType_Transpose, PopulateTransposeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/uniform_real_populate.cc b/mindspore/lite/src/ops/populate/uniform_real_populate.cc new file mode 100644 index 0000000000..f8edf8b0a7 --- /dev/null +++ b/mindspore/lite/src/ops/populate/uniform_real_populate.cc @@ -0,0 +1,23 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "src/ops/populate/default_populate.h" + +namespace mindspore { +namespace lite { +Registry g_uniformRealParameterRegistry(schema::PrimitiveType_UniformReal, DefaultPopulateParameter, SCHEMA_CUR); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/unique_populate.cc b/mindspore/lite/src/ops/populate/unique_populate.cc index 1ba3424ab8..abc028d76b 100644 --- a/mindspore/lite/src/ops/populate/unique_populate.cc +++ b/mindspore/lite/src/ops/populate/unique_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,26 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/unique.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/fp32/unique_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateUniqueParameter(const mindspore::lite::PrimitiveC *primitive) { +namespace { +OpParameter *PopulateUniqueParameter(const void *prim) { UniqueParameter *unique_param = reinterpret_cast<UniqueParameter *>(malloc(sizeof(UniqueParameter))); if (unique_param == nullptr) { MS_LOG(ERROR) << "malloc UniqueParameter failed."; return nullptr; } memset(unique_param, 0, sizeof(UniqueParameter)); - unique_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + unique_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(unique_param); } +} // namespace -Registry UniqueParameterRegistry(schema::PrimitiveType_Unique, PopulateUniqueParameter); +Registry g_uniqueParameterRegistry(schema::PrimitiveType_Unique, PopulateUniqueParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/unsorted_segment_sum_populate.cc b/mindspore/lite/src/ops/populate/unsorted_segment_sum_populate.cc index 0d72aaf912..1066a3baab 100644 --- a/mindspore/lite/src/ops/populate/unsorted_segment_sum_populate.cc +++ b/mindspore/lite/src/ops/populate/unsorted_segment_sum_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,25 +13,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/unsorted_segment_sum.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { namespace lite { -OpParameter *PopulateUnsortedSegmentSumParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateUnsortedSegmentSumParameter(const void *prim) { OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc UnsortedSegmentSum Parameter failed."; return nullptr; } memset(param, 0, sizeof(OpParameter)); - param->type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + param->type_ = primitive->value_type(); return param; } Registry UnsortedSegmentSumParameterRegistry(schema::PrimitiveType_UnsortedSegmentSum, - PopulateUnsortedSegmentSumParameter); + PopulateUnsortedSegmentSumParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/unsqueeze_populate.cc b/mindspore/lite/src/ops/populate/unsqueeze_populate.cc index 6e1cdd387a..aefa55b564 100644 --- a/mindspore/lite/src/ops/populate/unsqueeze_populate.cc +++ b/mindspore/lite/src/ops/populate/unsqueeze_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,33 +13,32 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/unsqueeze.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "mindspore/lite/nnacl/unsqueeze_parameter.h" +#include "nnacl/unsqueeze_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateUnsqueezeParameter(const mindspore::lite::PrimitiveC *primitive) { - auto unsqueeze_attr = reinterpret_cast<lite::Unsqueeze *>(const_cast<lite::PrimitiveC *>(primitive)); +namespace { +OpParameter *PopulateUnsqueezeParameter(const void *prim) { UnSqueezeParameter *unsqueeze_param = reinterpret_cast<UnSqueezeParameter *>(malloc(sizeof(UnSqueezeParameter))); if (unsqueeze_param == nullptr) { - MS_LOG(ERROR) << "malloc UnsqueezeParameter failed."; + MS_LOG(ERROR) << "malloc UnSqueezeParameter failed."; return nullptr; } memset(unsqueeze_param, 0, sizeof(UnSqueezeParameter)); - unsqueeze_param->op_parameter_.type_ = primitive->Type(); - auto flatAxis = unsqueeze_attr->GetAxis(); + auto primitive = static_cast<const schema::Primitive *>(prim); + unsqueeze_param->op_parameter_.type_ = primitive->value_type(); + auto unsqueeze_prim = primitive->value_as_Unsqueeze(); + auto flat_axis = std::vector<int>(unsqueeze_prim->axis()->begin(), unsqueeze_prim->axis()->end()); + unsqueeze_param->num_dim_ = flat_axis.size(); int i = 0; - for (auto iter = flatAxis.begin(); iter != flatAxis.end(); iter++) { + for (auto iter = flat_axis.begin(); iter != flat_axis.end(); ++iter) { unsqueeze_param->dims_[i++] = *iter; } return reinterpret_cast<OpParameter *>(unsqueeze_param); } -Registry UnsqueezeParameterRegistry(schema::PrimitiveType_Unsqueeze, PopulateUnsqueezeParameter); +} // namespace +Registry g_unsqueezeParameterRegistry(schema::PrimitiveType_Unsqueeze, PopulateUnsqueezeParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/unstack_populate.cc b/mindspore/lite/src/ops/populate/unstack_populate.cc index a6e5b6e749..ab2bc59295 100644 --- a/mindspore/lite/src/ops/populate/unstack_populate.cc +++ b/mindspore/lite/src/ops/populate/unstack_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,27 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - -#include "src/ops/unstack.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" #include "nnacl/unstack_parameter.h" namespace mindspore { namespace lite { -OpParameter *PopulateUnstackParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateUnstackParameter(const void *prim) { UnstackParameter *unstack_param = reinterpret_cast<UnstackParameter *>(malloc(sizeof(UnstackParameter))); if (unstack_param == nullptr) { MS_LOG(ERROR) << "malloc UnstackParameter failed."; return nullptr; } memset(unstack_param, 0, sizeof(UnstackParameter)); - auto param = reinterpret_cast<mindspore::lite::Unstack *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - unstack_param->op_parameter_.type_ = primitive->Type(); - unstack_param->axis_ = param->GetAxis(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Unstack(); + unstack_param->op_parameter_.type_ = primitive->value_type(); + unstack_param->axis_ = value->axis(); return reinterpret_cast<OpParameter *>(unstack_param); } -Registry UnstackParameterRegistry(schema::PrimitiveType_Unstack, PopulateUnstackParameter); +Registry UnstackParameterRegistry(schema::PrimitiveType_Unstack, PopulateUnstackParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/upsample_populate.cc b/mindspore/lite/src/ops/populate/upsample_populate.cc deleted file mode 100644 index 617196552c..0000000000 --- a/mindspore/lite/src/ops/populate/upsample_populate.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/upsample.h" -#include "src/ops/primitive_c.h" -#include "src/ops/populate/populate_register.h" -#include "nnacl/upsample_parameter.h" - -namespace mindspore { -namespace lite { - -OpParameter *PopulateUpsampleParameter(const mindspore::lite::PrimitiveC *primitive) { - UpsampleParameter *upsample_parameter = reinterpret_cast<UpsampleParameter *>(malloc(sizeof(UpsampleParameter))); - if (upsample_parameter == nullptr) { - MS_LOG(ERROR) << "malloc Upsample Parameter failed."; - return nullptr; - } - memset(upsample_parameter, 0, sizeof(UpsampleParameter)); - auto param = reinterpret_cast<mindspore::lite::Upsample *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - upsample_parameter->op_parameter_.type_ = primitive->Type(); - auto method = param->GetMode(); - if (method == "linear") { - upsample_parameter->method_ = 0; - } else { - upsample_parameter->method_ = 1; - } - return reinterpret_cast<OpParameter *>(upsample_parameter); -} -Registry UpsampleParemeterRegistry(schema::PrimitiveType_Upsample, PopulateUpsampleParameter); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/activation_grad_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/activation_grad_populate_v0.cc new file mode 100644 index 0000000000..0e06a24f48 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/activation_grad_populate_v0.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32_grad/activation_grad.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateActivationGradParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto activation_grad_prim = primitive->value_as_ActivationGrad();
+  ActivationGradParameter *act_param =
+    reinterpret_cast<ActivationGradParameter *>(malloc(sizeof(ActivationGradParameter)));
+  if (act_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ActivationGradParameter failed.";
+    return nullptr;
+  }
+  memset(act_param, 0, sizeof(ActivationGradParameter));
+  act_param->op_parameter.type_ = schema::PrimitiveType_ActivationGrad;
+
+  act_param->type_ = static_cast<int>(activation_grad_prim->type());
+  act_param->alpha_ = activation_grad_prim->alpha();
+  return reinterpret_cast<OpParameter *>(act_param);
+}
+}  // namespace
+
+Registry g_activationGradV0ParameterRegistry(schema::v0::PrimitiveType_ActivationGrad, PopulateActivationGradParameter,
+                                             SCHEMA_V0);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/activation_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/activation_populate_v0.cc
new file mode 100644
index 0000000000..a08c3d9c0c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/activation_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/activation_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateActivationParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto activation_prim = primitive->value_as_Activation(); + ActivationParameter *act_param = reinterpret_cast<ActivationParameter *>(malloc(sizeof(ActivationParameter))); + if (act_param == nullptr) { + MS_LOG(ERROR) << "malloc ActivationParameter failed."; + return nullptr; + } + memset(act_param, 0, sizeof(ActivationParameter)); + act_param->op_parameter_.type_ = schema::PrimitiveType_Activation; + + act_param->type_ = static_cast<int>(activation_prim->type()); + act_param->alpha_ = activation_prim->alpha(); + act_param->min_val_ = activation_prim->min_val(); + act_param->max_val_ = activation_prim->max_val(); + return reinterpret_cast<OpParameter *>(act_param); +} +} // namespace + +Registry g_activationV0ParameterRegistry(schema::v0::PrimitiveType_Activation, PopulateActivationParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/adam_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/adam_populate_v0.cc new file mode 100644 index 0000000000..ab70b40556 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/adam_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateAdamParameter(const void *prim) { + OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc Adam Parameter failed."; + return nullptr; + } + memset(param, 0, sizeof(OpParameter)); + param->type_ = schema::PrimitiveType_Adam; + return param; +} +} // namespace + +Registry g_adamV0ParameterRegistry(schema::v0::PrimitiveType_Adam, PopulateAdamParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/add_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/add_populate_v0.cc new file mode 100644 index 0000000000..9f80d6ca1d --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/add_populate_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/arithmetic.h" +#include "src/ops/populate/v0/arithmetic_populate_v0.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateAddParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto add_prim = primitive->value_as_Add(); + ArithmeticParameter *param = PopulateArithmeticV0CommonPara(primitive); + if (param == nullptr) { + MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; + return nullptr; + } + + param->op_parameter_.type_ = schema::PrimitiveType_AddFusion; + param->activation_type_ = add_prim->activationType(); + return reinterpret_cast<OpParameter *>(param); +} +} // namespace + +Registry g_addV0ParameterRegistry(schema::v0::PrimitiveType_Add, PopulateAddParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/addn_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/addn_populate_v0.cc new file mode 100644 index 0000000000..3b678868f6 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/addn_populate_v0.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/op_base.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateAddNParameter(const void *prim) { + OpParameter *addn_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (addn_param == nullptr) { + MS_LOG(ERROR) << "malloc OpParameter failed."; + return nullptr; + } + memset(addn_param, 0, sizeof(OpParameter)); + addn_param->type_ = schema::PrimitiveType_AddN; + return reinterpret_cast<OpParameter *>(addn_param); +} +} // namespace + +Registry g_addNV0ParameterRegistry(schema::v0::PrimitiveType_AddN, PopulateAddNParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/argmax_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/argmax_populate_v0.cc new file mode 100644 index 0000000000..0e1682d865 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/argmax_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/arg_min_max_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateArgMaxParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto argmax_prim = primitive->value_as_ArgMax(); + ArgMinMaxParameter *arg_param = reinterpret_cast<ArgMinMaxParameter *>(malloc(sizeof(ArgMinMaxParameter))); + if (arg_param == nullptr) { + MS_LOG(ERROR) << "malloc ArgMinMaxParameter failed."; + return nullptr; + } + memset(arg_param, 0, sizeof(ArgMinMaxParameter)); + arg_param->op_parameter_.type_ = schema::PrimitiveType_ArgMaxFusion; + + arg_param->axis_ = argmax_prim->axis(); + arg_param->topk_ = argmax_prim->topK(); + arg_param->axis_type_ = argmax_prim->axisType(); + arg_param->out_value_ = argmax_prim->outMaxValue(); + arg_param->keep_dims_ = argmax_prim->keepDims(); + arg_param->get_max_ = true; + return reinterpret_cast<OpParameter *>(arg_param); +} +} // namespace + +Registry g_argMaxV0ParameterRegistry(schema::v0::PrimitiveType_ArgMax, PopulateArgMaxParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/argmin_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/argmin_populate_v0.cc new file mode 100644 index 0000000000..10d82fa59f --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/argmin_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/arg_min_max_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateArgMinParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto argmin_prim = primitive->value_as_ArgMin(); + ArgMinMaxParameter *arg_param = reinterpret_cast<ArgMinMaxParameter *>(malloc(sizeof(ArgMinMaxParameter))); + if (arg_param == nullptr) { + MS_LOG(ERROR) << "malloc ArgMinMaxParameter failed."; + return nullptr; + } + memset(arg_param, 0, sizeof(ArgMinMaxParameter)); + arg_param->op_parameter_.type_ = schema::PrimitiveType_ArgMinFusion; + + arg_param->axis_ = argmin_prim->axis(); + arg_param->topk_ = argmin_prim->topK(); + arg_param->axis_type_ = argmin_prim->axisType(); + arg_param->out_value_ = argmin_prim->outMaxValue(); + arg_param->keep_dims_ = argmin_prim->keepDims(); + arg_param->get_max_ = false; + return reinterpret_cast<OpParameter *>(arg_param); +} +} // namespace + +Registry g_argMinV0ParameterRegistry(schema::v0::PrimitiveType_ArgMin, PopulateArgMinParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.cc new file mode 100644 index 0000000000..8454e57a75 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.cc @@ -0,0 +1,91 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/v0/arithmetic_populate_v0.h" +#include "src/common/log_adapter.h" +#include "src/ops/populate/populate_register.h" +#include "src/common/common.h" + +namespace mindspore { +namespace lite { +ArithmeticParameter *PopulateArithmeticV0CommonPara(const void *prim) { + auto *param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; + return nullptr; + } + memset(param, 0, sizeof(ArithmeticParameter)); + const auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + param->op_parameter_.type_ = primitive->value_type(); + param->broadcasting_ = false; + param->ndim_ = 0; + param->activation_type_ = 0; + return param; +} + +OpParameter *PopulateArithmeticV0(const void *primitive) { + ArithmeticParameter *param = PopulateArithmeticV0CommonPara(primitive); + if (param == nullptr) { + MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed."; + return nullptr; + } + int type = param->op_parameter_.type_; + if (type == schema::v0::PrimitiveType_RealDiv) { + param->op_parameter_.type_ = schema::PrimitiveType_RealDiv; + } else if (type == schema::v0::PrimitiveType_LogicalAnd) { + param->op_parameter_.type_ = schema::PrimitiveType_LogicalAnd; + } else if (type == schema::v0::PrimitiveType_LogicalOr) { + param->op_parameter_.type_ = schema::PrimitiveType_LogicalOr; + } else if (type == schema::v0::PrimitiveType_Equal) { + param->op_parameter_.type_ = schema::PrimitiveType_Equal; + } else if (type == schema::v0::PrimitiveType_NotEqual) { + param->op_parameter_.type_ = schema::PrimitiveType_NotEqual; + } else if (type == schema::v0::PrimitiveType_Less) { + param->op_parameter_.type_ = schema::PrimitiveType_Less; + } else if (type == schema::v0::PrimitiveType_LessEqual) { + param->op_parameter_.type_ = schema::PrimitiveType_LessEqual; + } else if (type == schema::v0::PrimitiveType_Greater) { + param->op_parameter_.type_ = schema::PrimitiveType_Greater; + } else if (type == schema::v0::PrimitiveType_GreaterEqual) { + param->op_parameter_.type_ = schema::PrimitiveType_GreaterEqual; + } else if (type == schema::v0::PrimitiveType_Maximum) { + param->op_parameter_.type_ = schema::PrimitiveType_Maximum; + } else if (type == schema::v0::PrimitiveType_Minimum) { + param->op_parameter_.type_ = schema::PrimitiveType_Minimum; + } else if (type == schema::v0::PrimitiveType_FloorDiv) { + param->op_parameter_.type_ = schema::PrimitiveType_FloorDiv; + } else if (type == schema::v0::PrimitiveType_FloorMod) { + param->op_parameter_.type_ = schema::PrimitiveType_FloorMod; + } + return reinterpret_cast<OpParameter *>(param); +} + +Registry g_realDivV0ParameterRegistry(schema::v0::PrimitiveType_RealDiv, PopulateArithmeticV0, SCHEMA_V0); +Registry g_logicalAndV0ParameterRegistry(schema::v0::PrimitiveType_LogicalAnd, PopulateArithmeticV0, SCHEMA_V0); +Registry g_logicalOrV0parameterRegistry(schema::v0::PrimitiveType_LogicalOr, PopulateArithmeticV0, SCHEMA_V0); +Registry g_equalV0ParameterRegistry(schema::v0::PrimitiveType_Equal, PopulateArithmeticV0, SCHEMA_V0); +Registry g_notEqualV0ParameterRegistry(schema::v0::PrimitiveType_NotEqual, PopulateArithmeticV0, SCHEMA_V0); +Registry g_lessV0ParameterRegistry(schema::v0::PrimitiveType_Less, PopulateArithmeticV0, SCHEMA_V0); +Registry g_lessEqualV0ParameterRegistry(schema::v0::PrimitiveType_LessEqual, PopulateArithmeticV0, SCHEMA_V0); +Registry 
g_greaterV0ParameterRegistry(schema::v0::PrimitiveType_Greater, PopulateArithmeticV0, SCHEMA_V0); +Registry g_greaterEqualV0ParameterRegistry(schema::v0::PrimitiveType_GreaterEqual, PopulateArithmeticV0, SCHEMA_V0); +Registry g_maximumV0ParameterRegistry(schema::v0::PrimitiveType_Maximum, PopulateArithmeticV0, SCHEMA_V0); +Registry g_minimumV0ParameterRegistry(schema::v0::PrimitiveType_Minimum, PopulateArithmeticV0, SCHEMA_V0); +Registry g_floorDivV0ParameterRegistry(schema::v0::PrimitiveType_FloorDiv, PopulateArithmeticV0, SCHEMA_V0); +Registry g_floorModV0ParameterRegistry(schema::v0::PrimitiveType_FloorMod, PopulateArithmeticV0, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.h b/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.h new file mode 100644 index 0000000000..c2612e6c20 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/arithmetic_populate_v0.h @@ -0,0 +1,28 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_V0_ARITHMETIC_POPULATE_H_ +#define MINDSPORE_LITE_SRC_OPS_POPULATE_V0_ARITHMETIC_POPULATE_H_ + +#include "nnacl/arithmetic.h" + +namespace mindspore { +namespace lite { +ArithmeticParameter *PopulateArithmeticV0CommonPara(const void *primitive); +OpParameter *PopulateArithmeticV0(const void *primitive); + +} // namespace lite +} // namespace mindspore +#endif // MINDSPORE_LITE_SRC_OPS_POPULATE_V0_ARITHMETIC_POPULATE_H_ diff --git a/mindspore/lite/src/ops/populate/v0/arithmetic_self_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/arithmetic_self_populate_v0.cc new file mode 100644 index 0000000000..5e497faf61 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/arithmetic_self_populate_v0.cc @@ -0,0 +1,83 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/common/log_adapter.h" +#include "nnacl/arithmetic_self_parameter.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateArithmeticSelfV0(const void *prim) { + ArithmeticSelfParameter *arithmetic_self_param = + reinterpret_cast<ArithmeticSelfParameter *>(malloc(sizeof(ArithmeticSelfParameter))); + if (arithmetic_self_param == nullptr) { + MS_LOG(ERROR) << "malloc ArithmeticSelfParameter failed."; + return nullptr; + } + memset(arithmetic_self_param, 0, sizeof(ArithmeticSelfParameter)); + auto primitive = static_cast<const schema::v0::Primitive *>(prim); + int type = primitive->value_type(); + if (type == schema::v0::PrimitiveType_Abs) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Abs; + } else if (type == schema::v0::PrimitiveType_Cos) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Cos; + } else if (type == schema::v0::PrimitiveType_Sin) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Sin; + } else if (type == schema::v0::PrimitiveType_Log) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Log; + } else if (type == schema::v0::PrimitiveType_Neg) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Neg; + } else if (type == schema::v0::PrimitiveType_NegGrad) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_NegGrad; + } else if (type == schema::v0::PrimitiveType_LogGrad) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_LogGrad; + } else if (type == schema::v0::PrimitiveType_Sqrt) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Sqrt; + } else if (type == schema::v0::PrimitiveType_Square) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Square; + } else if (type == schema::v0::PrimitiveType_Rsqrt) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Rsqrt; + } else if (type == schema::v0::PrimitiveType_LogicalNot) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_LogicalNot; + } else if (type == schema::v0::PrimitiveType_Floor) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Floor; + } else if (type == schema::v0::PrimitiveType_Ceil) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Ceil; + } else if (type == schema::v0::PrimitiveType_Round) { + arithmetic_self_param->op_parameter_.type_ = schema::PrimitiveType_Round; + } + return reinterpret_cast<OpParameter *>(arithmetic_self_param); +} +} // namespace + +Registry g_absV0ParameterRegistry(schema::v0::PrimitiveType_Abs, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_cosV0ParameterRegistry(schema::v0::PrimitiveType_Cos, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_sinV0ParameterRegistry(schema::v0::PrimitiveType_Sin, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_logV0ParameterRegistry(schema::v0::PrimitiveType_Log, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_negV0ParameterRegistry(schema::v0::PrimitiveType_Neg, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_negGradV0ParameterRegistry(schema::v0::PrimitiveType_NegGrad, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_logGradV0ParameterRegistry(schema::v0::PrimitiveType_LogGrad, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry g_sqrtV0ParameterRegistry(schema::v0::PrimitiveType_Sqrt, PopulateArithmeticSelfV0, SCHEMA_V0); +Registry 
g_squareV0ParameterRegistry(schema::v0::PrimitiveType_Square, PopulateArithmeticSelfV0, SCHEMA_V0);
+Registry g_rsqrtV0ParameterRegistry(schema::v0::PrimitiveType_Rsqrt, PopulateArithmeticSelfV0, SCHEMA_V0);
+Registry g_logicalNotV0ParameterRegistry(schema::v0::PrimitiveType_LogicalNot, PopulateArithmeticSelfV0, SCHEMA_V0);
+Registry g_floorV0ParameterRegistry(schema::v0::PrimitiveType_Floor, PopulateArithmeticSelfV0, SCHEMA_V0);
+Registry g_ceilV0ParameterRegistry(schema::v0::PrimitiveType_Ceil, PopulateArithmeticSelfV0, SCHEMA_V0);
+Registry g_roundV0ParameterRegistry(schema::v0::PrimitiveType_Round, PopulateArithmeticSelfV0, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/assert_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/assert_populate_v0.cc
new file mode 100644
index 0000000000..d4ed74d179
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/assert_populate_v0.cc
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateAssertParameter(const void *prim) {
+  OpParameter *assert_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (assert_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc AssertParameter failed.";
+    return nullptr;
+  }
+  memset(assert_parameter, 0, sizeof(OpParameter));
+  assert_parameter->type_ = schema::PrimitiveType_Assert;
+
+  return reinterpret_cast<OpParameter *>(assert_parameter);
+}
+} // namespace
+
+Registry g_assertV0ParameterRegistry(schema::v0::PrimitiveType_Assert, PopulateAssertParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
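
Every file in this patch ends with the same move: a file-scope Registry object whose constructor runs during static initialization and binds a schema-v0 primitive type to its populate function, keyed by schema version. Below is a minimal sketch of that mechanism, assuming signatures close to those implied by populate_register.h; the map layout and member names here are illustrative, not the actual implementation.

// Hedged sketch of the registration pattern used throughout this patch.
// OpParameter is the nnacl base struct; everything else below is assumed.
#include <map>
#include <utility>

struct OpParameter;  // defined in nnacl/op_base.h
typedef OpParameter *(*ParameterGen)(const void *prim);

class PopulateRegistry {
 public:
  static PopulateRegistry *GetInstance() {
    static PopulateRegistry registry;  // created on first use, thread-safe in C++11
    return &registry;
  }
  void InsertParameterMap(int type, ParameterGen creator, int version) {
    // Keyed by (version, type) so SCHEMA_V0 and SCHEMA_CUR populate
    // functions for the same operator can coexist.
    creators_[std::make_pair(version, type)] = creator;
  }
  ParameterGen GetParameterCreator(int type, int version) {
    auto it = creators_.find(std::make_pair(version, type));
    return it == creators_.end() ? nullptr : it->second;
  }

 private:
  std::map<std::pair<int, int>, ParameterGen> creators_;
};

class Registry {
 public:
  Registry(int primitive_type, ParameterGen creator, int version) {
    // This is all a file-scope `Registry g_xxxV0ParameterRegistry(...)` does.
    PopulateRegistry::GetInstance()->InsertParameterMap(primitive_type, creator, version);
  }
};

Under this sketch, PopulateRegistry::GetInstance()->GetParameterCreator(schema::v0::PrimitiveType_Assert, SCHEMA_V0) would resolve to PopulateAssertParameter above.
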
diff --git a/mindspore/lite/src/ops/populate/v0/assign_add_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/assign_add_populate_v0.cc
new file mode 100644
index 0000000000..b3ec9280f8
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/assign_add_populate_v0.cc
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateAssignAddParameter(const void *prim) {
+  OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc AssignAdd Parameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(OpParameter));
+  param->type_ = schema::PrimitiveType_AssignAdd;
+  return param;
+}
+} // namespace
+
+Registry g_assignAddV0ParameterRegistry(schema::v0::PrimitiveType_AssignAdd, PopulateAssignAddParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/assign_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/assign_populate_v0.cc
new file mode 100644
index 0000000000..80539f0cd6
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/assign_populate_v0.cc
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateAssignParameter(const void *prim) {
+  OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc Assign Parameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(OpParameter));
+  param->type_ = schema::PrimitiveType_Assign;
+  return param;
+}
+} // namespace
+
+Registry g_assignV0ParameterRegistry(schema::v0::PrimitiveType_Assign, PopulateAssignParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/batch_norm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/batch_norm_populate_v0.cc
new file mode 100644
index 0000000000..71dee7f1ce
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/batch_norm_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/batchnorm_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateBatchNormParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto batch_norm_prim = primitive->value_as_BatchNorm(); + + BatchNormParameter *batch_norm_param = reinterpret_cast<BatchNormParameter *>(malloc(sizeof(BatchNormParameter))); + if (batch_norm_param == nullptr) { + MS_LOG(ERROR) << "malloc BatchNormParameter failed."; + return nullptr; + } + memset(batch_norm_param, 0, sizeof(BatchNormParameter)); + batch_norm_param->op_parameter_.type_ = schema::PrimitiveType_BatchNorm; + batch_norm_param->epsilon_ = batch_norm_prim->epsilon(); + batch_norm_param->fused_ = false; + return reinterpret_cast<OpParameter *>(batch_norm_param); +} +} // namespace + +Registry g_batchNormV0ParameterRegistry(schema::v0::PrimitiveType_BatchNorm, PopulateBatchNormParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/batch_to_space_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/batch_to_space_populate_v0.cc new file mode 100644 index 0000000000..a8ff816a90 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/batch_to_space_populate_v0.cc @@ -0,0 +1,70 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/batch_to_space.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateBatchToSpaceParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto batch_to_space_prim = primitive->value_as_BatchToSpace(); + BatchToSpaceParameter *batch_space_param = + reinterpret_cast<BatchToSpaceParameter *>(malloc(sizeof(BatchToSpaceParameter))); + if (batch_space_param == nullptr) { + MS_LOG(ERROR) << "malloc BatchToSpaceParameter failed."; + return nullptr; + } + memset(batch_space_param, 0, sizeof(BatchToSpaceParameter)); + if (primitive->value_type() == schema::v0::PrimitiveType_BatchToSpace) { + batch_space_param->op_parameter_.type_ = schema::PrimitiveType_BatchToSpace; + } else { + batch_space_param->op_parameter_.type_ = schema::PrimitiveType_BatchToSpaceND; + } + + auto block_shape = batch_to_space_prim->blockShape(); + if (block_shape->size() != BATCH_TO_SPACE_BLOCK_SHAPE_SIZE) { + MS_LOG(ERROR) << "batch_to_space blockShape size should be " << BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; + free(batch_space_param); + return nullptr; + } + + auto crops = batch_to_space_prim->crops(); + if (crops->size() != COMM_SHAPE_SIZE) { + MS_LOG(ERROR) << "batch_to_space crops size should be " << COMM_SHAPE_SIZE; + free(batch_space_param); + return nullptr; + } + + for (int i = 0; i < BATCH_TO_SPACE_BLOCK_SHAPE_SIZE; ++i) { + batch_space_param->block_shape_[i] = *(block_shape->begin() + i); + } + + for (int i = 0; i < COMM_SHAPE_SIZE; ++i) { + batch_space_param->crops_[i] = *(crops->begin() + i); + } + return reinterpret_cast<OpParameter *>(batch_space_param); +} +} // namespace + +Registry g_batchToSpaceV0ParameterRegistry(schema::v0::PrimitiveType_BatchToSpace, PopulateBatchToSpaceParameter, + SCHEMA_V0); +Registry g_batchToSpaceNDV0ParameterRegistry(schema::v0::PrimitiveType_BatchToSpaceND, PopulateBatchToSpaceParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/bias_add_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/bias_add_populate_v0.cc new file mode 100644 index 0000000000..be86128c38 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/bias_add_populate_v0.cc @@ -0,0 +1,39 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/arithmetic.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateBiasAddParameter(const void *prim) {
+  ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter)));
+  if (arithmetic_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ArithmeticParameter failed.";
+    return nullptr;
+  }
+  memset(arithmetic_param, 0, sizeof(ArithmeticParameter));
+  arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_BiasAdd;
+
+  return reinterpret_cast<OpParameter *>(arithmetic_param);
+}
+} // namespace
+
+Registry g_biasAddV0ParameterRegistry(schema::v0::PrimitiveType_BiasAdd, PopulateBiasAddParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/bias_grad_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/bias_grad_populate_v0.cc
new file mode 100644
index 0000000000..d90d9dd4cb
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/bias_grad_populate_v0.cc
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/arithmetic.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateBiasGradParameter(const void *prim) {
+  ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter)));
+  if (arithmetic_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ArithmeticParameter failed.";
+    return nullptr;
+  }
+  memset(arithmetic_param, 0, sizeof(ArithmeticParameter));
+  arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_BiasAddGrad;
+
+  return reinterpret_cast<OpParameter *>(arithmetic_param);
+}
+} // namespace
+
+Registry g_biasGradV0ParameterRegistry(schema::v0::PrimitiveType_BiasGrad, PopulateBiasGradParameter,
+                                       SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_grad_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_grad_populate_v0.cc
new file mode 100644
index 0000000000..9dadf89add
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_grad_populate_v0.cc
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32_grad/binary_cross_entropy_grad.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateBinaryCrossEntropyGradParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto binary_cross_entropy_grad_prim = primitive->value_as_BinaryCrossEntropyGrad(); + BinaryCrossEntropyGradParameter *bce_param = + reinterpret_cast<BinaryCrossEntropyGradParameter *>(malloc(sizeof(BinaryCrossEntropyGradParameter))); + if (bce_param == nullptr) { + MS_LOG(ERROR) << "malloc BinaryCrossEntropyGrad Parameter failed."; + return nullptr; + } + memset(bce_param, 0, sizeof(BinaryCrossEntropyGradParameter)); + bce_param->op_parameter_.type_ = schema::PrimitiveType_BinaryCrossEntropyGrad; + + bce_param->reduction = binary_cross_entropy_grad_prim->reduction(); + return reinterpret_cast<OpParameter *>(bce_param); +} +} // namespace + +Registry g_binaryCrossEntropyGradV0ParameterRegistry(schema::v0::PrimitiveType_BinaryCrossEntropyGrad, + PopulateBinaryCrossEntropyGradParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_populate_v0.cc new file mode 100644 index 0000000000..2f58d24cad --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/binary_cross_entropy_populate_v0.cc @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32_grad/binary_cross_entropy.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateBinaryCrossEntropyParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto binary_cross_entropy_prim = primitive->value_as_BinaryCrossEntropy(); + BinaryCrossEntropyParameter *bce_param = + reinterpret_cast<BinaryCrossEntropyParameter *>(malloc(sizeof(BinaryCrossEntropyParameter))); + if (bce_param == nullptr) { + MS_LOG(ERROR) << "malloc BinaryCrossEntropy Parameter failed."; + return nullptr; + } + memset(bce_param, 0, sizeof(BinaryCrossEntropyParameter)); + bce_param->op_parameter_.type_ = schema::PrimitiveType_BinaryCrossEntropy; + + bce_param->reduction = binary_cross_entropy_prim->reduction(); + return reinterpret_cast<OpParameter *>(bce_param); +} +} // namespace + +Registry g_binaryCrossEntropyV0ParameterRegistry(schema::v0::PrimitiveType_BinaryCrossEntropy, + PopulateBinaryCrossEntropyParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/broadcast_to_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/broadcast_to_populate_v0.cc new file mode 100644 index 0000000000..2d9a37bad6 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/broadcast_to_populate_v0.cc @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/broadcast_to_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateBroadcastToParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto broadcast_to_prim = primitive->value_as_BroadcastTo(); + BroadcastToParameter *broadcast_param = + reinterpret_cast<BroadcastToParameter *>(malloc(sizeof(BroadcastToParameter))); + if (broadcast_param == nullptr) { + MS_LOG(ERROR) << "malloc BroadcastToParameter failed."; + return nullptr; + } + memset(broadcast_param, 0, sizeof(BroadcastToParameter)); + + broadcast_param->op_parameter_.type_ = schema::PrimitiveType_BroadcastTo; + auto dst_shape = broadcast_to_prim->dst_shape(); + broadcast_param->shape_size_ = dst_shape->size(); + for (size_t i = 0; i < broadcast_param->shape_size_; ++i) { + broadcast_param->shape_[i] = *(dst_shape->begin() + i); + } + return reinterpret_cast<OpParameter *>(broadcast_param); +} +} // namespace + +Registry g_broadcastToV0ParameterRegistry(schema::v0::PrimitiveType_BroadcastTo, PopulateBroadcastToParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/cast_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/cast_populate_v0.cc new file mode 100644 index 0000000000..8f269f8c47 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/cast_populate_v0.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/cast_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateCastParameter(const void *prim) { + CastParameter *cast_param = reinterpret_cast<CastParameter *>(malloc(sizeof(CastParameter))); + if (cast_param == nullptr) { + MS_LOG(ERROR) << "malloc CastParameter failed."; + return nullptr; + } + memset(cast_param, 0, sizeof(CastParameter)); + cast_param->op_parameter_.type_ = schema::PrimitiveType_Cast; + return reinterpret_cast<OpParameter *>(cast_param); +} +} // namespace + +Registry g_castV0ParameterRegistry(schema::v0::PrimitiveType_Cast, PopulateCastParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/clip_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/clip_populate_v0.cc new file mode 100644 index 0000000000..5dc3f4e123 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/clip_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateClipParameter(const void *prim) { + OpParameter *act_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (act_param == nullptr) { + MS_LOG(ERROR) << "malloc ClipParameter failed."; + return nullptr; + } + memset(act_param, 0, sizeof(OpParameter)); + act_param->type_ = schema::PrimitiveType_Clip; + return reinterpret_cast<OpParameter *>(act_param); +} +} // namespace + +Registry g_clipV0ParameterRegistry(schema::v0::PrimitiveType_Clip, PopulateClipParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/common_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/common_populate_v0.cc new file mode 100644 index 0000000000..c8792e143f --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/common_populate_v0.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateCommonParameter(const void *prim) { + auto *common_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (common_parameter == nullptr) { + MS_LOG(ERROR) << "malloc OpParameter failed."; + return nullptr; + } + memset(common_parameter, 0, sizeof(OpParameter)); + auto type = reinterpret_cast<const schema::v0::Primitive *>(prim)->value_type(); + if (type == schema::v0::PrimitiveType_ZerosLike) { + common_parameter->type_ = schema::PrimitiveType_ZerosLike; + } else { + common_parameter->type_ = type; + } + return common_parameter; +} +} // namespace + +Registry g_zerosLikeV0ParameterRegistry(schema::v0::PrimitiveType_ZerosLike, PopulateCommonParameter, SCHEMA_V0); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/concat_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/concat_populate_v0.cc new file mode 100644 index 0000000000..76e362de84 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/concat_populate_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/concat_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateConcatParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto concat_prim = primitive->value_as_Concat(); + ConcatParameter *concat_param = reinterpret_cast<ConcatParameter *>(malloc(sizeof(ConcatParameter))); + if (concat_param == nullptr) { + MS_LOG(ERROR) << "malloc ConcatParameter failed."; + return nullptr; + } + memset(concat_param, 0, sizeof(ConcatParameter)); + concat_param->op_parameter_.type_ = schema::PrimitiveType_Concat; + + concat_param->axis_ = concat_prim->axis(); + return reinterpret_cast<OpParameter *>(concat_param); +} +} // namespace + +Registry g_concatV0ParameterRegistry(schema::v0::PrimitiveType_Concat, PopulateConcatParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc new file mode 100644 index 0000000000..761def4190 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/constant_of_shape_populate_v0.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/constant_of_shape.h"
+
+namespace mindspore::lite {
+namespace {
+OpParameter *PopulateConstantOfShapeParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto constant_of_shape_prim = primitive->value_as_ConstantOfShape();
+
+  ConstantOfShapeParameter *param =
+    reinterpret_cast<ConstantOfShapeParameter *>(malloc(sizeof(ConstantOfShapeParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc ConstantOfShapeParameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(ConstantOfShapeParameter));
+  param->op_parameter_.type_ = schema::PrimitiveType_ConstantOfShape;
+  auto value = constant_of_shape_prim->value();
+  param->data_type_ = constant_of_shape_prim->dataType();
+  if (value->size() != 1) {
+    MS_LOG(ERROR) << "ConstantOfShape value should contain exactly one element, but got " << value->size();
+  } else {
+    switch (param->data_type_) {
+      case kNumberTypeFloat32:
+        param->value_.f32_value_ = constant_of_shape_prim->value()->data()[0];
+        break;
+      case kNumberTypeInt32:
+        param->value_.int32_value_ = constant_of_shape_prim->value()->data()[0];
+        break;
+      default:
+        MS_LOG(ERROR) << "unsupported data type for ConstantOfShape: " << param->data_type_;
+    }
+  }
+  return reinterpret_cast<OpParameter *>(param);
+}
+} // namespace
+
+Registry g_constantOfShapeV0ParameterRegistry(schema::v0::PrimitiveType_ConstantOfShape,
+                                              PopulateConstantOfShapeParameter, SCHEMA_V0);
+} // namespace mindspore::lite
diff --git a/mindspore/lite/src/ops/populate/v0/conv2d_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/conv2d_populate_v0.cc
new file mode 100644
index 0000000000..fa4292e79b
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/conv2d_populate_v0.cc
@@ -0,0 +1,81 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/conv_parameter.h"
+#include "nnacl/op_base.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateConvParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto conv2d_prim = primitive->value_as_Conv2D();
+  ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
+  if (conv_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ConvParameter failed.";
+    return nullptr;
+  }
+  memset(conv_param, 0, sizeof(ConvParameter));
+  conv_param->op_parameter_.type_ = schema::PrimitiveType_Conv2DFusion;
+
+  conv_param->kernel_h_ = conv2d_prim->kernelH();
+  conv_param->kernel_w_ = conv2d_prim->kernelW();
+  conv_param->group_ = conv2d_prim->group();
+  conv_param->stride_h_ = conv2d_prim->strideH();
+  conv_param->stride_w_ = conv2d_prim->strideW();
+
+  conv_param->pad_u_ = conv2d_prim->padUp();
+  conv_param->pad_d_ = conv2d_prim->padDown();
+  conv_param->pad_l_ = conv2d_prim->padLeft();
+  conv_param->pad_r_ = conv2d_prim->padRight();
+  conv_param->dilation_h_ = conv2d_prim->dilateH();
+  conv_param->dilation_w_ = conv2d_prim->dilateW();
+  conv_param->input_channel_ = conv2d_prim->channelIn();
+  conv_param->output_channel_ = conv2d_prim->channelOut();
+  auto pad_mode = conv2d_prim->padMode();
+
+  switch (pad_mode) {
+    case schema::v0::PadMode_SAME_UPPER:
+      conv_param->pad_mode_ = Pad_same;
+      break;
+    case schema::v0::PadMode_VALID:
+      conv_param->pad_mode_ = Pad_valid;
+      break;
+    default:
+      conv_param->pad_mode_ = Pad_pad;
+      break;
+  }
+  auto act_type = conv2d_prim->activationType();
+  switch (act_type) {
+    case schema::v0::ActivationType_RELU:
+      conv_param->act_type_ = ActType_Relu;
+      break;
+    case schema::v0::ActivationType_RELU6:
+      conv_param->act_type_ = ActType_Relu6;
+      break;
+    default:
+      conv_param->act_type_ = ActType_No;
+      break;
+  }
+  return reinterpret_cast<OpParameter *>(conv_param);
+}
+} // namespace
+
+Registry g_conv2DV0ParameterRegistry(schema::v0::PrimitiveType_Conv2D, PopulateConvParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
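
The padMode and activationType switches in PopulateConvParameter above reappear verbatim in the DeConv2D, DeDepthwiseConv2D and DepthwiseConv2D populate functions later in this patch. A hedged sketch of how the two mappings could be factored into shared helpers; the helper names are illustrative, not part of the patch, while PadMode/ActType and their values come from nnacl/op_base.h:

// Illustrative helpers only; the patch itself repeats these switches inline.
static PadMode ConvertV0PadMode(schema::v0::PadMode pad_mode) {
  switch (pad_mode) {
    case schema::v0::PadMode_SAME_UPPER:
      return Pad_same;
    case schema::v0::PadMode_VALID:
      return Pad_valid;
    default:
      return Pad_pad;  // explicit paddings come from padUp/padDown/padLeft/padRight
  }
}

static ActType ConvertV0ActType(schema::v0::ActivationType act_type) {
  switch (act_type) {
    case schema::v0::ActivationType_RELU:
      return ActType_Relu;
    case schema::v0::ActivationType_RELU6:
      return ActType_Relu6;
    default:
      return ActType_No;  // fused activations other than ReLU/ReLU6 are not mapped
  }
}

With these, each conv-family populate function would reduce to plain field copies plus `conv_param->pad_mode_ = ConvertV0PadMode(...)` and `conv_param->act_type_ = ConvertV0ActType(...)`.
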
diff --git a/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc
new file mode 100644
index 0000000000..f597dfb745
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/crop_populate_v0.cc
@@ -0,0 +1,51 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/crop_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateCropParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto crop_prim = primitive->value_as_Crop();
+
+  auto param_offset = crop_prim->offsets();
+  if (param_offset->size() > COMM_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "crop_param offset size(" << param_offset->size() << ") should be <= " << COMM_SHAPE_SIZE;
+    return nullptr;
+  }
+  CropParameter *crop_param = reinterpret_cast<CropParameter *>(malloc(sizeof(CropParameter)));
+  if (crop_param == nullptr) {
+    MS_LOG(ERROR) << "malloc CropParameter failed.";
+    return nullptr;
+  }
+  memset(crop_param, 0, sizeof(CropParameter));
+  crop_param->op_parameter_.type_ = schema::PrimitiveType_Crop;
+  crop_param->axis_ = crop_prim->axis();
+  crop_param->offset_size_ = param_offset->size();
+  for (size_t i = 0; i < param_offset->size(); ++i) {
+    crop_param->offset_[i] = *(param_offset->begin() + i);
+  }
+  return reinterpret_cast<OpParameter *>(crop_param);
+}
+} // namespace
+
+Registry g_cropV0ParameterRegistry(schema::v0::PrimitiveType_Crop, PopulateCropParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/custom_extract_features_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/custom_extract_features_populate_v0.cc
new file mode 100644
index 0000000000..06c5ba9d45
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/custom_extract_features_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateExtractFeaturesParameter(const void *prim) {
+  OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc OpParameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(OpParameter));
+  auto type = reinterpret_cast<const schema::v0::Primitive *>(prim)->value_type();
+  if (type == schema::v0::PrimitiveType_CustomExtractFeatures) {
+    param->type_ = schema::PrimitiveType_CustomExtractFeatures;
+  } else {
+    param->type_ = type;
+  }
+  return param;
+}
+} // namespace
+
+Registry g_customExtractFeaturesV0ParameterRegistry(schema::v0::PrimitiveType_CustomExtractFeatures,
+                                                    PopulateExtractFeaturesParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/custom_normalize_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/custom_normalize_populate_v0.cc
new file mode 100644
index 0000000000..2ee2c36d1f
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/custom_normalize_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateCustomNormalizeParameter(const void *prim) {
+  OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc OpParameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(OpParameter));
+  auto type = reinterpret_cast<const schema::v0::Primitive *>(prim)->value_type();
+  if (type == schema::v0::PrimitiveType_CustomNormalize) {
+    param->type_ = schema::PrimitiveType_CustomNormalize;
+  } else {
+    param->type_ = type;
+  }
+  return param;
+}
+} // namespace
+
+Registry g_customNormalizeV0ParameterRegistry(schema::v0::PrimitiveType_CustomNormalize,
+                                              PopulateCustomNormalizeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/custom_predict_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/custom_predict_populate_v0.cc
new file mode 100644
index 0000000000..990e8e5c1e
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/custom_predict_populate_v0.cc
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/predict_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateCustomPredictParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto custom_predict_prim = primitive->value_as_CustomPredict(); + PredictParameter *param = reinterpret_cast<PredictParameter *>(malloc(sizeof(PredictParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc param failed."; + return nullptr; + } + memset(param, 0, sizeof(PredictParameter)); + param->op_parameter_.type_ = schema::PrimitiveType_CustomPredict; + + param->output_num = custom_predict_prim->outputNum(); + param->weight_threshold = custom_predict_prim->weightThreshold(); + return reinterpret_cast<OpParameter *>(param); +} +} // namespace + +Registry g_customPredictV0ParameterRegistry(schema::v0::PrimitiveType_CustomPredict, PopulateCustomPredictParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/deconv2d_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/deconv2d_populate_v0.cc new file mode 100644 index 0000000000..f25ee53454 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/deconv2d_populate_v0.cc @@ -0,0 +1,77 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/conv_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateDeconvParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto deconv2d_prim = primitive->value_as_DeConv2D(); + ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); + if (conv_param == nullptr) { + MS_LOG(ERROR) << "malloc ConvParameter failed."; + return nullptr; + } + memset(conv_param, 0, sizeof(ConvParameter)); + conv_param->op_parameter_.type_ = schema::PrimitiveType_Conv2dTransposeFusion; + conv_param->group_ = 1; + + conv_param->kernel_h_ = deconv2d_prim->kernelH(); + conv_param->kernel_w_ = deconv2d_prim->kernelW(); + conv_param->stride_h_ = deconv2d_prim->strideH(); + conv_param->stride_w_ = deconv2d_prim->strideW(); + + conv_param->pad_u_ = deconv2d_prim->padUp(); + conv_param->pad_d_ = deconv2d_prim->padDown(); + conv_param->pad_l_ = deconv2d_prim->padLeft(); + conv_param->pad_r_ = deconv2d_prim->padRight(); + conv_param->dilation_h_ = deconv2d_prim->dilateH(); + conv_param->dilation_w_ = deconv2d_prim->dilateW(); + auto act_type = deconv2d_prim->activationType(); + switch (act_type) { + case schema::v0::ActivationType_RELU: + conv_param->act_type_ = ActType_Relu; + break; + case schema::v0::ActivationType_RELU6: + conv_param->act_type_ = ActType_Relu6; + break; + default: + conv_param->act_type_ = ActType_No; + break; + } + auto pad_mode = deconv2d_prim->padMode(); + switch (pad_mode) { + case schema::v0::PadMode_SAME_UPPER: + conv_param->pad_mode_ = Pad_same; + break; + case schema::v0::PadMode_VALID: + conv_param->pad_mode_ = Pad_valid; + break; + default: + conv_param->pad_mode_ = Pad_pad; + break; + } + return reinterpret_cast<OpParameter *>(conv_param); +} +} // namespace + +Registry g_deConv2DV0ParameterRegistry(schema::v0::PrimitiveType_DeConv2D, PopulateDeconvParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/dedepthwise_conv2d_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/dedepthwise_conv2d_populate_v0.cc new file mode 100644 index 0000000000..5463f2520a --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/dedepthwise_conv2d_populate_v0.cc @@ -0,0 +1,82 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/conv_parameter.h" + +namespace mindspore { +namespace lite { +namespace { + +OpParameter *PopulateDeconvDwParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto dedepthwise_conv2d_prim = primitive->value_as_DeDepthwiseConv2D(); + ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); + if (conv_param == nullptr) { + MS_LOG(ERROR) << "malloc ConvParameter failed."; + return nullptr; + } + memset(conv_param, 0, sizeof(ConvParameter)); + conv_param->op_parameter_.type_ = schema::PrimitiveType_Conv2dTransposeFusion; + + conv_param->group_ = dedepthwise_conv2d_prim->channelIn(); + + conv_param->kernel_h_ = dedepthwise_conv2d_prim->kernelH(); + conv_param->kernel_w_ = dedepthwise_conv2d_prim->kernelW(); + conv_param->stride_h_ = dedepthwise_conv2d_prim->strideH(); + conv_param->stride_w_ = dedepthwise_conv2d_prim->strideW(); + + conv_param->pad_u_ = dedepthwise_conv2d_prim->padUp(); + conv_param->pad_d_ = dedepthwise_conv2d_prim->padDown(); + conv_param->pad_l_ = dedepthwise_conv2d_prim->padLeft(); + conv_param->pad_r_ = dedepthwise_conv2d_prim->padRight(); + conv_param->dilation_h_ = dedepthwise_conv2d_prim->dilateH(); + conv_param->dilation_w_ = dedepthwise_conv2d_prim->dilateW(); + auto act_type = dedepthwise_conv2d_prim->activationType(); + switch (act_type) { + case schema::v0::ActivationType_RELU: + conv_param->act_type_ = ActType_Relu; + break; + case schema::v0::ActivationType_RELU6: + conv_param->act_type_ = ActType_Relu6; + break; + default: + conv_param->act_type_ = ActType_No; + break; + } + + auto pad_mode = dedepthwise_conv2d_prim->padMode(); + switch (pad_mode) { + case schema::v0::PadMode_SAME_UPPER: + conv_param->pad_mode_ = Pad_same; + break; + case schema::v0::PadMode_VALID: + conv_param->pad_mode_ = Pad_valid; + break; + default: + conv_param->pad_mode_ = Pad_pad; + break; + } + conv_param->channel_multiplie_ = dedepthwise_conv2d_prim->channelMultiplier(); + return reinterpret_cast<OpParameter *>(conv_param); +} +} // namespace + +Registry g_deDepthwiseConv2DV0ParameterRegistry(schema::v0::PrimitiveType_DeDepthwiseConv2D, PopulateDeconvDwParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/depth_to_space_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/depth_to_space_populate_v0.cc new file mode 100644 index 0000000000..0f5e4975f2 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/depth_to_space_populate_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/depth_to_space_parameter.h" + +namespace mindspore { +namespace lite { +namespace { + +OpParameter *PopulateDepthToSpaceParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto depth_to_space_prim = primitive->value_as_DepthToSpace(); + DepthToSpaceParameter *depth_space_param = + reinterpret_cast<DepthToSpaceParameter *>(malloc(sizeof(DepthToSpaceParameter))); + if (depth_space_param == nullptr) { + MS_LOG(ERROR) << "malloc DepthToSpaceParameter failed."; + return nullptr; + } + memset(depth_space_param, 0, sizeof(DepthToSpaceParameter)); + + depth_space_param->op_parameter_.type_ = schema::PrimitiveType_DepthToSpace; + depth_space_param->block_size_ = depth_to_space_prim->blockSize(); + return reinterpret_cast<OpParameter *>(depth_space_param); +} +} // namespace + +Registry g_depthToSpaceV0ParameterRegistry(schema::v0::PrimitiveType_DepthToSpace, PopulateDepthToSpaceParameter, + SCHEMA_V0); +} // namespace lite + +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/depthwise_conv2d_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/depthwise_conv2d_populate_v0.cc new file mode 100644 index 0000000000..0a42852a85 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/depthwise_conv2d_populate_v0.cc @@ -0,0 +1,83 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/conv_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateConvDwParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto depthwise_conv2d_prim = primitive->value_as_DepthwiseConv2D(); + ConvParameter *conv_param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); + if (conv_param == nullptr) { + MS_LOG(ERROR) << "malloc ConvParameter failed."; + return nullptr; + } + memset(conv_param, 0, sizeof(ConvParameter)); + conv_param->op_parameter_.type_ = schema::PrimitiveType_Conv2DFusion; + + conv_param->group_ = depthwise_conv2d_prim->channelIn(); + + conv_param->kernel_h_ = depthwise_conv2d_prim->kernelH(); + conv_param->kernel_w_ = depthwise_conv2d_prim->kernelW(); + conv_param->stride_h_ = depthwise_conv2d_prim->strideH(); + conv_param->stride_w_ = depthwise_conv2d_prim->strideW(); + + conv_param->pad_u_ = depthwise_conv2d_prim->padUp(); + conv_param->pad_d_ = depthwise_conv2d_prim->padDown(); + conv_param->pad_l_ = depthwise_conv2d_prim->padLeft(); + conv_param->pad_r_ = depthwise_conv2d_prim->padRight(); + conv_param->input_channel_ = depthwise_conv2d_prim->channelIn(); + conv_param->output_channel_ = depthwise_conv2d_prim->channelIn(); + conv_param->dilation_h_ = depthwise_conv2d_prim->dilateH(); + conv_param->dilation_w_ = depthwise_conv2d_prim->dilateW(); + + auto pad_mode = depthwise_conv2d_prim->padMode(); + switch (pad_mode) { + case schema::v0::PadMode_SAME_UPPER: + conv_param->pad_mode_ = Pad_same; + break; + case schema::v0::PadMode_VALID: + conv_param->pad_mode_ = Pad_valid; + break; + default: + conv_param->pad_mode_ = Pad_pad; + break; + } + auto act_type = depthwise_conv2d_prim->activationType(); + switch (act_type) { + case schema::v0::ActivationType_RELU: + conv_param->act_type_ = ActType_Relu; + break; + case schema::v0::ActivationType_RELU6: + conv_param->act_type_ = ActType_Relu6; + break; + default: + conv_param->act_type_ = ActType_No; + break; + } + conv_param->channel_multiplie_ = depthwise_conv2d_prim->channelMultiplier(); + return reinterpret_cast<OpParameter *>(conv_param); +} +} // namespace + +Registry g_depthwiseConv2DV0ParameterRegistry(schema::v0::PrimitiveType_DepthwiseConv2D, PopulateConvDwParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/detection_post_process_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/detection_post_process_populate_v0.cc new file mode 100644 index 0000000000..499ff04900 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/detection_post_process_populate_v0.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/detection_post_process_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateDetectionPostProcessParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto detection_post_process_prim = primitive->value_as_DetectionPostProcess();
+  DetectionPostProcessParameter *detection_post_process_parameter =
+    reinterpret_cast<DetectionPostProcessParameter *>(malloc(sizeof(DetectionPostProcessParameter)));
+  if (detection_post_process_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc DetectionPostProcessParameter failed.";
+    return nullptr;
+  }
+  memset(detection_post_process_parameter, 0, sizeof(DetectionPostProcessParameter));
+  detection_post_process_parameter->op_parameter_.type_ = schema::PrimitiveType_DetectionPostProcess;
+
+  detection_post_process_parameter->h_scale_ = detection_post_process_prim->hScale();
+  detection_post_process_parameter->w_scale_ = detection_post_process_prim->wScale();
+  detection_post_process_parameter->x_scale_ = detection_post_process_prim->xScale();
+  detection_post_process_parameter->y_scale_ = detection_post_process_prim->yScale();
+  detection_post_process_parameter->nms_iou_threshold_ =
+    detection_post_process_prim->NmsIouThreshold();  // NOTE: these v0 schema accessors use upper camel case
+  detection_post_process_parameter->nms_score_threshold_ = detection_post_process_prim->NmsScoreThreshold();
+  detection_post_process_parameter->max_detections_ = detection_post_process_prim->MaxDetections();
+  detection_post_process_parameter->detections_per_class_ = detection_post_process_prim->DetectionsPerClass();
+  detection_post_process_parameter->max_classes_per_detection_ = detection_post_process_prim->MaxClassesPerDetection();
+  detection_post_process_parameter->num_classes_ = detection_post_process_prim->NumClasses();
+  detection_post_process_parameter->use_regular_nms_ = detection_post_process_prim->UseRegularNms();
+  return reinterpret_cast<OpParameter *>(detection_post_process_parameter);
+}
+} // namespace
+
+Registry g_detectionPostProcessV0ParameterRegistry(schema::v0::PrimitiveType_DetectionPostProcess,
+                                                   PopulateDetectionPostProcessParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
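
For context on how these creators are consumed: when the runtime parses a node from a model exported with the old schema, it looks the populate function up by primitive type and schema version, then invokes it. A hedged sketch of that consumer side; the function name and error handling are illustrative, and GetParameterCreator mirrors the registry sketch earlier in this section:

// Illustrative consumer of the populate registry; names are assumptions.
OpParameter *CreateOpParameter(const void *prim) {
  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
  ParameterGen creator =
      PopulateRegistry::GetInstance()->GetParameterCreator(primitive->value_type(), SCHEMA_V0);
  if (creator == nullptr) {
    MS_LOG(ERROR) << "no v0 populate function registered for type " << primitive->value_type();
    return nullptr;
  }
  // Each creator mallocs its parameter struct, so the caller owns the
  // result and must free() it when the kernel is torn down.
  return creator(prim);
}
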
diff --git a/mindspore/lite/src/ops/populate/v0/div_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/div_populate_v0.cc
new file mode 100644
index 0000000000..ead45f94c7
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/div_populate_v0.cc
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/ops/populate/arithmetic_populate.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateDivParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto div_prim = primitive->value_as_Div();
+  ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_DivFusion;
+  param->activation_type_ = div_prim->activationType();
+  return reinterpret_cast<OpParameter *>(param);
+}
+} // namespace
+
+Registry g_divV0ParameterRegistry(schema::v0::PrimitiveType_Div, PopulateDivParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/eltwise_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/eltwise_populate_v0.cc
new file mode 100644
index 0000000000..327c708347
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/eltwise_populate_v0.cc
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/ops/populate/v0/arithmetic_populate_v0.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateEltwiseParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  ArithmeticParameter *param = PopulateArithmeticV0CommonPara(primitive);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "PopulateArithmeticV0CommonPara failed.";
+    return nullptr;
+  }
+  param->eltwise_mode_ = primitive->value_as_Eltwise()->mode();
+  param->op_parameter_.type_ = schema::PrimitiveType_Eltwise;
+  return reinterpret_cast<OpParameter *>(param);
+}
+} // namespace
+
+Registry g_eltwiseV0ParameterRegistry(schema::v0::PrimitiveType_Eltwise, PopulateEltwiseParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/elu_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/elu_populate_v0.cc
new file mode 100644
index 0000000000..a65326a742
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/elu_populate_v0.cc
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/elu_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateEluParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto elu_prim = primitive->value_as_Elu(); + EluParameter *elu_parameter = reinterpret_cast<EluParameter *>(malloc(sizeof(EluParameter))); + if (elu_parameter == nullptr) { + MS_LOG(ERROR) << "malloc EluParameter failed."; + return nullptr; + } + memset(elu_parameter, 0, sizeof(EluParameter)); + elu_parameter->op_parameter_.type_ = schema::PrimitiveType_Elu; + + elu_parameter->alpha_ = elu_prim->alpha(); + return reinterpret_cast<OpParameter *>(elu_parameter); +} +} // namespace + +Registry g_eluV0ParameterRegistry(schema::v0::PrimitiveType_Elu, PopulateEluParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/embedding_lookup_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/embedding_lookup_populate_v0.cc new file mode 100644 index 0000000000..8ab55d43f2 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/embedding_lookup_populate_v0.cc @@ -0,0 +1,50 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/embedding_lookup_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateEmbeddingLookupParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto embedding_lookup_prim = primitive->value_as_EmbeddingLookup();
+  EmbeddingLookupParameter *embedding_lookup_parameter =
+    reinterpret_cast<EmbeddingLookupParameter *>(malloc(sizeof(EmbeddingLookupParameter)));
+  if (embedding_lookup_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc EmbeddingLookupParameter failed.";
+    return nullptr;
+  }
+  memset(embedding_lookup_parameter, 0, sizeof(EmbeddingLookupParameter));
+  embedding_lookup_parameter->op_parameter_.type_ = schema::PrimitiveType_EmbeddingLookupFusion;
+
+  embedding_lookup_parameter->max_norm_ = embedding_lookup_prim->maxNorm();
+  if (embedding_lookup_parameter->max_norm_ < 0) {
+    MS_LOG(ERROR) << "Embedding lookup max norm should be non-negative, got "
+                  << embedding_lookup_parameter->max_norm_;
+    free(embedding_lookup_parameter);
+    return nullptr;
+  }
+  return reinterpret_cast<OpParameter *>(embedding_lookup_parameter);
+}
+} // namespace
+
+Registry g_embeddingLookupV0ParameterRegistry(schema::v0::PrimitiveType_EmbeddingLookup,
+                                              PopulateEmbeddingLookupParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/exp_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/exp_populate_v0.cc
new file mode 100644
index 0000000000..165e0db3dc
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/exp_populate_v0.cc
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/exp_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateExpParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto exp_prim = primitive->value_as_Exp(); + ExpParameter *exp_parameter = reinterpret_cast<ExpParameter *>(malloc(sizeof(ExpParameter))); + if (exp_parameter == nullptr) { + MS_LOG(ERROR) << "malloc ExpParameter failed."; + return nullptr; + } + memset(exp_parameter, 0, sizeof(ExpParameter)); + exp_parameter->op_parameter_.type_ = schema::PrimitiveType_ExpFusion; + + exp_parameter->base_ = exp_prim->base(); + exp_parameter->scale_ = exp_prim->scale(); + exp_parameter->shift_ = exp_prim->shift(); + if (exp_parameter->base_ != -1 && exp_parameter->base_ <= 0) { + MS_LOG(ERROR) << "Exp base must be strictly positive, got " << exp_parameter->base_; + free(exp_parameter); + return nullptr; + } + return reinterpret_cast<OpParameter *>(exp_parameter); +} +} // namespace + +Registry g_expV0ParameterRegistry(schema::v0::PrimitiveType_Exp, PopulateExpParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/expand_dims_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/expand_dims_populate_v0.cc new file mode 100644 index 0000000000..4ad78d4118 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/expand_dims_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateExpandDimsParameter(const void *prim) { + OpParameter *expand_dims_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (expand_dims_param == nullptr) { + MS_LOG(ERROR) << "malloc ExpandDimsParameter failed."; + return nullptr; + } + memset(expand_dims_param, 0, sizeof(OpParameter)); + expand_dims_param->type_ = schema::PrimitiveType_ExpandDims; + return reinterpret_cast<OpParameter *>(expand_dims_param); +} +} // namespace + +Registry g_expandDimsV0ParameterRegistry(schema::v0::PrimitiveType_ExpandDims, PopulateExpandDimsParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/fill_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/fill_populate_v0.cc new file mode 100644 index 0000000000..41f2992ce5 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/fill_populate_v0.cc @@ -0,0 +1,36 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/ops/populate/populate_register.h" +#include "schema/model_v0_generated.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateFillParameter(const void *prim) { + OpParameter *fill_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (fill_param == nullptr) { + MS_LOG(ERROR) << "malloc FillParameter failed."; + return nullptr; + } + memset(fill_param, 0, sizeof(OpParameter)); + fill_param->type_ = schema::PrimitiveType_Fill; + return reinterpret_cast<OpParameter *>(fill_param); +} +} // namespace + +Registry g_fillV0ParameterRegistry(schema::v0::PrimitiveType_Fill, PopulateFillParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/flatten_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/flatten_populate_v0.cc new file mode 100644 index 0000000000..f3e425d14a --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/flatten_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateFlattenParameter(const void *prim) { + OpParameter *flatten_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (flatten_param == nullptr) { + MS_LOG(ERROR) << "malloc FlattenParameter failed."; + return nullptr; + } + memset(flatten_param, 0, sizeof(OpParameter)); + flatten_param->type_ = schema::PrimitiveType_Flatten; + return reinterpret_cast<OpParameter *>(flatten_param); +} +} // namespace + +Registry g_flattenV0ParameterRegistry(schema::v0::PrimitiveType_Flatten, PopulateFlattenParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/full_connection_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/full_connection_populate_v0.cc new file mode 100644 index 0000000000..4956967daf --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/full_connection_populate_v0.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/matmul_parameter.h" + +namespace mindspore { +namespace lite { +namespace { + +OpParameter *PopulateFullconnectionParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto full_connection_prim = primitive->value_as_FullConnection(); + + MatMulParameter *matmul_param = reinterpret_cast<MatMulParameter *>(malloc(sizeof(MatMulParameter))); + if (matmul_param == nullptr) { + MS_LOG(ERROR) << "malloc MatMulParameter failed."; + return nullptr; + } + memset(matmul_param, 0, sizeof(MatMulParameter)); + matmul_param->op_parameter_.type_ = schema::PrimitiveType_FullConnection; + matmul_param->b_transpose_ = true; + matmul_param->a_transpose_ = false; + matmul_param->has_bias_ = full_connection_prim->hasBias(); + if (full_connection_prim->activationType() == schema::v0::ActivationType_RELU) { + matmul_param->act_type_ = ActType_Relu; + } else if (full_connection_prim->activationType() == schema::v0::ActivationType_RELU6) { + matmul_param->act_type_ = ActType_Relu6; + } else { + matmul_param->act_type_ = ActType_No; + } + + matmul_param->use_axis_ = full_connection_prim->useAxis(); + matmul_param->axis_ = full_connection_prim->axis(); + return reinterpret_cast<OpParameter *>(matmul_param); +} +} // namespace + +Registry g_fullConnectionV0ParameterRegistry(schema::v0::PrimitiveType_FullConnection, PopulateFullconnectionParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/fused_batchnorm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/fused_batchnorm_populate_v0.cc new file mode 100644 index 0000000000..f164e3c747 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/fused_batchnorm_populate_v0.cc @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/batchnorm_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateFusedBatchNormParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto fused_batchnorm_prim = primitive->value_as_FusedBatchNorm(); + BatchNormParameter *batch_norm_param = reinterpret_cast<BatchNormParameter *>(malloc(sizeof(BatchNormParameter))); + if (batch_norm_param == nullptr) { + MS_LOG(ERROR) << "malloc BatchNormParameter failed."; + return nullptr; + } + memset(batch_norm_param, 0, sizeof(BatchNormParameter)); + batch_norm_param->op_parameter_.type_ = schema::PrimitiveType_FusedBatchNorm; + + batch_norm_param->epsilon_ = fused_batchnorm_prim->epsilon(); + batch_norm_param->momentum_ = fused_batchnorm_prim->momentum(); + batch_norm_param->fused_ = true; + return reinterpret_cast<OpParameter *>(batch_norm_param); +} +} // namespace + +Registry g_fusedBatchNormV0ParameterRegistry(schema::v0::PrimitiveType_FusedBatchNorm, PopulateFusedBatchNormParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/gather_nd_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/gather_nd_populate_v0.cc new file mode 100644 index 0000000000..3be3eeede9 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/gather_nd_populate_v0.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/gatherNd_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateGatherNdParameter(const void *prim) { + GatherNdParameter *gather_nd_param = reinterpret_cast<GatherNdParameter *>(malloc(sizeof(GatherNdParameter))); + if (gather_nd_param == nullptr) { + MS_LOG(ERROR) << "malloc GatherNdParameter failed."; + return nullptr; + } + memset(gather_nd_param, 0, sizeof(GatherNdParameter)); + gather_nd_param->op_parameter_.type_ = schema::PrimitiveType_GatherNd; + return reinterpret_cast<OpParameter *>(gather_nd_param); +} +} // namespace + +Registry g_gatherNdV0ParameterRegistry(schema::v0::PrimitiveType_GatherNd, PopulateGatherNdParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/gather_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/gather_populate_v0.cc new file mode 100644 index 0000000000..34ecda0034 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/gather_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/gather_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateGatherParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto gather_prim = primitive->value_as_Gather(); + + GatherParameter *gather_param = reinterpret_cast<GatherParameter *>(malloc(sizeof(GatherParameter))); + if (gather_param == nullptr) { + MS_LOG(ERROR) << "malloc GatherParameter failed."; + return nullptr; + } + memset(gather_param, 0, sizeof(GatherParameter)); + gather_param->op_parameter_.type_ = schema::PrimitiveType_Gather; + if (gather_prim->axis() < 0) { + MS_LOG(ERROR) << "axis should be >= 0."; + free(gather_param); + return nullptr; + } + gather_param->axis_ = gather_prim->axis(); + return reinterpret_cast<OpParameter *>(gather_param); +} +} // namespace + +Registry g_gatherV0ParameterRegistry(schema::v0::PrimitiveType_Gather, PopulateGatherParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/hashtable_lookup_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/hashtable_lookup_populate_v0.cc new file mode 100644 index 0000000000..2d1b302985 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/hashtable_lookup_populate_v0.cc @@ -0,0 +1,38 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateHashtableLookupParameter(const void *prim) {
+  OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc OpParameter failed.";
+    return nullptr;
+  }
+  memset(param, 0, sizeof(OpParameter));
+  param->type_ = schema::PrimitiveType_HashtableLookup;
+  return param;
+}
+} // namespace
+
+Registry g_hashtableLookupV0ParameterRegistry(schema::v0::PrimitiveType_HashtableLookup,
+                                              PopulateHashtableLookupParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/instance_norm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/instance_norm_populate_v0.cc
new file mode 100644
index 0000000000..b2603c1a67
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/instance_norm_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/instance_norm_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateInstanceNormParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto instance_norm_prim = primitive->value_as_InstanceNorm();
+  InstanceNormParameter *instance_norm_param =
+    reinterpret_cast<InstanceNormParameter *>(malloc(sizeof(InstanceNormParameter)));
+  if (instance_norm_param == nullptr) {
+    MS_LOG(ERROR) << "malloc InstanceNormParameter failed.";
+    return nullptr;
+  }
+  memset(instance_norm_param, 0, sizeof(InstanceNormParameter));
+  instance_norm_param->op_parameter_.type_ = schema::PrimitiveType_InstanceNorm;
+  instance_norm_param->epsilon_ = instance_norm_prim->epsilon();
+  return reinterpret_cast<OpParameter *>(instance_norm_param);
+}
+} // namespace
+
+Registry g_instanceNormV0ParameterRegistry(schema::v0::PrimitiveType_InstanceNorm, PopulateInstanceNormParameter,
+                                           SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/l2_norm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/l2_norm_populate_v0.cc
new file mode 100644
index 0000000000..33ab4c9967
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/l2_norm_populate_v0.cc
@@ -0,0 +1,65 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/l2_norm_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateL2NormParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto l2_norm_prim = primitive->value_as_L2Norm(); + L2NormParameter *l2_norm_parameter = reinterpret_cast<L2NormParameter *>(malloc(sizeof(L2NormParameter))); + if (l2_norm_parameter == nullptr) { + MS_LOG(ERROR) << "malloc L2NormParameter failed."; + return nullptr; + } + memset(l2_norm_parameter, 0, sizeof(L2NormParameter)); + l2_norm_parameter->op_parameter_.type_ = schema::PrimitiveType_L2NormalizeFusion; + + MS_ASSERT(l2_norm_prim != nullptr); + auto axis_vec = l2_norm_prim->axis(); + l2_norm_parameter->axis_num_ = axis_vec->size(); + if (((size_t)axis_vec->size()) > SIZE_MAX / sizeof(int)) { + MS_LOG(ERROR) << "axis_vec size too big"; + free(l2_norm_parameter); + return nullptr; + } + + for (size_t i = 0; i < axis_vec->size(); i++) { + l2_norm_parameter->axis_[i] = *(axis_vec->begin() + i); + } + if (l2_norm_prim->epsilon() < 1e-6) { + l2_norm_parameter->epsilon_ = 1e-6; + } else { + l2_norm_parameter->epsilon_ = l2_norm_prim->epsilon(); + } + if (l2_norm_prim->activationType() == static_cast<int>(schema::v0::ActivationType_RELU)) { + l2_norm_parameter->act_type_ = ActType_Relu; + } else if (l2_norm_prim->activationType() == static_cast<int>(schema::v0::ActivationType_RELU6)) { + l2_norm_parameter->act_type_ = ActType_Relu6; + } else { + l2_norm_parameter->act_type_ = ActType_No; + } + return reinterpret_cast<OpParameter *>(l2_norm_parameter); +} +} // namespace + +Registry g_l2NormV0ParameterRegistry(schema::v0::PrimitiveType_L2Norm, PopulateL2NormParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc new file mode 100644 index 0000000000..a0139f1ee2 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/v0/layer_norm_populate_v0.h"
+#include "nnacl/layer_norm_parameter.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+OpParameter *PopulateLayerNormParameterV0(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto layer_norm_prim = primitive->value_as_LayerNorm();
+  auto layer_norm_parameter = reinterpret_cast<LayerNormParameter *>(malloc(sizeof(LayerNormParameter)));
+  if (layer_norm_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc LayerNormParameter failed.";
+    return nullptr;
+  }
+  memset(layer_norm_parameter, 0, sizeof(LayerNormParameter));
+  layer_norm_parameter->op_parameter_.type_ = schema::PrimitiveType_LayerNormFusion;
+  auto normalized_shape = layer_norm_prim->normalizedShape();
+  if (normalized_shape != nullptr) {
+    layer_norm_parameter->normalized_dims_ = normalized_shape->size();
+    if (((size_t)normalized_shape->size()) > SIZE_MAX / sizeof(int)) {
+      MS_LOG(ERROR) << "normalized_shape size too big";
+      free(layer_norm_parameter);
+      return nullptr;
+    }
+    for (size_t i = 0; i < normalized_shape->size(); i++) {
+      layer_norm_parameter->normalized_shape_[i] = *(normalized_shape->begin() + i);
+    }
+  }
+  layer_norm_parameter->epsilon_ = layer_norm_prim->epsilon();
+  layer_norm_parameter->elementwise_affine_ = layer_norm_prim->elementwiseAffine();
+
+  return reinterpret_cast<OpParameter *>(layer_norm_parameter);
+}
+
+Registry g_layerNormV0ParameterRegistry(schema::v0::PrimitiveType_LayerNorm, PopulateLayerNormParameterV0, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.h b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.h
new file mode 100644
index 0000000000..ebad3560d0
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/layer_norm_populate_v0.h
@@ -0,0 +1,27 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_V0_LAYER_NORM_POPULATE_V0_H_
+#define MINDSPORE_LITE_SRC_OPS_POPULATE_V0_LAYER_NORM_POPULATE_V0_H_
+
+#include "nnacl/arithmetic.h"
+
+namespace mindspore {
+namespace lite {
+OpParameter *PopulateLayerNormParameterV0(const void *prim);
+
+} // namespace lite
+} // namespace mindspore
+#endif // MINDSPORE_LITE_SRC_OPS_POPULATE_V0_LAYER_NORM_POPULATE_V0_H_
diff --git a/mindspore/lite/src/ops/populate/v0/local_response_normalization_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/local_response_normalization_populate_v0.cc
new file mode 100644
index 0000000000..8a3f028c48
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/local_response_normalization_populate_v0.cc
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/local_response_norm_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateLocalResponseNormParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto local_response_normalization_prim = primitive->value_as_LocalResponseNormalization(); + + LocalResponseNormParameter *lrn_param = + reinterpret_cast<LocalResponseNormParameter *>(malloc(sizeof(LocalResponseNormParameter))); + if (lrn_param == nullptr) { + MS_LOG(ERROR) << "malloc LocalResponseNormParameter failed."; + return nullptr; + } + memset(lrn_param, 0, sizeof(LocalResponseNormParameter)); + lrn_param->op_parameter_.type_ = schema::PrimitiveType_LRN; + lrn_param->depth_radius_ = local_response_normalization_prim->depth_radius(); + lrn_param->bias_ = local_response_normalization_prim->bias(); + lrn_param->alpha_ = local_response_normalization_prim->alpha(); + lrn_param->beta_ = local_response_normalization_prim->beta(); + return reinterpret_cast<OpParameter *>(lrn_param); +} +} // namespace + +Registry g_localResponseNormalizationV0ParameterRegistry(schema::v0::PrimitiveType_LocalResponseNormalization, + PopulateLocalResponseNormParameter, SCHEMA_V0); +Registry g_lrnV0ParameterRegistry(schema::v0::PrimitiveType_Lrn, PopulateLocalResponseNormParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/lsh_projection_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/lsh_projection_populate_v0.cc new file mode 100644 index 0000000000..1292c5e013 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/lsh_projection_populate_v0.cc @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/lsh_projection_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateLshProjectionParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto lsh_projection_prim = primitive->value_as_LshProjection(); + LshProjectionParameter *lsh_project_param = + reinterpret_cast<LshProjectionParameter *>(malloc(sizeof(LshProjectionParameter))); + if (lsh_project_param == nullptr) { + MS_LOG(ERROR) << "malloc LshProjectionParameter failed."; + return nullptr; + } + memset(lsh_project_param, 0, sizeof(LshProjectionParameter)); + lsh_project_param->op_parameter_.type_ = schema::PrimitiveType_LshProjection; + + lsh_project_param->lsh_type_ = lsh_projection_prim->type(); + return reinterpret_cast<OpParameter *>(lsh_project_param); +} +} // namespace + +Registry g_lshProjectionV0ParameterRegistry(schema::v0::PrimitiveType_LshProjection, PopulateLshProjectionParameter, + SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/lstm_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/lstm_populate_v0.cc new file mode 100644 index 0000000000..701909fd14 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/lstm_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/lstm_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateLstmParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto lstm_prim = primitive->value_as_Lstm(); + LstmParameter *lstm_param = reinterpret_cast<LstmParameter *>(malloc(sizeof(LstmParameter))); + if (lstm_param == nullptr) { + MS_LOG(ERROR) << "malloc LstmParameter failed."; + return nullptr; + } + memset(lstm_param, 0, sizeof(LstmParameter)); + lstm_param->op_parameter_.type_ = schema::PrimitiveType_LSTM; + + if (lstm_prim == nullptr) { + free(lstm_param); + MS_LOG(ERROR) << "get Lstm param nullptr."; + return nullptr; + } + lstm_param->bidirectional_ = lstm_prim->bidirection(); + return reinterpret_cast<OpParameter *>(lstm_param); +} +} // namespace + +Registry g_lstmV0ParameterRegistry(schema::v0::PrimitiveType_Lstm, PopulateLstmParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/matmul_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/matmul_populate_v0.cc new file mode 100644 index 0000000000..41d100e922 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/matmul_populate_v0.cc @@ -0,0 +1,46 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/matmul_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateMatMulParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto matmul_prim = primitive->value_as_MatMul();
+
+  MatMulParameter *matmul_param = reinterpret_cast<MatMulParameter *>(malloc(sizeof(MatMulParameter)));
+  if (matmul_param == nullptr) {
+    MS_LOG(ERROR) << "malloc MatMulParameter failed.";
+    return nullptr;
+  }
+  memset(matmul_param, 0, sizeof(MatMulParameter));
+  matmul_param->op_parameter_.type_ = schema::PrimitiveType_MatMul;
+  matmul_param->b_transpose_ = matmul_prim->transposeB();
+  matmul_param->a_transpose_ = matmul_prim->transposeA();
+  matmul_param->has_bias_ = false;
+  matmul_param->act_type_ = ActType_No;
+
+  return reinterpret_cast<OpParameter *>(matmul_param);
+}
+} // namespace
+
+Registry g_matMulV0ParameterRegistry(schema::v0::PrimitiveType_MatMul, PopulateMatMulParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/mul_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/mul_populate_v0.cc
new file mode 100644
index 0000000000..a8b5b3a9ea
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/mul_populate_v0.cc
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/ops/populate/arithmetic_populate.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateMulParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto mul_prim = primitive->value_as_Mul();
+  ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_MulFusion;
+  param->activation_type_ = mul_prim->activationType();
+  return reinterpret_cast<OpParameter *>(param);
+}
+} // namespace
+
+Registry g_mulV0ParameterRegistry(schema::v0::PrimitiveType_Mul, PopulateMulParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/nchw2nhwc_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/nchw2nhwc_populate_v0.cc
new file mode 100644
index 0000000000..33035d5371
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/nchw2nhwc_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/common/common.h"
+#include "nnacl/transpose.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateNchw2NhwcParameter(const void *prim) {
+  TransposeParameter *parameter = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter)));
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc TransposeParameter failed.";
+    return nullptr;
+  }
+  memset(parameter, 0, sizeof(TransposeParameter));
+  parameter->op_parameter_.type_ = schema::PrimitiveType_Transpose;
+  // NCHW -> NHWC is the fixed permutation (0, 2, 3, 1).
+  parameter->num_axes_ = 4;
+  parameter->perm_[0] = 0;
+  parameter->perm_[1] = 2;
+  parameter->perm_[2] = 3;
+  parameter->perm_[3] = 1;
+  return reinterpret_cast<OpParameter *>(parameter);
+}
+} // namespace
+
+Registry g_nchw2NhwcV0ParameterRegistry(schema::v0::PrimitiveType_Nchw2Nhwc, PopulateNchw2NhwcParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/nhwc2nchw_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/nhwc2nchw_populate_v0.cc
new file mode 100644
index 0000000000..11f2b11fa5
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/nhwc2nchw_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/common/common.h"
+#include "nnacl/transpose.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateNhwc2NchwParameter(const void *prim) {
+  TransposeParameter *parameter = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter)));
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc TransposeParameter failed.";
+    return nullptr;
+  }
+  memset(parameter, 0, sizeof(TransposeParameter));
+  parameter->op_parameter_.type_ = schema::PrimitiveType_Transpose;
+  // NHWC -> NCHW is the fixed permutation (0, 3, 1, 2).
+  parameter->num_axes_ = 4;
+  parameter->perm_[0] = 0;
+  parameter->perm_[1] = 3;
+  parameter->perm_[2] = 1;
+  parameter->perm_[3] = 2;
+  return reinterpret_cast<OpParameter *>(parameter);
+}
+} // namespace
+
+Registry g_nhwc2NchwV0ParameterRegistry(schema::v0::PrimitiveType_Nhwc2Nchw, PopulateNhwc2NchwParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/non_max_suppression_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/non_max_suppression_populate_v0.cc
new file mode 100644
index 0000000000..3193862a4c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/non_max_suppression_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/non_max_suppression_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateNonMaxSuppressionParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto non_max_suppression_prim = primitive->value_as_NonMaxSuppression(); + NMSParameter *param = reinterpret_cast<NMSParameter *>(malloc(sizeof(NMSParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc param failed."; + return nullptr; + } + memset(param, 0, sizeof(NMSParameter)); + param->op_parameter_.type_ = schema::PrimitiveType_NonMaxSuppression; + + param->center_point_box_ = non_max_suppression_prim->centerPointBox(); + return reinterpret_cast<OpParameter *>(param); +} +} // namespace + +Registry g_nonMaxSuppressionV0ParameterRegistry(schema::v0::PrimitiveType_NonMaxSuppression, + PopulateNonMaxSuppressionParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/one_hot_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/one_hot_populate_v0.cc new file mode 100644 index 0000000000..332c903f91 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/one_hot_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/one_hot_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateOneHotParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto one_hot_prim = primitive->value_as_OneHot(); + OneHotParameter *one_hot_param = reinterpret_cast<OneHotParameter *>(malloc(sizeof(OneHotParameter))); + if (one_hot_param == nullptr) { + MS_LOG(ERROR) << "malloc OneHotParameter failed."; + return nullptr; + } + memset(one_hot_param, 0, sizeof(OneHotParameter)); + one_hot_param->op_parameter_.type_ = schema::PrimitiveType_OneHot; + + if (one_hot_prim == nullptr) { + free(one_hot_param); + MS_LOG(ERROR) << "get OneHot param nullptr."; + return nullptr; + } + one_hot_param->axis_ = one_hot_prim->axis(); + return reinterpret_cast<OpParameter *>(one_hot_param); +} +} // namespace + +Registry g_oneHotV0ParameterRegistry(schema::v0::PrimitiveType_OneHot, PopulateOneHotParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/oneslike_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/oneslike_populate_v0.cc new file mode 100644 index 0000000000..da034b0322 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/oneslike_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateOnesLikeParameter(const void *prim) { + OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc OnesLike Parameter failed."; + return nullptr; + } + memset(param, 0, sizeof(OpParameter)); + param->type_ = schema::PrimitiveType_OnesLike; + return param; +} +} // namespace + +Registry g_onesLikeV0ParameterRegistry(schema::v0::PrimitiveType_OnesLike, PopulateOnesLikeParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/p_relu_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/p_relu_populate_v0.cc new file mode 100644 index 0000000000..3cee353dde --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/p_relu_populate_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/prelu_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulatePReLUParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto p_relu_prim = primitive->value_as_PReLU(); + + PReluParameter *prelu_param = reinterpret_cast<PReluParameter *>(malloc(sizeof(PReluParameter))); + if (prelu_param == nullptr) { + MS_LOG(ERROR) << "malloc PReluParameter failed."; + return nullptr; + } + memset(prelu_param, 0, sizeof(PReluParameter)); + prelu_param->op_parameter_.type_ = schema::PrimitiveType_PReLUFusion; + prelu_param->channelShared = p_relu_prim->channelShared(); + return reinterpret_cast<OpParameter *>(prelu_param); +} +} // namespace + +Registry g_pReLUV0ParameterRegistry(schema::v0::PrimitiveType_PReLU, PopulatePReLUParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/pad_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/pad_populate_v0.cc new file mode 100644 index 0000000000..1ed05e2a61 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/pad_populate_v0.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/pad_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulatePadParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto pad_prim = primitive->value_as_Pad(); + PadParameter *pad_param = reinterpret_cast<PadParameter *>(malloc(sizeof(PadParameter))); + if (pad_param == nullptr) { + MS_LOG(ERROR) << "malloc PadParameter failed."; + return nullptr; + } + memset(pad_param, 0, sizeof(PadParameter)); + pad_param->op_parameter_.type_ = schema::PrimitiveType_PadFusion; + + pad_param->pad_mode_ = pad_prim->paddingMode(); + pad_param->constant_value_ = pad_prim->constantValue(); + return reinterpret_cast<OpParameter *>(pad_param); +} +} // namespace + +Registry g_padV0ParameterRegistry(schema::v0::PrimitiveType_Pad, PopulatePadParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc new file mode 100644 index 0000000000..553f371915 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/partial_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+typedef struct PartialParameter {
+  OpParameter op_parameter_;
+  int sub_graph_index_;
+} PartialParameter;
+
+OpParameter *PopulatePartialParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto partial_prim = primitive->value_as_Partial();
+  PartialParameter *partial_parameter = reinterpret_cast<PartialParameter *>(malloc(sizeof(PartialParameter)));
+  if (partial_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc partial parameter failed.";
+    return nullptr;
+  }
+  memset(partial_parameter, 0, sizeof(PartialParameter));
+  partial_parameter->op_parameter_.type_ = schema::PrimitiveType_PartialFusion;
+
+  partial_parameter->sub_graph_index_ = partial_prim->subGraphIndex();
+
+  return reinterpret_cast<OpParameter *>(partial_parameter);
+}
+} // namespace
+
+Registry g_partialV0ParameterRegistry(schema::v0::PrimitiveType_Partial, PopulatePartialParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/pooling_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/pooling_populate_v0.cc
new file mode 100644
index 0000000000..3c074bc7bf
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/pooling_populate_v0.cc
@@ -0,0 +1,98 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/pooling_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulatePoolingParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto pooling_prim = primitive->value_as_Pooling();
+
+  PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter)));
+  if (pooling_param == nullptr) {
+    MS_LOG(ERROR) << "malloc PoolingParameter failed.";
+    return nullptr;
+  }
+  memset(pooling_param, 0, sizeof(PoolingParameter));
+  pooling_param->global_ = pooling_prim->global();
+  pooling_param->window_w_ = pooling_prim->windowW();
+  pooling_param->window_h_ = pooling_prim->windowH();
+  pooling_param->pad_u_ = pooling_prim->padUp();
+  pooling_param->pad_d_ = pooling_prim->padDown();
+  pooling_param->pad_l_ = pooling_prim->padLeft();
+  pooling_param->pad_r_ = pooling_prim->padRight();
+  pooling_param->stride_w_ = pooling_prim->strideW();
+  pooling_param->stride_h_ = pooling_prim->strideH();
+  pooling_param->avg_mode_ = pooling_prim->avgMode();
+
+  auto pool_mode = pooling_prim->poolingMode();
+  switch (pool_mode) {
+    case schema::v0::PoolMode_MAX_POOLING:
+      pooling_param->pool_mode_ = PoolMode_MaxPool;
+      pooling_param->op_parameter_.type_ = schema::PrimitiveType_MaxPoolFusion;
+      break;
+    case schema::v0::PoolMode_MEAN_POOLING:
+      pooling_param->pool_mode_ = PoolMode_AvgPool;
+      pooling_param->op_parameter_.type_ = schema::PrimitiveType_AvgPoolFusion;
+      break;
+    default:
+      pooling_param->pool_mode_ = PoolMode_No;
+      pooling_param->op_parameter_.type_ = primitive->value_type();
+      break;
+  }
+
+  auto round_mode = pooling_prim->roundMode();
+  switch (round_mode) {
+    case schema::v0::RoundMode_FLOOR:
+      pooling_param->round_mode_ = RoundMode_Floor;
+      break;
+    case schema::v0::RoundMode_CEIL:
+      pooling_param->round_mode_ = RoundMode_Ceil;
+      break;
+    default:
+      pooling_param->round_mode_ = RoundMode_No;
+      break;
+  }
+
+  if (pooling_prim->activationType() == schema::v0::ActivationType_RELU) {
+    pooling_param->act_type_ = ActType_Relu;
+  } else if (pooling_prim->activationType() == schema::v0::ActivationType_RELU6) {
+    pooling_param->act_type_ = ActType_Relu6;
+  } else {
+    pooling_param->act_type_ = ActType_No;
+  }
+  switch (pooling_prim->padMode()) {
+    case schema::v0::PadMode_SAME_UPPER:
+      pooling_param->pad_mode_ = Pad_same;
+      break;
+    case schema::v0::PadMode_VALID:
+      pooling_param->pad_mode_ = Pad_valid;
+      break;
+    default:
+      pooling_param->pad_mode_ = Pad_pad;
+      break;
+  }
+  return reinterpret_cast<OpParameter *>(pooling_param);
+}
+} // namespace
+
+Registry g_poolingV0ParameterRegistry(schema::v0::PrimitiveType_Pooling, PopulatePoolingParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc
new file mode 100644
index 0000000000..b7391b0828
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
diff --git a/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc
new file mode 100644
index 0000000000..b7391b0828
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/power_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/power_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulatePowerParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto power_prim = primitive->value_as_Power();
+  PowerParameter *power_param = reinterpret_cast<PowerParameter *>(malloc(sizeof(PowerParameter)));
+  if (power_param == nullptr) {
+    MS_LOG(ERROR) << "malloc PowerParameter failed.";
+    return nullptr;
+  }
+  memset(power_param, 0, sizeof(PowerParameter));
+  power_param->op_parameter_.type_ = schema::PrimitiveType_PowFusion;
+
+  power_param->scale_ = power_prim->scale();
+  power_param->shift_ = power_prim->shift();
+  return reinterpret_cast<OpParameter *>(power_param);
+}
+} // namespace
+
+Registry g_powerV0ParameterRegistry(schema::v0::PrimitiveType_Power, PopulatePowerParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc
new file mode 100644
index 0000000000..7c5c1ef78c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/prior_box_populate_v0.cc
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/prior_box_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulatePriorBoxParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto prior_box_prim = primitive->value_as_PriorBox();
+  PriorBoxParameter *prior_box_param = reinterpret_cast<PriorBoxParameter *>(malloc(sizeof(PriorBoxParameter)));
+  if (prior_box_param == nullptr) {
+    MS_LOG(ERROR) << "malloc PriorBoxParameter failed.";
+    return nullptr;
+  }
+  memset(prior_box_param, 0, sizeof(PriorBoxParameter));
+  prior_box_param->op_parameter_.type_ = schema::PrimitiveType_PriorBox;
+
+  if (prior_box_prim->min_sizes()->size() > MAX_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "PriorBox min_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got "
+                  << prior_box_prim->min_sizes()->size();
+    free(prior_box_param);
+    return nullptr;
+  }
+  prior_box_param->min_sizes_size = prior_box_prim->min_sizes()->size();
+  if (prior_box_prim->max_sizes()->size() > MAX_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "PriorBox max_sizes size exceeds max num " << MAX_SHAPE_SIZE << ", got "
+                  << prior_box_prim->max_sizes()->size();
+    free(prior_box_param);
+    return nullptr;
+  }
+  prior_box_param->max_sizes_size = prior_box_prim->max_sizes()->size();
+  memcpy(prior_box_param->max_sizes, prior_box_prim->max_sizes()->data(),
+         prior_box_prim->max_sizes()->size() * sizeof(int32_t));
+  memcpy(prior_box_param->min_sizes, prior_box_prim->min_sizes()->data(),
+         prior_box_prim->min_sizes()->size() * sizeof(int32_t));
+
+  if (prior_box_prim->aspect_ratios()->size() > MAX_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "PriorBox aspect_ratios size exceeds max num " << MAX_SHAPE_SIZE << ", got "
+                  << prior_box_prim->aspect_ratios()->size();
+    free(prior_box_param);
+    return nullptr;
+  }
+  prior_box_param->aspect_ratios_size = prior_box_prim->aspect_ratios()->size();
+  memcpy(prior_box_param->aspect_ratios, prior_box_prim->aspect_ratios()->data(),
+         prior_box_prim->aspect_ratios()->size() * sizeof(float));
+  if (prior_box_prim->variances()->size() != COMM_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "PriorBox variances size should be " << COMM_SHAPE_SIZE << ", got "
+                  << prior_box_prim->variances()->size();
+    free(prior_box_param);
+    return nullptr;
+  }
+  memcpy(prior_box_param->variances, prior_box_prim->variances()->data(), COMM_SHAPE_SIZE * sizeof(float));
+  prior_box_param->flip = prior_box_prim->flip();
+  prior_box_param->clip = prior_box_prim->clip();
+  prior_box_param->offset = prior_box_prim->offset();
+  prior_box_param->image_size_h = prior_box_prim->image_size_h();
+  prior_box_param->image_size_w = prior_box_prim->image_size_w();
+  prior_box_param->step_h = prior_box_prim->step_h();
+  prior_box_param->step_w = prior_box_prim->step_w();
+  return reinterpret_cast<OpParameter *>(prior_box_param);
+}
+} // namespace
+
+Registry g_priorBoxV0ParameterRegistry(schema::v0::PrimitiveType_PriorBox, PopulatePriorBoxParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
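PopulatePriorBoxParameter repeats a pattern used throughout these files: validate a flatbuffer vector's length against a fixed-capacity C array, then memcpy the data and record the element count. A hypothetical helper (CopyFbVector is illustrative, not part of the patch) makes that contract explicit:

// Hypothetical helper, not in this patch: bounds-checked copy of a flatbuffer
// vector into a fixed-capacity array. On failure the caller logs the error and
// frees the half-built parameter, exactly as the code above does inline.
#include <cstring>
#include "flatbuffers/flatbuffers.h"

template <typename T>
bool CopyFbVector(const flatbuffers::Vector<T> *src, T *dst, size_t max_size, int32_t *out_size) {
  if (src == nullptr || src->size() > max_size) {
    return false;
  }
  *out_size = static_cast<int32_t>(src->size());
  memcpy(dst, src->data(), src->size() * sizeof(T));
  return true;
}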
diff --git a/mindspore/lite/src/ops/populate/v0/quant_dtype_cast_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/quant_dtype_cast_populate_v0.cc
new file mode 100644
index 0000000000..b4df8a9cc5
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/quant_dtype_cast_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/int8/quant_dtype_cast_int8.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateQuantDTypeCastParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto quant_dtype_cast_prim = primitive->value_as_QuantDTypeCast();
+  QuantDTypeCastParameter *parameter =
+    reinterpret_cast<QuantDTypeCastParameter *>(malloc(sizeof(QuantDTypeCastParameter)));
+  if (parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc QuantDTypeCastParameter failed.";
+    return nullptr;
+  }
+  memset(parameter, 0, sizeof(QuantDTypeCastParameter));
+  parameter->op_parameter_.type_ = schema::PrimitiveType_QuantDTypeCast;
+
+  parameter->srcT = quant_dtype_cast_prim->srcT();
+  parameter->dstT = quant_dtype_cast_prim->dstT();
+  return reinterpret_cast<OpParameter *>(parameter);
+}
+} // namespace
+
+Registry g_quantDTypeCastV0ParameterRegistry(schema::v0::PrimitiveType_QuantDTypeCast, PopulateQuantDTypeCastParameter,
+                                             SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/range_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/range_populate_v0.cc
new file mode 100644
index 0000000000..52200f24e7
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/range_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/range_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateRangeParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto range_prim = primitive->value_as_Range();
+
+  RangeParameter *range_param = reinterpret_cast<RangeParameter *>(malloc(sizeof(RangeParameter)));
+  if (range_param == nullptr) {
+    MS_LOG(ERROR) << "malloc RangeParameter failed.";
+    return nullptr;
+  }
+  memset(range_param, 0, sizeof(RangeParameter));
+  range_param->op_parameter_.type_ = schema::PrimitiveType_Range;
+  range_param->start_ = range_prim->start();
+  range_param->limit_ = range_prim->limit();
+  range_param->delta_ = range_prim->delta();
+  range_param->dType_ = range_prim->dType();
+  return reinterpret_cast<OpParameter *>(range_param);
+}
+} // namespace
+
+Registry g_rangeV0ParameterRegistry(schema::v0::PrimitiveType_Range, PopulateRangeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/rank_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/rank_populate_v0.cc
new file mode 100644
index 0000000000..b001f6453a
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/rank_populate_v0.cc
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateRankParameter(const void *prim) {
+  OpParameter *rank_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (rank_param == nullptr) {
+    MS_LOG(ERROR) << "malloc RankParameter failed.";
+    return nullptr;
+  }
+  memset(rank_param, 0, sizeof(OpParameter));
+  rank_param->type_ = schema::PrimitiveType_Rank;
+  return reinterpret_cast<OpParameter *>(rank_param);
+}
+} // namespace
+
+Registry g_rankV0ParameterRegistry(schema::v0::PrimitiveType_Rank, PopulateRankParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc
new file mode 100644
index 0000000000..c23faa5299
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/reduce_populate_v0.cc
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/reduce_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateReduceParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto reduce_prim = primitive->value_as_Reduce();
+  ReduceParameter *reduce_param = reinterpret_cast<ReduceParameter *>(malloc(sizeof(ReduceParameter)));
+  if (reduce_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ReduceParameter failed.";
+    return nullptr;
+  }
+  memset(reduce_param, 0, sizeof(ReduceParameter));
+  reduce_param->op_parameter_.type_ = schema::PrimitiveType_ReduceFusion;
+
+  reduce_param->keep_dims_ = reduce_prim->keepDims();
+  reduce_param->reduce_to_end_ = reduce_prim->reduceToEnd();
+  reduce_param->coeff = reduce_prim->coeff();
+  auto axisVector = reduce_prim->axes();
+  if (axisVector->size() > MAX_SHAPE_SIZE) {
+    MS_LOG(ERROR) << "Reduce axes size " << axisVector->size() << " exceed limit " << MAX_SHAPE_SIZE;
+    free(reduce_param);
+    return nullptr;
+  }
+  reduce_param->num_axes_ = static_cast<int>(axisVector->size());
+  int i = 0;
+  for (auto iter = axisVector->begin(); iter != axisVector->end(); iter++) {
+    reduce_param->axes_[i++] = *iter;
+  }
+  reduce_param->mode_ = static_cast<int>(reduce_prim->mode());
+  return reinterpret_cast<OpParameter *>(reduce_param);
+}
+} // namespace
+
+Registry g_reduceV0ParameterRegistry(schema::v0::PrimitiveType_Reduce, PopulateReduceParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/reshape_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/reshape_populate_v0.cc
new file mode 100644
index 0000000000..9426688978
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/reshape_populate_v0.cc
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/common/log_adapter.h"
+#include "nnacl/reshape_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateReshapeParameter(const void *prim) {
+  ReshapeParameter *reshape_param = reinterpret_cast<ReshapeParameter *>(malloc(sizeof(ReshapeParameter)));
+  if (reshape_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ReshapeParameter failed.";
+    return nullptr;
+  }
+  memset(reshape_param, 0, sizeof(ReshapeParameter));
+  reshape_param->op_parameter_.type_ = schema::PrimitiveType_Reshape;
+  return reinterpret_cast<OpParameter *>(reshape_param);
+}
+} // namespace
+
+Registry g_reshapeV0ParameterRegistry(schema::v0::PrimitiveType_Reshape, PopulateReshapeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/resize_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/resize_populate_v0.cc
new file mode 100644
index 0000000000..a759c153b7
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/resize_populate_v0.cc
@@ -0,0 +1,50 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/resize_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateResizeParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto resize_prim = primitive->value_as_Resize();
+  ResizeParameter *resize_param = reinterpret_cast<ResizeParameter *>(malloc(sizeof(ResizeParameter)));
+  if (resize_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ResizeParameter failed.";
+    return nullptr;
+  }
+  memset(resize_param, 0, sizeof(ResizeParameter));
+  resize_param->op_parameter_.type_ = schema::PrimitiveType_Resize;
+
+  resize_param->method_ = static_cast<int>(resize_prim->method());
+  resize_param->new_height_ = resize_prim->newHeight();
+  resize_param->new_width_ = resize_prim->newWidth();
+  if (resize_prim->alignCorners()) {
+    resize_param->coordinate_transform_mode_ = 1;
+  } else {
+    resize_param->coordinate_transform_mode_ = 0;
+  }
+  resize_param->preserve_aspect_ratio_ = resize_prim->preserveAspectRatio();
+  return reinterpret_cast<OpParameter *>(resize_param);
+}
+} // namespace
+
+Registry g_resizeV0ParameterRegistry(schema::v0::PrimitiveType_Resize, PopulateResizeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc
new file mode 100644
index 0000000000..6f45635fd8
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/reverse_populate_v0.cc
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/reverse_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateReverseParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto reverse_prim = primitive->value_as_Reverse();
+
+  ReverseParameter *reverse_param = reinterpret_cast<ReverseParameter *>(malloc(sizeof(ReverseParameter)));
+  if (reverse_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ReverseParameter failed.";
+    return nullptr;
+  }
+  memset(reverse_param, 0, sizeof(ReverseParameter));
+  reverse_param->op_parameter_.type_ = schema::PrimitiveType_ReverseV2;
+  auto flatAxis = reverse_prim->axis();
+  if (flatAxis->size() > REVERSE_SHAPE_MAX_SIZE) {  // capacity of ReverseParameter::axis_
+    MS_LOG(ERROR) << "Reverse axis size " << flatAxis->size() << " exceed limit " << REVERSE_SHAPE_MAX_SIZE;
+    free(reverse_param);
+    return nullptr;
+  }
+  reverse_param->num_axis_ = flatAxis->size();
+  int i = 0;
+  for (auto iter = flatAxis->begin(); iter != flatAxis->end(); iter++) {
+    reverse_param->axis_[i++] = *iter;
+  }
+  return reinterpret_cast<OpParameter *>(reverse_param);
+}
+} // namespace
+
+Registry g_reverseV0ParameterRegistry(schema::v0::PrimitiveType_Reverse, PopulateReverseParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/reverse_sequence_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/reverse_sequence_populate_v0.cc
new file mode 100644
index 0000000000..f4782daea7
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/reverse_sequence_populate_v0.cc
@@ -0,0 +1,45 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/reverse_sequence_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateReverseSequenceParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto reverse_sequence_prim = primitive->value_as_ReverseSequence();
+  ReverseSequenceParameter *reverse_sequence_param =
+    reinterpret_cast<ReverseSequenceParameter *>(malloc(sizeof(ReverseSequenceParameter)));
+  if (reverse_sequence_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ReverseSequenceParameter failed.";
+    return nullptr;
+  }
+  memset(reverse_sequence_param, 0, sizeof(ReverseSequenceParameter));
+
+  reverse_sequence_param->op_parameter_.type_ = schema::PrimitiveType_ReverseSequence;
+  reverse_sequence_param->seq_axis_ = reverse_sequence_prim->seqAxis();
+  reverse_sequence_param->batch_axis_ = reverse_sequence_prim->batchAxis();
+  return reinterpret_cast<OpParameter *>(reverse_sequence_param);
+}
+} // namespace
+
+Registry g_reverseSequenceV0ParameterRegistry(schema::v0::PrimitiveType_ReverseSequence,
+                                              PopulateReverseSequenceParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/roi_pooling_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/roi_pooling_populate_v0.cc
new file mode 100644
index 0000000000..59d07c90d2
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/roi_pooling_populate_v0.cc
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/roi_pooling_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateROIPoolingParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto roi_pooling_prim = primitive->value_as_ROIPooling();
+
+  ROIPoolingParameter *roi_pooling_param = reinterpret_cast<ROIPoolingParameter *>(malloc(sizeof(ROIPoolingParameter)));
+  if (roi_pooling_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ROIPoolingParameter failed.";
+    return nullptr;
+  }
+  memset(roi_pooling_param, 0, sizeof(ROIPoolingParameter));
+  roi_pooling_param->op_parameter_.type_ = schema::PrimitiveType_ROIPooling;
+  roi_pooling_param->pooledH_ = roi_pooling_prim->pooledH();
+  roi_pooling_param->pooledW_ = roi_pooling_prim->pooledW();  // note: the pre-unification code read pooledH() here
+  roi_pooling_param->scale_ = roi_pooling_prim->scale();
+  return reinterpret_cast<OpParameter *>(roi_pooling_param);
+}
+} // namespace
+
+Registry g_ROIPoolingV0ParameterRegistry(schema::v0::PrimitiveType_ROIPooling, PopulateROIPoolingParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/scale_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/scale_populate_v0.cc
new file mode 100644
index 0000000000..59d778a101
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/scale_populate_v0.cc
@@ -0,0 +1,47 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/scale.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateScaleParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "input primitive is nullptr";
+    return nullptr;
+  }
+  auto scale_prim = primitive->value_as_Scale();
+  ScaleParameter *scale_param = reinterpret_cast<ScaleParameter *>(malloc(sizeof(ScaleParameter)));
+  if (scale_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ScaleParameter failed.";
+    return nullptr;
+  }
+  memset(scale_param, 0, sizeof(ScaleParameter));
+  scale_param->op_parameter_.type_ = schema::PrimitiveType_ScaleFusion;
+
+  scale_param->axis_ = scale_prim->axis();
+  scale_param->activation_type_ = scale_prim->activationType();
+  return reinterpret_cast<OpParameter *>(scale_param);
+}
+} // namespace
+
+Registry g_scaleV0ParameterRegistry(schema::v0::PrimitiveType_Scale, PopulateScaleParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/scatter_nd_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/scatter_nd_populate_v0.cc
new file mode 100644
index 0000000000..5222f6d485
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/scatter_nd_populate_v0.cc
@@ -0,0 +1,37 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateScatterNDParameter(const void *prim) {
+  OpParameter *scatter_nd_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (scatter_nd_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ScatterNDParameter failed.";
+    return nullptr;
+  }
+  memset(scatter_nd_param, 0, sizeof(OpParameter));
+  scatter_nd_param->type_ = schema::PrimitiveType_ScatterNd;
+  return reinterpret_cast<OpParameter *>(scatter_nd_param);
+}
+} // namespace
+
+Registry g_scatterNDV0ParameterRegistry(schema::v0::PrimitiveType_ScatterND, PopulateScatterNDParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/shape_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/shape_populate_v0.cc
new file mode 100644
index 0000000000..6e32434d26
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/shape_populate_v0.cc
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "src/common/log_adapter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateShapeParameter(const void *prim) {
+  OpParameter *shape_param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter)));
+  if (shape_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ShapeParameter failed.";
+    return nullptr;
+  }
+  memset(shape_param, 0, sizeof(OpParameter));
+  shape_param->type_ = schema::PrimitiveType_Shape;
+  return reinterpret_cast<OpParameter *>(shape_param);
+}
+} // namespace
+
+Registry g_shapeV0ParameterRegistry(schema::v0::PrimitiveType_Shape, PopulateShapeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/skip_gram_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/skip_gram_populate_v0.cc
new file mode 100644
index 0000000000..5d8d7988a8
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/skip_gram_populate_v0.cc
@@ -0,0 +1,44 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/skip_gram_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSkipGramParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto skip_gram_prim = primitive->value_as_SkipGram();
+  SkipGramParameter *skipGramParameter = reinterpret_cast<SkipGramParameter *>(malloc(sizeof(SkipGramParameter)));
+  if (skipGramParameter == nullptr) {
+    MS_LOG(ERROR) << "malloc SkipGramParameter failed.";
+    return nullptr;
+  }
+  memset(skipGramParameter, 0, sizeof(SkipGramParameter));
+  skipGramParameter->op_parameter_.type_ = schema::PrimitiveType_SkipGram;
+
+  skipGramParameter->ngram_size = skip_gram_prim->ngramSize();
+  skipGramParameter->max_skip_size = skip_gram_prim->maxSkipSize();
+  skipGramParameter->include_all_ngrams = skip_gram_prim->includeAllGrams();
+  return reinterpret_cast<OpParameter *>(skipGramParameter);
+}
+} // namespace
+
+Registry g_skipGramV0ParameterRegistry(schema::v0::PrimitiveType_SkipGram, PopulateSkipGramParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc
new file mode 100644
index 0000000000..0d2742d211
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/slice_populate_v0.cc
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/slice_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSliceParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto slice_prim = primitive->value_as_Slice();
+  SliceParameter *slice_param = reinterpret_cast<SliceParameter *>(malloc(sizeof(SliceParameter)));
+  if (slice_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SliceParameter failed.";
+    return nullptr;
+  }
+  memset(slice_param, 0, sizeof(SliceParameter));
+
+  slice_param->op_parameter_.type_ = schema::PrimitiveType_SliceFusion;
+  auto param_begin = slice_prim->begin();
+  auto param_size = slice_prim->size();
+  auto param_axis = slice_prim->axes();
+  if (param_begin->size() != param_size->size() || param_begin->size() != param_axis->size()) {
+    MS_LOG(ERROR) << "Slice begin, size and axes should have the same size.";
+    free(slice_param);
+    return nullptr;
+  }
+
+  slice_param->param_length_ = static_cast<int32_t>(param_begin->size());
+  for (int32_t i = 0; i < slice_param->param_length_; ++i) {
+    slice_param->begin_[i] = *(param_begin->begin() + i);
+    slice_param->size_[i] = *(param_size->begin() + i);
+    slice_param->axis_[i] = *(param_axis->begin() + i);
+  }
+
+  return reinterpret_cast<OpParameter *>(slice_param);
+}
+} // namespace
+
+Registry g_sliceV0ParameterRegistry(schema::v0::PrimitiveType_Slice, PopulateSliceParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/softmax_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/softmax_populate_v0.cc
new file mode 100644
index 0000000000..390ddaeb39
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/softmax_populate_v0.cc
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/softmax_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSoftmaxParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto softmax_prim = primitive->value_as_SoftMax();
+
+  SoftmaxParameter *softmax_param = reinterpret_cast<SoftmaxParameter *>(malloc(sizeof(SoftmaxParameter)));
+  if (softmax_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SoftmaxParameter failed.";
+    return nullptr;
+  }
+  memset(softmax_param, 0, sizeof(SoftmaxParameter));
+  softmax_param->op_parameter_.type_ = schema::PrimitiveType_Softmax;
+  softmax_param->axis_ = softmax_prim->axis();
+  return reinterpret_cast<OpParameter *>(softmax_param);
+}
+} // namespace
+
+Registry g_softMaxV0ParameterRegistry(schema::v0::PrimitiveType_SoftMax, PopulateSoftmaxParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/space_to_batch_nd_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/space_to_batch_nd_populate_v0.cc
new file mode 100644
index 0000000000..81bacedd49
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/space_to_batch_nd_populate_v0.cc
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits>
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/space_to_batch_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSpaceToBatchNDParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto space_to_batch_nd_prim = primitive->value_as_SpaceToBatchND();
+  auto *space_batch_param_nd = reinterpret_cast<SpaceToBatchParameter *>(malloc(sizeof(SpaceToBatchParameter)));
+  if (space_batch_param_nd == nullptr) {
+    MS_LOG(ERROR) << "malloc SpaceToBatchParameter failed.";
+    return nullptr;
+  }
+  memset(space_batch_param_nd, 0, sizeof(SpaceToBatchParameter));
+
+  space_batch_param_nd->op_parameter_.type_ = schema::PrimitiveType_SpaceToBatchND;
+  auto block_sizes = space_to_batch_nd_prim->blockShape();
+  space_batch_param_nd->m_ = block_sizes->size();
+  if (((size_t)block_sizes->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of block_sizes.size() is too big";
+    free(space_batch_param_nd);
+    return nullptr;
+  }
+  memcpy(space_batch_param_nd->block_sizes_, (block_sizes->data()), block_sizes->size() * sizeof(int));
+  auto paddings = space_to_batch_nd_prim->paddings();
+  if (((size_t)paddings->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of paddings.size() is too big";
+    free(space_batch_param_nd);
+    return nullptr;
+  }
+  memcpy(space_batch_param_nd->paddings_, (paddings->data()), paddings->size() * sizeof(int));
+  return reinterpret_cast<OpParameter *>(space_batch_param_nd);
+}
+} // namespace
+
+Registry g_SpaceToBatchNDV0ParameterRegistry(schema::v0::PrimitiveType_SpaceToBatchND, PopulateSpaceToBatchNDParameter,
+                                             SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
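Every populate function here pairs malloc with an immediate memset; without the zeroing, fields that a given schema version never writes would hold garbage when the kernel later reads them. A hypothetical wrapper for the idiom (MallocZeroedParameter is an illustrative name, not an API in this patch):

// Sketch of the repeated malloc + memset idiom. Usage would look like:
//   auto *p = MallocZeroedParameter<SpaceToBatchParameter>();
#include <cstdlib>
#include <cstring>

template <typename ParamT>
ParamT *MallocZeroedParameter() {
  auto *param = static_cast<ParamT *>(malloc(sizeof(ParamT)));
  if (param == nullptr) {
    return nullptr;  // caller logs "malloc ... failed." and aborts populate
  }
  memset(param, 0, sizeof(ParamT));  // zero everything the schema may not set
  return param;
}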
diff --git a/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc
new file mode 100644
index 0000000000..c01154da13
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/space_to_batch_populate_v0.cc
@@ -0,0 +1,63 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <limits>
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/space_to_batch_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSpaceToBatchParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto space_to_batch_prim = primitive->value_as_SpaceToBatch();
+  SpaceToBatchParameter *space_batch_param =
+    reinterpret_cast<SpaceToBatchParameter *>(malloc(sizeof(SpaceToBatchParameter)));
+  if (space_batch_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SpaceToBatchParameter failed.";
+    return nullptr;
+  }
+  memset(space_batch_param, 0, sizeof(SpaceToBatchParameter));
+  space_batch_param->op_parameter_.type_ = schema::PrimitiveType_SpaceToBatch;
+  auto block_sizes = space_to_batch_prim->blockShape();
+  space_batch_param->m_ = block_sizes->size();
+  if (((size_t)block_sizes->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of block_sizes.size() is too big";
+    free(space_batch_param);
+    return nullptr;
+  }
+  memcpy(space_batch_param->block_sizes_, (block_sizes->data()), block_sizes->size() * sizeof(int));
+  auto paddings = space_to_batch_prim->paddings();
+  if (((size_t)paddings->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+    MS_LOG(ERROR) << "The value of paddings.size() is too big";
+    free(space_batch_param);
+    return nullptr;
+  }
+  memcpy(space_batch_param->paddings_, (paddings->data()), paddings->size() * sizeof(int));
+
+  return reinterpret_cast<OpParameter *>(space_batch_param);
+}
+} // namespace
+
+Registry g_spaceToBatchV0ParameterRegistry(schema::v0::PrimitiveType_SpaceToBatch, PopulateSpaceToBatchParameter,
+                                           SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/space_to_depth_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/space_to_depth_populate_v0.cc
new file mode 100644
index 0000000000..9306c97fc8
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/space_to_depth_populate_v0.cc
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/space_to_depth_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSpaceToDepthParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto space_to_depth_prim = primitive->value_as_SpaceToDepth();
+  SpaceToDepthParameter *space_depth_param =
+    reinterpret_cast<SpaceToDepthParameter *>(malloc(sizeof(SpaceToDepthParameter)));
+  if (space_depth_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SpaceToDepthParameter failed.";
+    return nullptr;
+  }
+  memset(space_depth_param, 0, sizeof(SpaceToDepthParameter));
+  space_depth_param->op_parameter_.type_ = schema::PrimitiveType_SpaceToDepth;
+  space_depth_param->block_size_ = space_to_depth_prim->blockSize();
+  if (space_to_depth_prim->format() != schema::v0::Format::Format_NHWC) {
+    MS_LOG(ERROR) << "Currently only NHWC format is supported.";
+    free(space_depth_param);
+    return nullptr;
+  }
+  return reinterpret_cast<OpParameter *>(space_depth_param);
+}
+} // namespace
+
+Registry g_spaceToDepthV0ParameterRegistry(schema::v0::PrimitiveType_SpaceToDepth, PopulateSpaceToDepthParameter,
+                                           SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/sparse_to_dense_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/sparse_to_dense_populate_v0.cc
new file mode 100644
index 0000000000..dca503b095
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/sparse_to_dense_populate_v0.cc
@@ -0,0 +1,39 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/sparse_to_dense_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSparseToDenseParameter(const void *prim) {
+  auto *sparse_to_dense_param = reinterpret_cast<SparseToDenseParameter *>(malloc(sizeof(SparseToDenseParameter)));
+  if (sparse_to_dense_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SparseToDenseParameter failed.";
+    return nullptr;
+  }
+  memset(sparse_to_dense_param, 0, sizeof(SparseToDenseParameter));
+  sparse_to_dense_param->op_parameter_.type_ = schema::PrimitiveType_SparseToDense;
+  return reinterpret_cast<OpParameter *>(sparse_to_dense_param);
+}
+} // namespace
+
+Registry g_sparseToDenseV0ParameterRegistry(schema::v0::PrimitiveType_SparseToDense, PopulateSparseToDenseParameter,
+                                            SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc
new file mode 100644
index 0000000000..9a619deb21
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/split_populate_v0.cc
@@ -0,0 +1,64 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/split_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSplitParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto split_prim = primitive->value_as_Split();
+  auto *split_param = reinterpret_cast<SplitParameter *>(malloc(sizeof(SplitParameter)));
+  if (split_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SplitParameter failed.";
+    return nullptr;
+  }
+  memset(split_param, 0, sizeof(SplitParameter));
+  split_param->op_parameter_.type_ = schema::PrimitiveType_Split;
+  split_param->num_split_ = split_prim->numberSplit();
+  if (split_param->num_split_ > std::numeric_limits<int>::max() / static_cast<int>(sizeof(int))) {
+    MS_LOG(ERROR) << "The value of split_param->num_split_ is too big";
+    free(split_param);
+    return nullptr;
+  }
+  int *split_sizes = reinterpret_cast<int *>(malloc(split_param->num_split_ * sizeof(int)));
+  if (split_sizes == nullptr) {
+    MS_LOG(ERROR) << "malloc split size of SplitParameter failed.";
+    free(split_param);
+    return nullptr;
+  }
+  memset(split_sizes, 0, split_param->num_split_ * sizeof(int));
+  split_param->split_sizes_ = split_sizes;
+  auto split_sizes_vector_ = split_prim->sizeSplits();
+  if (split_sizes_vector_ != nullptr) {
+    int i = 0;
+    for (auto iter = split_sizes_vector_->begin(); iter != split_sizes_vector_->end(); iter++) {
+      split_param->split_sizes_[i++] = *iter;
+    }
+    split_param->split_count_ = split_param->num_split_;
+  } else {
+    split_param->split_count_ = 0;
+  }
+  split_param->split_dim_ = split_prim->splitDim();
+  return reinterpret_cast<OpParameter *>(split_param);
+}
+} // namespace
+
+Registry g_splitV0ParameterRegistry(schema::v0::PrimitiveType_Split, PopulateSplitParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
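SplitParameter is the only parameter in this set that owns a second heap allocation (split_sizes_), so the usual "free the OpParameter" cleanup is not enough for it. A sketch of the caller-side cleanup this implies (the function name is illustrative, not part of the patch):

// Illustrative cleanup: the nested split_sizes_ allocation made in
// PopulateSplitParameter must be released before the parameter itself.
#include <cstdlib>

void DestroySplitParameter(SplitParameter *split_param) {
  if (split_param == nullptr) {
    return;
  }
  free(split_param->split_sizes_);
  split_param->split_sizes_ = nullptr;
  free(split_param);
}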
diff --git a/mindspore/lite/src/ops/populate/v0/squared_difference_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/squared_difference_populate_v0.cc
new file mode 100644
index 0000000000..6d50809c76
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/squared_difference_populate_v0.cc
@@ -0,0 +1,40 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/arithmetic.h"
+#include "src/ops/populate/v0/arithmetic_populate_v0.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSquaredDifferenceParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  ArithmeticParameter *param = PopulateArithmeticV0CommonPara(primitive);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "PopulateArithmeticV0CommonPara failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_SquaredDifference;
+  return reinterpret_cast<OpParameter *>(param);
+}
+} // namespace
+
+Registry g_squaredDifferenceV0ParameterRegistry(schema::v0::PrimitiveType_SquaredDifference,
+                                                PopulateSquaredDifferenceParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/squeeze_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/squeeze_populate_v0.cc
new file mode 100644
index 0000000000..154ff6bd7c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/squeeze_populate_v0.cc
@@ -0,0 +1,49 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/squeeze_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSqueezeParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto squeeze_prim = primitive->value_as_Squeeze();
+  SqueezeParameter *squeeze_param = reinterpret_cast<SqueezeParameter *>(malloc(sizeof(SqueezeParameter)));
+  if (squeeze_param == nullptr) {
+    MS_LOG(ERROR) << "malloc SqueezeParameter failed.";
+    return nullptr;
+  }
+  memset(squeeze_param, 0, sizeof(SqueezeParameter));
+  squeeze_param->op_parameter_.type_ = schema::PrimitiveType_Squeeze;
+  if (squeeze_prim->axis() != nullptr) {
+    squeeze_param->axis_size_ = squeeze_prim->axis()->size();
+    for (size_t i = 0; i < squeeze_param->axis_size_; i++) {
+      squeeze_param->axis_[i] = *(squeeze_prim->axis()->begin() + i);
+    }
+  } else {
+    squeeze_param->axis_size_ = 0;
+  }
+
+  return reinterpret_cast<OpParameter *>(squeeze_param);
+}
+} // namespace
+
+Registry g_squeezeV0ParameterRegistry(schema::v0::PrimitiveType_Squeeze, PopulateSqueezeParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/stack_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/stack_populate_v0.cc
new file mode 100644
index 0000000000..1c11ce9a3c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/stack_populate_v0.cc
@@ -0,0 +1,42 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/stack_parameter.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateStackParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto stack_prim = primitive->value_as_Stack();
+  StackParameter *stack_param = reinterpret_cast<StackParameter *>(malloc(sizeof(StackParameter)));
+  if (stack_param == nullptr) {
+    MS_LOG(ERROR) << "malloc StackParameter failed.";
+    return nullptr;
+  }
+  memset(stack_param, 0, sizeof(StackParameter));
+
+  stack_param->op_parameter_.type_ = schema::PrimitiveType_Stack;
+  stack_param->axis_ = stack_prim->axis();
+  return reinterpret_cast<OpParameter *>(stack_param);
+}
+} // namespace
+
+Registry g_stackV0ParameterRegistry(schema::v0::PrimitiveType_Stack, PopulateStackParameter, SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/strided_slice_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/strided_slice_populate_v0.cc
new file mode 100644
index 0000000000..933a5ce02c
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/strided_slice_populate_v0.cc
@@ -0,0 +1,75 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "src/ops/populate/v0/strided_slice_populate_v0.h"
+#include <limits>
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/strided_slice_parameter.h"
+
+namespace mindspore {
+namespace lite {
+OpParameter *PopulateStridedSliceParameterV0(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto strided_slice_prim = primitive->value_as_StridedSlice();
+  StridedSliceParameter *strided_slice_param =
+    reinterpret_cast<StridedSliceParameter *>(malloc(sizeof(StridedSliceParameter)));
+  if (strided_slice_param == nullptr) {
+    MS_LOG(ERROR) << "malloc StridedSliceParameter failed.";
+    return nullptr;
+  }
+  memset(strided_slice_param, 0, sizeof(StridedSliceParameter));
+  strided_slice_param->op_parameter_.type_ = schema::PrimitiveType_StridedSlice;
+
+  auto begin = strided_slice_prim->begin();
+  if (begin != nullptr) {
+    if (((size_t)begin->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+      MS_LOG(ERROR) << "The value of begin.size() is too big";
+      free(strided_slice_param);
+      return nullptr;
+    }
+    memcpy(strided_slice_param->begins_, (begin->data()), begin->size() * sizeof(int));
+  }
+  auto end = strided_slice_prim->end();
+  if (end != nullptr) {
+    if (((size_t)end->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+      MS_LOG(ERROR) << "The value of end.size() is too big";
+      free(strided_slice_param);
+      return nullptr;
+    }
+    memcpy(strided_slice_param->ends_, (end->data()), end->size() * sizeof(int));
+  }
+  auto stride = strided_slice_prim->stride();
+  if (stride != nullptr) {
+    if (((size_t)stride->size()) > std::numeric_limits<size_t>::max() / sizeof(int)) {
+      MS_LOG(ERROR) << "The value of stride.size() is too big";
+      free(strided_slice_param);
+      return nullptr;
+    }
+    memcpy(strided_slice_param->strides_, (stride->data()), stride->size() * sizeof(int));
+  }
+  strided_slice_param->begins_mask_ = strided_slice_prim->beginMask();
+  strided_slice_param->ends_mask_ = strided_slice_prim->endMask();
+  strided_slice_param->ellipsisMask_ = strided_slice_prim->ellipsisMask();
+  strided_slice_param->newAxisMask_ = strided_slice_prim->newAxisMask();
+  strided_slice_param->shrinkAxisMask_ = strided_slice_prim->shrinkAxisMask();
+
+  return reinterpret_cast<OpParameter *>(strided_slice_param);
+}
+
+Registry g_stridedSliceV0ParameterRegistry(schema::v0::PrimitiveType_StridedSlice, PopulateStridedSliceParameterV0,
+                                           SCHEMA_V0);
+} // namespace lite
+} // namespace mindspore
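The five mask fields copied above are per-axis bitmasks: bit i of beginMask (respectively endMask) tells the kernel to ignore begins_[i] (ends_[i]) and use the axis boundary instead. An illustration of the usual decoding, not taken from this patch:

// Illustrative only: reading one axis' flag out of a strided-slice mask.
#include <cstdint>

inline bool AxisBitSet(uint32_t mask, int axis) { return ((mask >> axis) & 1u) != 0; }
// e.g. AxisBitSet(begins_mask_, 0) means axis 0 starts from its default
// boundary regardless of the begins_[0] value that was copied in.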
+ */
+
+#ifndef MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
+#define MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
+
+#include "nnacl/strided_slice_parameter.h"
+
+namespace mindspore {
+namespace lite {
+OpParameter *PopulateStridedSliceParameterV0(const void *prim);
+
+}  // namespace lite
+}  // namespace mindspore
+#endif  // MINDSPORE_LITE_SRC_OPS_POPULATE_STRIDED_SLICE_POPULATE_H_
diff --git a/mindspore/lite/src/ops/populate/v0/sub_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/sub_populate_v0.cc
new file mode 100644
index 0000000000..8694f24641
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/sub_populate_v0.cc
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/arithmetic.h"
+#include "src/ops/populate/arithmetic_populate.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateSubParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto sub_prim = primitive->value_as_Sub();
+  ArithmeticParameter *param = PopulateArithmeticCommonPara(primitive);
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "PopulateArithmeticCommonPara failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_SubFusion;  // note: check whether this SubFusion override is actually needed here
+  param->activation_type_ = sub_prim->activationType();
+  return reinterpret_cast<OpParameter *>(param);
+}
+}  // namespace
+
+Registry g_subV0ParameterRegistry(schema::v0::PrimitiveType_Sub, PopulateSubParameter, SCHEMA_V0);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/switch_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/switch_populate_v0.cc
new file mode 100644
index 0000000000..3cda18f091
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/switch_populate_v0.cc
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
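Each of these files ends by constructing a file-scope Registry object, which records the populate function against the v0 primitive type under the SCHEMA_V0 version tag, so the runtime can dispatch on the (primitive type, schema version) pair. A hypothetical lookup is sketched below; PopulateRegistry and GetParameterCreator are assumed names for the machinery declared in populate_register.h, shown only to illustrate the dispatch.

// Illustrative sketch of a (type, schema version) dispatch; names are assumptions.
OpParameter *PopulateFromPrimitive(const void *prim, int prim_type, int schema_version) {
  auto creator = PopulateRegistry::GetInstance()->GetParameterCreator(prim_type, schema_version);
  if (creator == nullptr) {
    MS_LOG(ERROR) << "no populate function registered for type " << prim_type;
    return nullptr;
  }
  // For (schema::v0::PrimitiveType_Sub, SCHEMA_V0) this would invoke PopulateSubParameter.
  return creator(prim);
}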
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateSwitchParameter(const void *prim) { + OpParameter *switch_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (switch_parameter == nullptr) { + MS_LOG(ERROR) << "malloc SwitchParameter failed."; + return nullptr; + } + memset(switch_parameter, 0, sizeof(OpParameter)); + switch_parameter->type_ = schema::PrimitiveType_Switch; + + return reinterpret_cast<OpParameter *>(switch_parameter); +} +} // namespace + +Registry g_switchv0ParameterRegistry(schema::v0::PrimitiveType_Switch, PopulateSwitchParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tensorlistfromtensor_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tensorlistfromtensor_populate_v0.cc new file mode 100644 index 0000000000..b806fe45ee --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tensorlistfromtensor_populate_v0.cc @@ -0,0 +1,44 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "nnacl/tensorlist_parameter.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTensorListFromTensorParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto tensorList = primitive->value_as_TensorListFromTensor(); + TensorListParameter *TensorList_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); + if (TensorList_param == nullptr) { + MS_LOG(ERROR) << "malloc TensorListParameter failed."; + return nullptr; + } + memset(TensorList_param, 0, sizeof(TensorListParameter)); + TensorList_param->op_parameter_.type_ = schema::PrimitiveType_TensorListFromTensor; + TensorList_param->shape_type_ = (TypeId)(tensorList->shapeType()); + TensorList_param->element_dtype_ = (TypeId)(tensorList->elementDType()); + return reinterpret_cast<OpParameter *>(TensorList_param); +} +} // namespace + +Registry g_tensorListFromTensorV0ParameterRegistry(schema::v0::PrimitiveType_TensorListFromTensor, + PopulateTensorListFromTensorParameter, SCHEMA_V0); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tensorlistgetitem_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tensorlistgetitem_populate_v0.cc new file mode 100644 index 0000000000..7722fd50ef --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tensorlistgetitem_populate_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/tensorlist_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTensorListGetItemParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto tensorList_prim = primitive->value_as_TensorListGetItem(); + TensorListParameter *getItem_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); + if (getItem_param == nullptr) { + MS_LOG(ERROR) << "malloc TensorListParameter failed."; + return nullptr; + } + memset(getItem_param, 0, sizeof(TensorListParameter)); + getItem_param->op_parameter_.type_ = schema::PrimitiveType_TensorListGetItem; + getItem_param->element_dtype_ = (TypeId)tensorList_prim->elementDType(); + return reinterpret_cast<OpParameter *>(getItem_param); +} +} // namespace + +Registry g_tensorListGetItemV0ParameterRegistry(schema::v0::PrimitiveType_TensorListGetItem, + PopulateTensorListGetItemParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tensorlistreserve_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tensorlistreserve_populate_v0.cc new file mode 100644 index 0000000000..bc311f0f74 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tensorlistreserve_populate_v0.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/tensorlist_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTensorListReserveParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto tensorList_prim = primitive->value_as_TensorListReserve(); + TensorListParameter *reserve_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); + if (reserve_param == nullptr) { + MS_LOG(ERROR) << "malloc TensorListParameter failed."; + return nullptr; + } + memset(reserve_param, 0, sizeof(TensorListParameter)); + reserve_param->op_parameter_.type_ = schema::PrimitiveType_TensorListReserve; + reserve_param->element_dtype_ = (TypeId)tensorList_prim->elementDType(); + return reinterpret_cast<OpParameter *>(reserve_param); +} +} // namespace + +Registry g_tensorListReserveV0ParameterRegistry(schema::v0::PrimitiveType_TensorListReserve, + PopulateTensorListReserveParameter, SCHEMA_V0); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tensorlistsetlitem_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tensorlistsetlitem_populate_v0.cc new file mode 100644 index 0000000000..7f947aae1b --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tensorlistsetlitem_populate_v0.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/tensorlist_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTensorListSetItemParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto tensorList_prim = primitive->value_as_TensorListSetItem(); + TensorListParameter *setItem_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); + if (setItem_param == nullptr) { + MS_LOG(ERROR) << "malloc TensorListParameter failed."; + return nullptr; + } + memset(setItem_param, 0, sizeof(TensorListParameter)); + setItem_param->op_parameter_.type_ = schema::PrimitiveType_TensorListSetItem; + setItem_param->element_dtype_ = (TypeId)tensorList_prim->elementDType(); + return reinterpret_cast<OpParameter *>(setItem_param); +} +} // namespace + +Registry g_tensorListSetItemV0ParameterRegistry(schema::v0::PrimitiveType_TensorListSetItem, + PopulateTensorListSetItemParameter, SCHEMA_V0); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tensorliststack_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tensorliststack_populate_v0.cc new file mode 100644 index 0000000000..cf5495efa4 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tensorliststack_populate_v0.cc @@ -0,0 +1,43 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/tensorlist_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTensorListStackParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto tensorList_prim = primitive->value_as_TensorListStack(); + TensorListParameter *stack_param = reinterpret_cast<TensorListParameter *>(malloc(sizeof(TensorListParameter))); + if (stack_param == nullptr) { + MS_LOG(ERROR) << "malloc TensorListParameter failed."; + return nullptr; + } + memset(stack_param, 0, sizeof(TensorListParameter)); + stack_param->op_parameter_.type_ = schema::PrimitiveType_TensorListStack; + stack_param->element_dtype_ = (TypeId)tensorList_prim->elementDType(); + stack_param->num_element_ = tensorList_prim->numElements(); + return reinterpret_cast<OpParameter *>(stack_param); +} +} // namespace + +Registry g_tensorListStackV0ParameterRegistry(schema::v0::PrimitiveType_TensorListStack, + PopulateTensorListStackParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/tile_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/tile_populate_v0.cc new file mode 100644 index 0000000000..1fba50856f --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/tile_populate_v0.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/base/tile_base.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateTileParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto tile_prim = primitive->value_as_Tile();
+  TileParameter *tile_param = reinterpret_cast<TileParameter *>(malloc(sizeof(TileParameter)));
+  if (tile_param == nullptr) {
+    MS_LOG(ERROR) << "malloc TileParameter failed.";
+    return nullptr;
+  }
+  memset(tile_param, 0, sizeof(TileParameter));
+  tile_param->op_parameter_.type_ = schema::PrimitiveType_TileFusion;
+#ifdef SUPPORT_TRAIN
+  auto multiples = tile_prim->multiples();
+  tile_param->in_dim_ = multiples->size();
+  for (int i = 0; i < tile_param->in_dim_; ++i) {
+    tile_param->multiples_[i] = *(multiples->begin() + i);
+  }
+#else
+  auto dims = tile_prim->dims();
+  if (dims != nullptr) {
+    for (size_t i = 0; i < dims->size(); i++) {
+      tile_param->dims_[i] = static_cast<int>(dims->Get(i));
+    }
+    tile_param->dims_size_ = dims->size();
+  }
+#endif
+  return reinterpret_cast<OpParameter *>(tile_param);
+}
+}  // namespace
+
+Registry g_tileV0ParameterRegistry(schema::v0::PrimitiveType_Tile, PopulateTileParameter, SCHEMA_V0);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/topk_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/topk_populate_v0.cc
new file mode 100644
index 0000000000..f87f03e590
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/topk_populate_v0.cc
@@ -0,0 +1,43 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
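The tile dims copy above, like the begin/end/stride copies in strided_slice_populate_v0.cc, writes a flatbuffer vector into a fixed-size C array; the SIZE_MAX overflow guards in those functions do not bound the element count against the destination capacity. A stricter helper is sketched below. The capacity is passed in explicitly because the actual array bound (e.g. a MAX_SHAPE_SIZE constant) is an assumption here, not confirmed by this patch.

// Sketch: guarded copy of a flatbuffer int vector into a fixed-capacity array.
template <typename FbVector>
bool CopyAxesChecked(const FbVector *src, int *dst, size_t dst_capacity) {
  if (src == nullptr) {
    return true;  // absent optional field: keep the zero-initialized array
  }
  if (src->size() > dst_capacity) {
    MS_LOG(ERROR) << "axis vector size " << src->size() << " exceeds capacity " << dst_capacity;
    return false;
  }
  for (size_t i = 0; i < src->size(); ++i) {
    dst[i] = static_cast<int>(src->Get(i));
  }
  return true;
}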
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/fp32/topk_fp32.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateTopKParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto topk_prim = primitive->value_as_TopK(); + TopkParameter *topk_param = reinterpret_cast<TopkParameter *>(malloc(sizeof(TopkParameter))); + if (topk_param == nullptr) { + MS_LOG(ERROR) << "malloc TopkParameter failed."; + return nullptr; + } + memset(topk_param, 0, sizeof(TopkParameter)); + topk_param->op_parameter_.type_ = schema::PrimitiveType_TopKFusion; + + topk_param->k_ = topk_prim->k(); + topk_param->sorted_ = topk_prim->sorted(); + return reinterpret_cast<OpParameter *>(topk_param); +} +} // namespace + +Registry g_topKV0ParameterRegistry(schema::v0::PrimitiveType_TopK, PopulateTopKParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/transpose_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/transpose_populate_v0.cc new file mode 100644 index 0000000000..defa29b26c --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/transpose_populate_v0.cc @@ -0,0 +1,49 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/transpose.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateTransposeParameter(const void *prim) {
+  auto *primitive = static_cast<const schema::v0::Primitive *>(prim);
+  auto transpose_prim = primitive->value_as_Transpose();
+  TransposeParameter *transpose_param = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter)));
+  if (transpose_param == nullptr) {
+    MS_LOG(ERROR) << "malloc TransposeParameter failed.";
+    return nullptr;
+  }
+  memset(transpose_param, 0, sizeof(TransposeParameter));
+
+  transpose_param->op_parameter_.type_ = schema::PrimitiveType_Transpose;
+  auto perm_vector = transpose_prim->perm();
+  int i = 0;
+  for (auto iter = perm_vector->begin(); iter != perm_vector->end(); iter++) {
+    transpose_param->perm_[i++] = *iter;
+  }
+  transpose_param->num_axes_ = i;
+  transpose_param->perm_size_ = perm_vector->size();
+
+  return reinterpret_cast<OpParameter *>(transpose_param);
+}
+}  // namespace
+
+Registry g_transposeV0ParameterRegistry(schema::v0::PrimitiveType_Transpose, PopulateTransposeParameter, SCHEMA_V0);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/unique_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/unique_populate_v0.cc
new file mode 100644
index 0000000000..f7819fa84d
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/unique_populate_v0.cc
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "schema/model_v0_generated.h"
+#include "src/ops/populate/populate_register.h"
+#include "nnacl/fp32/unique_fp32.h"
+
+namespace mindspore {
+namespace lite {
+namespace {
+OpParameter *PopulateUniqueParameter(const void *prim) {
+  UniqueParameter *unique_param = reinterpret_cast<UniqueParameter *>(malloc(sizeof(UniqueParameter)));
+  if (unique_param == nullptr) {
+    MS_LOG(ERROR) << "malloc UniqueParameter failed.";
+    return nullptr;
+  }
+  memset(unique_param, 0, sizeof(UniqueParameter));
+  unique_param->op_parameter_.type_ = schema::PrimitiveType_Unique;
+  return reinterpret_cast<OpParameter *>(unique_param);
+}
+}  // namespace
+
+Registry g_uniqueV0ParameterRegistry(schema::v0::PrimitiveType_Unique, PopulateUniqueParameter, SCHEMA_V0);
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/populate/v0/unsorted_segment_sum_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/unsorted_segment_sum_populate_v0.cc
new file mode 100644
index 0000000000..27881842f0
--- /dev/null
+++ b/mindspore/lite/src/ops/populate/v0/unsorted_segment_sum_populate_v0.cc
@@ -0,0 +1,38 @@
+/**
+ * Copyright 2021 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateUnsortedSegmentSumParameter(const void *prim) { + OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc UnsortedSegmentSum Parameter failed."; + return nullptr; + } + memset(param, 0, sizeof(OpParameter)); + param->type_ = schema::PrimitiveType_UnsortedSegmentSum; + return param; +} +} // namespace + +Registry g_unsortedSegmentSumV0ParameterRegistry(schema::v0::PrimitiveType_UnsortedSegmentSum, + PopulateUnsortedSegmentSumParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc new file mode 100644 index 0000000000..93ea17f0c6 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/unsqueeze_populate_v0.cc @@ -0,0 +1,47 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/unsqueeze_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateUnsqueezeParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto unsqueeze_prim = primitive->value_as_Unsqueeze(); + + UnSqueezeParameter *unsqueeze_param = reinterpret_cast<UnSqueezeParameter *>(malloc(sizeof(UnSqueezeParameter))); + if (unsqueeze_param == nullptr) { + MS_LOG(ERROR) << "malloc UnSqueezeParameter failed."; + return nullptr; + } + memset(unsqueeze_param, 0, sizeof(UnSqueezeParameter)); + unsqueeze_param->op_parameter_.type_ = schema::PrimitiveType_Unsqueeze; + auto flatAxis = unsqueeze_prim->axis(); + unsqueeze_param->num_dim_ = flatAxis->size(); + int i = 0; + for (auto iter = flatAxis->begin(); iter != flatAxis->end(); iter++) { + unsqueeze_param->dims_[i++] = *iter; + } + return reinterpret_cast<OpParameter *>(unsqueeze_param); +} +} // namespace + +Registry g_unsqueezeV0ParameterRegistry(schema::v0::PrimitiveType_Unsqueeze, PopulateUnsqueezeParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/unstack_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/unstack_populate_v0.cc new file mode 100644 index 0000000000..80dbfa3254 --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/unstack_populate_v0.cc @@ -0,0 +1,42 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" +#include "nnacl/unstack_parameter.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateUnstackParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto unstack_prim = primitive->value_as_Unstack(); + UnstackParameter *unstack_param = reinterpret_cast<UnstackParameter *>(malloc(sizeof(UnstackParameter))); + if (unstack_param == nullptr) { + MS_LOG(ERROR) << "malloc UnstackParameter failed."; + return nullptr; + } + memset(unstack_param, 0, sizeof(UnstackParameter)); + + unstack_param->op_parameter_.type_ = schema::PrimitiveType_Unstack; + unstack_param->axis_ = unstack_prim->axis(); + return reinterpret_cast<OpParameter *>(unstack_param); +} +} // namespace + +Registry g_unstackV0ParameterRegistry(schema::v0::PrimitiveType_Unstack, PopulateUnstackParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/where_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/where_populate_v0.cc new file mode 100644 index 0000000000..77e3035d9a --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/where_populate_v0.cc @@ -0,0 +1,37 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +OpParameter *PopulateWhereParameter(const void *prim) { + OpParameter *where_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (where_parameter == nullptr) { + MS_LOG(ERROR) << "malloc Where parameter failed."; + return nullptr; + } + memset(where_parameter, 0, sizeof(OpParameter)); + where_parameter->type_ = schema::PrimitiveType_Where; + return reinterpret_cast<OpParameter *>(where_parameter); +} +} // namespace + +Registry g_whereV0ParameterRegistry(schema::v0::PrimitiveType_Where, PopulateWhereParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc b/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc new file mode 100644 index 0000000000..0231f800bf --- /dev/null +++ b/mindspore/lite/src/ops/populate/v0/while_populate_v0.cc @@ -0,0 +1,48 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "schema/model_v0_generated.h" +#include "src/ops/populate/populate_register.h" + +namespace mindspore { +namespace lite { +namespace { +typedef struct WhileParemeter { + OpParameter op_parameter_; + int body_subgraph_index; + int cond_subgraph_index; +} WhileParemeter; + +OpParameter *PopulateWhileParameter(const void *prim) { + auto *primitive = static_cast<const schema::v0::Primitive *>(prim); + auto while_prim = primitive->value_as_While(); + WhileParemeter *while_paremeter = reinterpret_cast<WhileParemeter *>(malloc(sizeof(WhileParemeter))); + if (while_paremeter == nullptr) { + MS_LOG(ERROR) << "malloc WhileParemeter failed."; + return nullptr; + } + memset(while_paremeter, 0, sizeof(WhileParemeter)); + + while_paremeter->op_parameter_.type_ = schema::PrimitiveType_While; + while_paremeter->body_subgraph_index = while_prim->bodySubgraphIndex(); + while_paremeter->cond_subgraph_index = while_prim->condSubgraphIndex(); + return reinterpret_cast<OpParameter *>(while_paremeter); +} +} // namespace + +Registry g_whileV0ParemeterRegistry(schema::v0::PrimitiveType_While, PopulateWhileParameter, SCHEMA_V0); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/where_populate.cc b/mindspore/lite/src/ops/populate/where_populate.cc index a759447ca2..5f1790f839 100644 --- a/mindspore/lite/src/ops/populate/where_populate.cc +++ b/mindspore/lite/src/ops/populate/where_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,23 +13,23 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" -#include "mindspore/lite/nnacl/fp32/where_fp32.h" namespace mindspore { namespace lite { - -OpParameter *PopulateWhereParameter(const mindspore::lite::PrimitiveC *primitive) { - WhereParameter *where_parameter = reinterpret_cast<WhereParameter *>(malloc(sizeof(WhereParameter))); +namespace { +OpParameter *PopulateWhereParameter(const void *prim) { + OpParameter *where_parameter = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); if (where_parameter == nullptr) { MS_LOG(ERROR) << "malloc Where parameter failed."; return nullptr; } memset(where_parameter, 0, sizeof(OpParameter)); - where_parameter->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + where_parameter->type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(where_parameter); } -Registry WhereParameterRegistry(schema::PrimitiveType_Where, PopulateWhereParameter); +} // namespace +Registry g_whereParameterRegistry(schema::PrimitiveType_Where, PopulateWhereParameter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/populate/while_populate.cc b/mindspore/lite/src/ops/populate/while_populate.cc index efcb64d177..f040731015 100644 --- a/mindspore/lite/src/ops/populate/while_populate.cc +++ b/mindspore/lite/src/ops/populate/while_populate.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019-2020 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
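The paired While hunks make the schema-version split visible: the v0 reader above uses camelCase flatbuffer accessors and registers with SCHEMA_V0, while the current-schema reader below uses snake_case accessors and registers with SCHEMA_CUR. Both fill the same WhileParemeter struct:

// v0 schema (SCHEMA_V0):
while_paremeter->body_subgraph_index = while_prim->bodySubgraphIndex();
while_paremeter->cond_subgraph_index = while_prim->condSubgraphIndex();
// current schema (SCHEMA_CUR):
while_paremeter->body_subgraph_index = value->body_subgraph_index();
while_paremeter->cond_subgraph_index = value->cond_subgraph_index();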
@@ -13,9 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -#include "src/ops/while.h" -#include "src/ops/primitive_c.h" #include "src/ops/populate/populate_register.h" namespace mindspore { @@ -27,19 +24,20 @@ typedef struct WhileParemeter { int cond_subgraph_index; } WhileParemeter; -OpParameter *PopulateWhileParemeter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateWhileParemeter(const void *prim) { WhileParemeter *while_paremeter = reinterpret_cast<WhileParemeter *>(malloc(sizeof(WhileParemeter))); if (while_paremeter == nullptr) { MS_LOG(ERROR) << "malloc WhileParemeter failed."; return nullptr; } memset(while_paremeter, 0, sizeof(WhileParemeter)); - auto param = reinterpret_cast<mindspore::lite::While *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - while_paremeter->op_parameter_.type_ = primitive->Type(); - while_paremeter->body_subgraph_index = param->GetBodySubgraphIndex(); - while_paremeter->cond_subgraph_index = param->GetCondSubgraphIndex(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_While(); + while_paremeter->op_parameter_.type_ = primitive->value_type(); + while_paremeter->body_subgraph_index = value->body_subgraph_index(); + while_paremeter->cond_subgraph_index = value->cond_subgraph_index(); return reinterpret_cast<OpParameter *>(while_paremeter); } -Registry WhileParemeterRegistry(schema::PrimitiveType_While, PopulateWhileParemeter); +Registry WhileParemeterRegistry(schema::PrimitiveType_While, PopulateWhileParemeter, SCHEMA_CUR); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/src/ops/power.cc b/mindspore/lite/src/ops/power.cc deleted file mode 100644 index 492b2ff37f..0000000000 --- a/mindspore/lite/src/ops/power.cc +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/power.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float Power::GetPower() const { return this->primitive_->value.AsPower()->power; } -float Power::GetScale() const { return this->primitive_->value.AsPower()->scale; } -float Power::GetShift() const { return this->primitive_->value.AsPower()->shift; } - -void Power::SetPower(float power) { this->primitive_->value.AsPower()->power = power; } -void Power::SetScale(float scale) { this->primitive_->value.AsPower()->scale = scale; } -void Power::SetShift(float shift) { this->primitive_->value.AsPower()->shift = shift; } - -int Power::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Power; - } - if (this->primitive_->value.type != schema::PrimitiveType_Power) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::PowerT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - - if (prim.GetAttr("scale") == nullptr) { - MS_LOG(INFO) << "Power's attr scale is set to default"; - attr->scale = 1.0f; - } else { - attr->scale = GetValue<float>(prim.GetAttr("scale")); - } - if (prim.GetAttr("power") == nullptr) { - MS_LOG(INFO) << "Power's attr power is set to default"; - attr->power = 1.0f; - } else { - attr->power = GetValue<float>(prim.GetAttr("power")); - } - if (prim.GetAttr("shift") == nullptr) { - MS_LOG(INFO) << "Power's attr shift is set to default"; - attr->shift = 0; - } else { - attr->shift = GetValue<float>(prim.GetAttr("shift")); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -float Power::GetPower() const { return this->primitive_->value_as_Power()->power(); } -float Power::GetScale() const { return this->primitive_->value_as_Power()->scale(); } -float Power::GetShift() const { return this->primitive_->value_as_Power()->shift(); } -int Power::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Power(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Power return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreatePower(*fbb, attr->power(), attr->scale(), attr->shift()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Power, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PowerCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Power>(primitive); } -Registry PowerRegistry(schema::PrimitiveType_Power, PowerCreator); -#endif - -int Power::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - auto x_tensor = inputs.at(0); - MS_ASSERT(x_tensor != nullptr); - Tensor *exp_tensor = nullptr; - if (inputs.size() == 2) { - exp_tensor = inputs.at(1); - MS_ASSERT(exp_tensor != nullptr); - } - auto output_tensor = 
outputs.at(0); - MS_ASSERT(output_tensor != nullptr); - output_tensor->set_data_type(x_tensor->data_type()); - output_tensor->set_format(x_tensor->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (exp_tensor != nullptr) { - if ((exp_tensor->shape().size() > 1 && exp_tensor->shape() != x_tensor->shape()) || - (exp_tensor->shape().size() == 1 && exp_tensor->shape().at(0) != 1) || - exp_tensor->data_type() != x_tensor->data_type()) { - MS_LOG(ERROR) << "Power inputs shape or type is not equal!"; - return RET_INPUT_TENSOR_ERROR; - } - } - - output_tensor->set_shape(x_tensor->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/power.h b/mindspore/lite/src/ops/power.h deleted file mode 100644 index 2da7dcb86a..0000000000 --- a/mindspore/lite/src/ops/power.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_POWER_H_ -#define LITE_MINDSPORE_LITE_C_OPS_POWER_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Power : public PrimitiveC { - public: - Power() = default; - ~Power() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Power, PrimitiveC); - explicit Power(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetPower(float power); - void SetScale(float scale); - void SetShift(float shift); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetPower() const; - float GetScale() const; - float GetShift() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_POWER_H_ diff --git a/mindspore/lite/src/ops/power_grad.cc b/mindspore/lite/src/ops/power_grad.cc deleted file mode 100644 index e95a3fcabf..0000000000 --- a/mindspore/lite/src/ops/power_grad.cc +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/power_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float PowerGrad::GetPower() const { return this->primitive_->value.AsPowerGrad()->power; } -float PowerGrad::GetScale() const { return this->primitive_->value.AsPowerGrad()->scale; } -float PowerGrad::GetShift() const { return this->primitive_->value.AsPowerGrad()->shift; } - -void PowerGrad::SetPower(float power) { this->primitive_->value.AsPowerGrad()->power = power; } -void PowerGrad::SetScale(float scale) { this->primitive_->value.AsPowerGrad()->scale = scale; } -void PowerGrad::SetShift(float shift) { this->primitive_->value.AsPowerGrad()->shift = shift; } -int PowerGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_PowerGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_PowerGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::PowerGradT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->power = GetValue<float>(prim.GetAttr("power")); - attr->scale = GetValue<float>(prim.GetAttr("scale")); - attr->shift = GetValue<float>(prim.GetAttr("shift")); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -float PowerGrad::GetPower() const { return this->primitive_->value_as_PowerGrad()->power(); } -float PowerGrad::GetScale() const { return this->primitive_->value_as_PowerGrad()->scale(); } -float PowerGrad::GetShift() const { return this->primitive_->value_as_PowerGrad()->shift(); } - -int PowerGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_PowerGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_PowerGrad return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreatePowerGrad(*fbb, attr->power(), attr->scale(), attr->shift()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_PowerGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PowerGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<PowerGrad>(primitive); -} -Registry PowerGradRegistry(schema::PrimitiveType_PowerGrad, PowerGradCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/power_grad.h b/mindspore/lite/src/ops/power_grad.h deleted file mode 100644 index 48e67994fd..0000000000 --- a/mindspore/lite/src/ops/power_grad.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_POWER_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_POWER_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class PowerGrad : public PrimitiveC { - public: - PowerGrad() = default; - ~PowerGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(PowerGrad, PrimitiveC); - explicit PowerGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetPower(float power); - void SetScale(float scale); - void SetShift(float shift); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - float GetPower() const; - float GetScale() const; - float GetShift() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_POWER_GRAD_H_ diff --git a/mindspore/lite/src/ops/primitive_c.cc b/mindspore/lite/src/ops/primitive_c.cc deleted file mode 100644 index 3b7965c472..0000000000 --- a/mindspore/lite/src/ops/primitive_c.cc +++ /dev/null @@ -1,1178 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/primitive_c.h" -#ifdef PRIMITIVE_WRITEABLE -#include <memory> -#include <map> -#include "tools/converter/quantizer/quantize_util.h" -#include "src/ops/assert_op.h" -#include "src/ops/space_to_batch.h" -#include "src/ops/space_to_batch_nd.h" -#include "src/ops/conv2d.h" -#include "src/ops/roi_pooling.h" -#include "src/ops/topk.h" -#include "src/ops/broadcast_to.h" -#include "src/ops/unsqueeze.h" -#include "src/ops/unstack.h" -#include "src/ops/depth_to_space.h" -#include "src/ops/batch_to_space.h" -#include "src/ops/prior_box.h" -#include "src/ops/lstm.h" -#include "src/ops/softmax.h" -#include "src/ops/activation.h" -#include "src/ops/deconv2d.h" -#include "src/ops/reduce.h" -#include "src/ops/pooling.h" -#include "src/ops/fused_batchnorm.h" -#include "src/ops/batch_norm.h" -#include "src/ops/power.h" -#include "src/ops/range.h" -#include "src/ops/add.h" -#include "src/ops/sub.h" -#include "src/ops/div.h" -#include "src/ops/bias_add.h" -#include "src/ops/expand_dims.h" -#include "src/ops/full_connection.h" -#include "src/ops/shape.h" -#include "src/ops/elu.h" -#include "src/ops/embedding_lookup.h" -#include "src/ops/quant_dtype_cast.h" -#include "src/ops/matmul.h" -#include "src/ops/resize.h" -#include "src/ops/tile.h" -#include "src/ops/one_hot.h" -#include "src/ops/space_to_depth.h" -#include "src/ops/split.h" -#include "src/ops/argmax.h" -#include "src/ops/argmin.h" -#include "src/ops/cast.h" -#include "src/ops/reshape.h" -#include "src/ops/scale.h" -#include "src/ops/concat.h" -#include "src/ops/nchw2nhwc.h" -#include "src/ops/slice.h" -#include "src/ops/squeeze.h" -#include "src/ops/flatten.h" -#include "src/ops/nhwc2nchw.h" -#include "src/ops/stack.h" -#include "src/ops/crop.h" -#include "src/ops/addn.h" -#include "src/ops/gather.h" -#include "src/ops/gather_nd.h" -#include "src/ops/local_response_normalization.h" -#include "src/ops/pad.h" -#include "src/ops/p_relu.h" -#include "src/ops/leaky_relu.h" -#include "src/ops/reverse_sequence.h" -#include "src/ops/dedepthwise_conv2d.h" -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/mul.h" -#include "src/ops/eltwise.h" -#include "src/ops/fill.h" -#include "src/ops/transpose.h" -#include "src/ops/log.h" -#include "src/ops/abs.h" -#include "src/ops/sin.h" -#include "src/ops/cos.h" -#include "src/ops/sqrt.h" -#include "src/ops/square.h" -#include "src/ops/exp.h" -#include "src/ops/rsqrt.h" -#include "src/ops/maximum.h" -#include "src/ops/minimum.h" -#include "src/ops/strided_slice.h" -#include "src/ops/reverse.h" -#include "src/ops/logical_and.h" -#include "src/ops/logical_or.h" -#include "src/ops/logical_not.h" -#include "src/ops/floor_div.h" -#include "src/ops/floor_mod.h" -#include "src/ops/mod.h" -#include "src/ops/equal.h" -#include "src/ops/not_equal.h" -#include "src/ops/less.h" -#include "src/ops/less_equal.h" -#include "src/ops/greater_equal.h" -#include "src/ops/greater.h" -#include "src/ops/floor.h" -#include "src/ops/squared_difference.h" -#include "src/ops/ceil.h" -#include "src/ops/round.h" -#include "src/ops/unique.h" -#include "src/ops/zeros_like.h" -#include "src/ops/return.h" -#include "src/ops/where.h" -#include "src/ops/scatter_nd.h" -#include "src/ops/constant_of_shape.h" -#include "src/ops/dequant.h" -#include "src/ops/make_tuple.h" -#include "src/ops/quant.h" -#include "src/ops/tuple_get_item.h" -#include "src/ops/l2_norm.h" -#include "src/ops/neg.h" -#include "src/ops/sparse_to_dense.h" -#include "src/ops/detection_post_process.h" -#include "src/ops/dropout.h" -#include 
"src/ops/real_div.h" -#include "src/ops/lsh_projection.h" -#include "src/ops/hashtable_lookup.h" -#include "src/ops/skip_gram.h" -#include "src/ops/clip.h" -#include "src/ops/adder.h" -#include "src/ops/custom_predict.h" -#include "src/ops/custom_normalize.h" -#include "src/ops/custom_extract_features.h" -#include "src/ops/upsample.h" -#include "src/ops/layer_norm.h" -#include "src/ops/non_max_suppression.h" -#include "src/ops/rfft.h" -#include "src/ops/fft_real.h" -#include "src/ops/fft_imag.h" -#include "src/ops/audio_spectrogram.h" -#include "src/ops/mfcc.h" -#include "src/ops/identity.h" -#include "src/ops/instance_norm.h" -#include "src/ops/while.h" -#include "src/ops/oneslike.h" -#include "src/ops/unsorted_segment_sum.h" -#include "src/ops/reciprocal.h" -#include "src/ops/constant.h" -#include "src/ops/tensorlist_fromtensor.h" -#include "src/ops/tensorlist_getitem.h" -#include "src/ops/tensorlist_setitem.h" -#include "src/ops/tensorlist_reserve.h" -#include "src/ops/tensorlist_stack.h" -#include "src/ops/merge.h" -#include "src/ops/switch.h" -#include "src/ops/partial.h" -#include "src/ops/if.h" -#include "src/ops/select.h" -#include "src/ops/gelu.h" -#include "src/ops/gru.h" -#include "src/ops/size.h" -#include "src/ops/random_standard_normal.h" -#include "src/ops/invert_permutation.h" -#include "src/ops/crop_and_resize.h" -#include "src/ops/nonzero.h" -#include "src/ops/erf.h" -#include "src/ops/lin_space.h" -#include "src/ops/uniform_real.h" -#include "src/ops/rank.h" -#include "src/ops/is_finite.h" -#include "src/ops/neg_grad.h" -#include "src/ops/activation_grad.h" -#include "src/ops/apply_momentum.h" -#include "src/ops/bias_grad.h" -#include "src/ops/pooling_grad.h" -#include "src/ops/conv2d_grad_filter.h" -#include "src/ops/conv2d_grad_input.h" -#include "src/ops/group_conv2d_grad_input.h" -#include "src/ops/power_grad.h" -#include "src/ops/softmax_cross_entropy.h" -#include "src/ops/sparse_softmax_cross_entropy.h" -#include "src/ops/bn_grad.h" -#include "src/ops/arithmetic_grad.h" -#include "src/ops/depend.h" -#include "src/ops/flatten_grad.h" -#include "src/ops/log_grad.h" -#include "src/ops/abs_grad.h" -#include "src/ops/sgd.h" -#include "src/ops/adam.h" -#include "src/ops/assign.h" -#include "src/ops/dropout_grad.h" -#include "src/ops/maximum_grad.h" -#include "src/ops/minimum_grad.h" -#include "src/ops/control_depend.h" -#include "src/ops/assign_add.h" -#include "src/ops/binary_cross_entropy.h" -#include "src/ops/binary_cross_entropy_grad.h" -#include "src/ops/smooth_l1_loss.h" -#include "src/ops/smooth_l1_loss_grad.h" -#include "src/ops/sigmoid_cross_entropy_with_logits.h" -#include "src/ops/sigmoid_cross_entropy_with_logits_grad.h" -#include "src/ops/strided_slice_grad.h" -#endif -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> CastToInt(const ValuePtr &value) { - if (value == nullptr) { - MS_LOG(WARNING) << "valueptr is nullptr."; - return {}; - } - std::vector<int> cur_value; - if (utils::isa<ValueSequeuePtr>(value)) { - if (value->cast<ValueSequeuePtr>()->value().front()->type()->number_type() == kNumberTypeInt64) { - auto origin_value = GetValue<std::vector<int64_t>>(value); - for (size_t index = 0; index < origin_value.size(); ++index) { - cur_value.push_back(static_cast<int>(origin_value[index])); - } - } else { - cur_value = GetValue<std::vector<int>>(value); - } - } else { - if (value->type()->number_type() == kNumberTypeInt64) { - cur_value.push_back(static_cast<int>(GetValue<int64_t>(value))); - } else { - 
cur_value.push_back(GetValue<int>(value)); - } - } - return cur_value; -} - -void PrimitiveC::CalFloatScopeByMeanAndStddev(const double &mean, const double &stdDev, float *mMin, float *mMax) { - const float qmin = 0; - const float qmax = 255; - *mMin = static_cast<float>((qmin - mean) / stdDev); - *mMax = static_cast<float>((qmax - mean) / stdDev); -} - -void PrimitiveC::FillDefaultInputQuantParamIfNeed(const size_t &inputSize) { - std::vector<schema::QuantParamT> quants; - schema::QuantParamT quantParam; - - if (input_quant_param_.size() == kDoubleNum) { - quants.clear(); - quantParam.min = 0.0; - quantParam.max = 0.0; - quantParam.zeroPoint = 0; - quantParam.scale = input_quant_param_.at(0).at(0).scale * input_quant_param_.at(1).at(0).scale; - quants.emplace_back(quantParam); - input_quant_param_.emplace_back(quants); - } - // fill input_quant_param_ by not inited quant_parm - if (input_quant_param_.size() < inputSize) { - schema::QuantParamT tmpQuantParam; - quants.emplace_back(tmpQuantParam); - input_quant_param_.insert(input_quant_param_.end(), inputSize - input_quant_param_.size(), quants); - } -} - -void PrimitiveC::PopulaterInputQuantParam(const Primitive &prim, const std::vector<AnfNodePtr> &inputs, - bool narrowRangeQuantParam, int32_t numbitsRangeQuantParam) { - std::vector<schema::QuantParamT> quants; - schema::QuantParamT quantParam; - auto inputMin = prim.GetAttr("input_minq"); - auto inputMax = prim.GetAttr("input_maxq"); - if (inputMin != nullptr && inputMax != nullptr) { - auto inputMinPtr = inputMin->cast<TensorPtr>(); - auto inputMaxPtr = inputMax->cast<TensorPtr>(); - auto *minBuf = static_cast<float *>(inputMinPtr->data_c()); - auto *maxBuf = static_cast<float *>(inputMaxPtr->data_c()); - quantParam.min = *minBuf; - quantParam.max = *maxBuf; - auto ret = quant::CalQuantizationParams(&quantParam, quantParam.min, quantParam.max, narrowRangeQuantParam, - numbitsRangeQuantParam); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Can't calculate quant parameters"; - return; - } - quants.emplace_back(quantParam); - input_quant_param_.emplace_back(quants); - } - - quants.clear(); - auto filterMin = prim.GetAttr("filter_minq"); - auto filterMax = prim.GetAttr("filter_maxq"); - if (filterMin != nullptr && filterMax != nullptr) { - auto filterMinPtr = filterMin->cast<TensorPtr>(); - auto filterMaxPtr = filterMax->cast<TensorPtr>(); - auto *minBuf = static_cast<float *>(filterMinPtr->data_c()); - auto *maxBuf = static_cast<float *>(filterMaxPtr->data_c()); - quantParam.min = FLT_MAX; - quantParam.max = FLT_MIN; - for (int i = 0; i < filterMinPtr->ElementsNum(); ++i) { - quantParam.min = (*(minBuf) < quantParam.min) ? (*minBuf) : quantParam.min; - quantParam.max = (*(maxBuf) > quantParam.max) ? 
(*maxBuf) : quantParam.max; - minBuf++; - maxBuf++; - } - auto ret = quant::CalQuantizationParams(&quantParam, quantParam.min, quantParam.max, true, numbitsRangeQuantParam); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Can't calculate quant parameters"; - return; - } - quants.emplace_back(quantParam); - input_quant_param_.emplace_back(quants); - } - FillDefaultInputQuantParamIfNeed(inputs.size()); -} - -void PrimitiveC::PopulaterOutputQuantParam(const Primitive &prim, bool narrowRangeQuantParam, - int32_t numbitsRangeQuantParam) { - std::vector<schema::QuantParamT> quants; - schema::QuantParamT quantParam; - auto outputMin = prim.GetAttr("output_minq"); - auto outputMax = prim.GetAttr("output_maxq"); - if (outputMin != nullptr && outputMax != nullptr) { - auto outputMinPtr = outputMin->cast<TensorPtr>(); - auto outputMaxPtr = outputMax->cast<TensorPtr>(); - auto *minBuf = static_cast<float *>(outputMinPtr->data_c()); - auto *maxBuf = static_cast<float *>(outputMaxPtr->data_c()); - quantParam.min = *minBuf; - quantParam.max = *maxBuf; - auto ret = quant::CalQuantizationParams(&quantParam, quantParam.min, quantParam.max, narrowRangeQuantParam, - numbitsRangeQuantParam); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Can't calculate quant parameters"; - return; - } - quants.emplace_back(quantParam); - output_quant_param_.emplace_back(quants); - } else { - schema::QuantParamT tmpQuantParam; - quants.emplace_back(tmpQuantParam); - output_quant_param_.emplace_back(quants); - } -} - -void PrimitiveC::PopulaterQuantParam(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - auto narrow_range = prim.GetAttr("narrow_range"); - bool narrowRangeQuantParam = false; - if (narrow_range != nullptr) { - if (utils::isa<tensor::TensorPtr>(narrow_range)) { - auto narrow_range_tensor = narrow_range->cast<tensor::TensorPtr>(); - narrowRangeQuantParam = *reinterpret_cast<bool *>(narrow_range_tensor->data_c()); - } else if (utils::isa<ImmTraits<bool>::type>(narrow_range)) { - narrowRangeQuantParam = GetValue<bool>(narrow_range); - } else { - MS_LOG(ERROR) << "valueptr is invalid."; - return; - } - } - auto num_bits = prim.GetAttr("num_bits"); - int32_t numbitsRangeQuantParam = 8; - if (num_bits != nullptr) { - if (utils::isa<tensor::TensorPtr>(num_bits)) { - auto num_bits_tensor = num_bits->cast<tensor::TensorPtr>(); - numbitsRangeQuantParam = *reinterpret_cast<int64_t *>(num_bits_tensor->data_c()); - } else if (utils::isa<ImmTraits<int64_t>::type>(num_bits)) { - numbitsRangeQuantParam = GetValue<int64_t>(num_bits); - } - } - PopulaterInputQuantParam(prim, inputs, narrowRangeQuantParam, numbitsRangeQuantParam); - PopulaterOutputQuantParam(prim, narrowRangeQuantParam, numbitsRangeQuantParam); -} - -void PrimitiveC::GetAttrDataFromInput(const AnfNodePtr &inputNode, std::vector<int> *data) { - if (inputNode->isa<ValueNode>()) { - auto valNode = inputNode->cast<ValueNodePtr>(); - MS_ASSERT(valNode != nullptr); - auto val = valNode->value(); - MS_ASSERT(val != nullptr); - if (val->isa<ValueTuple>()) { - auto tuple = val->cast<ValueTuplePtr>(); - MS_ASSERT(tuple != nullptr); - for (size_t i = 0; i < tuple->size(); i++) { - auto elem = tuple->value().at(i); - MS_ASSERT(elem != nullptr); - data->emplace_back(CastToInt(elem).front()); - } - } - } -} - -schema::PrimitiveT *PrimitiveC::primitiveT() const { return this->primitive_; } - -void PrimitiveC::ClearPrimitiveT() { this->primitive_ = nullptr; } - -void PrimitiveC::set_input_quant_params(const std::vector<std::vector<schema::QuantParamT>> &input_quant_param) { - 
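The Populater* methods above extract the recorded min/max tensors from the primitive's attributes and hand the actual math to quant::CalQuantizationParams, which lives elsewhere in the converter. A sketch of one common asymmetric formulation, under the assumption that the helper computes something close to this (MindSpore's exact clamping and rounding rules may differ):

#include <algorithm>
#include <cmath>
#include <cstdio>

struct QuantParam {
  double scale = 1.0;
  int zero_point = 0;
};

// Asymmetric quantization: map the observed float range [min, max] onto the
// integer range [qmin, qmax]; narrow_range drops the lowest code so that the
// int8 grid becomes [-127, 127] instead of [-128, 127].
QuantParam CalQuantizationParamsSketch(float min, float max, bool narrow_range, int num_bits) {
  min = std::min(min, 0.0f);  // the range must contain 0 so that 0.0f maps exactly
  max = std::max(max, 0.0f);
  const int qmin = -(1 << (num_bits - 1)) + (narrow_range ? 1 : 0);
  const int qmax = (1 << (num_bits - 1)) - 1;
  QuantParam q;
  q.scale = (static_cast<double>(max) - min) / (qmax - qmin);
  if (q.scale == 0.0) q.scale = 1.0;  // degenerate all-zero range
  q.zero_point = std::clamp(static_cast<int>(std::round(qmin - min / q.scale)), qmin, qmax);
  return q;
}

int main() {
  const QuantParam q = CalQuantizationParamsSketch(-1.0f, 3.0f, /*narrow_range=*/false, /*num_bits=*/8);
  std::printf("scale=%.6f zero_point=%d\n", q.scale, q.zero_point);  // scale=0.015686 zero_point=-64
}

This also explains the FillDefaultInputQuantParamIfNeed rule above: a bias-like third input never carries its own min/max, so its scale is taken as the product of the input scale and the weight scale.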
this->input_quant_param_ = input_quant_param; -} - -void PrimitiveC::set_input_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &input_quant_param) { - if (index >= this->input_quant_param_.size()) { - this->input_quant_param_.resize(index + 1); - } - this->input_quant_param_.at(index) = input_quant_param; -} - -void PrimitiveC::set_output_quant_params(const std::vector<std::vector<schema::QuantParamT>> &output_quant_param) { - this->output_quant_param_ = output_quant_param; -} - -void PrimitiveC::set_output_quant_param(const size_t &index, - const std::vector<schema::QuantParamT> &output_quant_param) { - MS_ASSERT(index < this->output_quant_param_.size()); - this->output_quant_param_.at(index) = output_quant_param; -} - -bool PrimitiveC::IsInputQuantParamsInited() { - if (this->input_quant_param_.empty()) { - return false; - } - for (auto &quant_param : this->input_quant_param_) { - if (!quant_param.front().inited) { - return false; - } - } - return true; -} - -bool PrimitiveC::IsOutputQuantParamsInited() { - if (this->output_quant_param_.empty()) { - return false; - } - for (auto &quant_param : this->output_quant_param_) { - if (!quant_param.front().inited) { - return false; - } - } - return true; -} - -void PrimitiveC::ClearInputOutputQuantParam() { - input_quant_param_.clear(); - output_quant_param_.clear(); -} - -void PrimitiveC::AddInputQuantParam(const std::vector<schema::QuantParamT> &quant_param) { - this->input_quant_param_.emplace_back(quant_param); -} -std::vector<std::vector<schema::QuantParamT>> PrimitiveC::input_quant_params() const { return input_quant_param_; } - -void PrimitiveC::AddOutputQuantParam(const std::vector<schema::QuantParamT> &quant_param) { - this->output_quant_param_.emplace_back(quant_param); -} -std::vector<std::vector<schema::QuantParamT>> PrimitiveC::output_quant_params() const { return output_quant_param_; } - -void PrimitiveC::set_quant_type(const schema::QuantType &quant_type) { this->quant_type_ = quant_type; } - -schema::QuantType PrimitiveC::quant_type() const { return quant_type_; } - -bool PrimitiveC::enable_huffman_code() const { return enable_huffman_code_; } - -void PrimitiveC::set_enable_huffman_code(bool enable_huffman_code) { this->enable_huffman_code_ = enable_huffman_code; } - -std::shared_ptr<PrimitiveC> GetReturnPrim() { - auto return_primitiveT = new (std::nothrow) schema::PrimitiveT; - if (return_primitiveT == nullptr) { - MS_LOG(ERROR) << "new PrimitiveT failed"; - return nullptr; - } - return_primitiveT->value.type = schema::PrimitiveType_Return; - return_primitiveT->value.value = new (std::nothrow) schema::ReturnT; - if (return_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new ReturnT failed"; - delete (return_primitiveT); - return nullptr; - } - return std::make_shared<Return>(return_primitiveT); -} - -std::shared_ptr<PrimitiveC> GetMakeTuplePrim() { - auto make_tuple_primitiveT = new (std::nothrow) schema::PrimitiveT; - if (make_tuple_primitiveT == nullptr) { - MS_LOG(ERROR) << "new PrimitiveT failed"; - return nullptr; - } - make_tuple_primitiveT->value.type = schema::PrimitiveType_MakeTuple; - make_tuple_primitiveT->value.value = new (std::nothrow) schema::MakeTupleT; - if (make_tuple_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new MakeTupleT failed"; - delete (make_tuple_primitiveT); - return nullptr; - } - return std::make_shared<MakeTuple>(make_tuple_primitiveT); -} - -std::shared_ptr<PrimitiveC> GetTupleGetItemPrim() { - auto tuple_get_item_primitiveT = new (std::nothrow) 
schema::PrimitiveT(); - if (tuple_get_item_primitiveT == nullptr) { - MS_LOG(ERROR) << "new PrimitiveT failed"; - return nullptr; - } - tuple_get_item_primitiveT->value.type = schema::PrimitiveType_TupleGetItem; - tuple_get_item_primitiveT->value.value = new (std::nothrow) schema::TupleGetItemT; - if (tuple_get_item_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new TupleGetItemT failed"; - delete (tuple_get_item_primitiveT); - return nullptr; - } - return std::make_shared<TupleGetItem>(tuple_get_item_primitiveT); -} - -template <typename T, typename = std::enable_if<std::is_base_of<PrimitiveC, T>::value>> -std::shared_ptr<PrimitiveC> NewPrimitiveC(const mindspore::Primitive &prim, const std::vector<AnfNodePtr> &inputs, - const schema::QuantType &quantType, bool train_flag = false) { - auto primc = std::make_shared<T>(); - if (primc == nullptr) { - MS_LOG(ERROR) << "make_shared PrimitiveC failed"; - return nullptr; - } - primc->set_quant_type(quantType); - primc->set_train_flag(train_flag); - auto ret = primc->UnPackAttr(prim, inputs); - if (ret != RET_OK) { - MS_LOG(ERROR) << "UnPackAttr failed"; - return nullptr; - } - return primc; -} - -std::shared_ptr<PrimitiveC> PrimitiveC::Create(const Primitive &prim, const std::vector<AnfNodePtr> &inputs, - const schema::QuantType &quantType, bool train_flag) { - const auto &op_type = prim.name(); - if (op_type == "ReLU" || op_type == "ReLU6" || op_type == "Sigmoid" || op_type == "HSwish" || op_type == "HSigmoid") { - return NewPrimitiveC<Activation>(prim, inputs, quantType); - } else if (op_type == "Abs") { - return NewPrimitiveC<Abs>(prim, inputs, quantType); - } else if (op_type == "AddN") { - return NewPrimitiveC<AddN>(prim, inputs, quantType); - } else if (op_type == "BatchNorm") { - return NewPrimitiveC<BatchNorm>(prim, inputs, quantType); - } else if (op_type == "BiasAdd") { - return NewPrimitiveC<BiasAdd>(prim, inputs, quantType); - } else if (op_type == "Concat") { - return NewPrimitiveC<Concat>(prim, inputs, quantType); - } else if (op_type == "Conv2D") { - return NewPrimitiveC<Conv2D>(prim, inputs, quantType, train_flag); - } else if (op_type == "Cos") { - return NewPrimitiveC<Cos>(prim, inputs, quantType); - } else if (op_type == "DepthwiseConv2dNative" || op_type == "DepthwiseConv2D") { - return NewPrimitiveC<DepthwiseConv2D>(prim, inputs, quantType); - } else if (op_type == "Dequant") { - return NewPrimitiveC<Dequant>(prim, inputs, quantType); - } else if (op_type == "Flatten") { - return NewPrimitiveC<Flatten>(prim, inputs, quantType); - } else if (op_type == "FloorDiv") { - return NewPrimitiveC<FloorDiv>(prim, inputs, quantType); - } else if ((op_type == "FusedBatchNorm") || (op_type == "FusedBatchNormEx")) { - return NewPrimitiveC<FusedBatchNorm>(prim, inputs, quantType); - } else if ((op_type == "make_tuple") || (op_type == "MakeTuple")) { - return NewPrimitiveC<MakeTuple>(prim, inputs, quantType); - } else if (op_type == "MatMul" || op_type == "BatchMatMul") { - return NewPrimitiveC<MatMul>(prim, inputs, quantType); - } else if (op_type == "Mul") { - return NewPrimitiveC<Mul>(prim, inputs, quantType); - } else if (op_type == "MaxPool" || op_type == "AvgPool") { - return NewPrimitiveC<Pooling>(prim, inputs, quantType); - } else if (op_type == "Quant") { - return NewPrimitiveC<Quant>(prim, inputs, quantType); - } else if (op_type == "RealDiv") { - return NewPrimitiveC<RealDiv>(prim, inputs, quantType); - } else if (op_type == "Reciprocal") { - return NewPrimitiveC<Reciprocal>(prim, inputs, quantType); - } else if 
(op_type == "ReduceMax") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "ReduceMean") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "ReduceMin") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "ReduceProd") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "ReduceSum") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "ReduceSumSquare") { - return NewPrimitiveC<Reduce>(prim, inputs, quantType); - } else if (op_type == "Reshape") { - return NewPrimitiveC<Reshape>(prim, inputs, quantType); - } else if (op_type == "Rsqrt") { - return NewPrimitiveC<Rsqrt>(prim, inputs, quantType); - } else if (op_type == "Sin") { - return NewPrimitiveC<Sin>(prim, inputs, quantType); - } else if (op_type == "Slice") { - return NewPrimitiveC<Slice>(prim, inputs, quantType); - } else if (op_type == "Squeeze") { - return NewPrimitiveC<Squeeze>(prim, inputs, quantType); - } else if (op_type == "TensorAdd") { - return NewPrimitiveC<Add>(prim, inputs, quantType); - } else if (op_type == "Transpose") { - return NewPrimitiveC<Transpose>(prim, inputs, quantType); - } else if (op_type == "Elu") { - return NewPrimitiveC<Elu>(prim, inputs, quantType); - } else if (op_type == "Log") { - return NewPrimitiveC<Log>(prim, inputs, quantType); - } else if (op_type == "Exp") { - return NewPrimitiveC<Exp>(prim, inputs, quantType); - } else if (op_type == "Neg") { - return NewPrimitiveC<Neg>(prim, inputs, quantType); - } else if (op_type == "DeConv2D") { - return NewPrimitiveC<DeConv2D>(prim, inputs, quantType); - } else if (op_type == "tuple_getitem") { - return NewPrimitiveC<TupleGetItem>(prim, inputs, quantType); - } else if (op_type == "Softmax") { - return NewPrimitiveC<SoftMax>(prim, inputs, quantType); - } else if (op_type == "StridedSlice") { - return NewPrimitiveC<StridedSlice>(prim, inputs, quantType); - } else if (op_type == "Cast") { - return NewPrimitiveC<Cast>(prim, inputs, quantType); - } else if (op_type == "Maximum") { - return NewPrimitiveC<Maximum>(prim, inputs, quantType); - } else if (op_type == "Split") { - return NewPrimitiveC<Split>(prim, inputs, quantType); - } else if (op_type == "OneHot") { - return NewPrimitiveC<OneHot>(prim, inputs, quantType); - } else if (op_type == "Dropout") { - return NewPrimitiveC<Dropout>(prim, inputs, quantType); - } else if (op_type == "While") { - return NewPrimitiveC<While>(prim, inputs, quantType); - } else if (op_type == "MirrorPad") { - return NewPrimitiveC<Pad>(prim, inputs, quantType); - } else if (op_type == "InstanceNorm") { - return NewPrimitiveC<InstanceNorm>(prim, inputs, quantType); - } else if (op_type == "Gather") { - return NewPrimitiveC<Gather>(prim, inputs, quantType); - } else if (op_type == "OnesLike") { - return NewPrimitiveC<OnesLike>(prim, inputs, quantType); - } else if (op_type == "Pow") { - return NewPrimitiveC<Power>(prim, inputs, quantType); - } else if (op_type == "Sub") { - return NewPrimitiveC<Sub>(prim, inputs, quantType); - } else if (op_type == "ExpandDims") { - return NewPrimitiveC<ExpandDims>(prim, inputs, quantType); - } else if (op_type == "UnsortedSegmentSum") { - return NewPrimitiveC<UnsortedSegmentSum>(prim, inputs, quantType); - } else if (op_type == "ResizeNearestNeighbor") { - return NewPrimitiveC<Resize>(prim, inputs, quantType); - } else if (op_type == "ResizeBilinear") { - return NewPrimitiveC<Resize>(prim, inputs, quantType); - } else if (op_type == "Floor") { 
- return NewPrimitiveC<Floor>(prim, inputs, quantType); - } else if (op_type == "Minimum") { - return NewPrimitiveC<Minimum>(prim, inputs, quantType); - } else if (op_type == "Div") { - return NewPrimitiveC<Div>(prim, inputs, quantType); - } else if (op_type == "Tanh") { - return NewPrimitiveC<Activation>(prim, inputs, quantType); - } else if (op_type == "Equal") { - return NewPrimitiveC<Equal>(prim, inputs, quantType); - } else if (op_type == "TopK") { - return NewPrimitiveC<TopK>(prim, inputs, quantType); - } else if (op_type == "Mod") { - return NewPrimitiveC<Mod>(prim, inputs, quantType); - } else if (op_type == "ArgMin" || op_type == "ArgMinWithValue") { - return NewPrimitiveC<ArgMin>(prim, inputs, quantType); - } else if (op_type == "Range") { - return NewPrimitiveC<Range>(prim, inputs, quantType); - } else if (op_type == "Tile") { - return NewPrimitiveC<Tile>(prim, inputs, quantType, train_flag); - } else if (op_type == "GatherNd") { - return NewPrimitiveC<GatherNd>(prim, inputs, quantType); - } else if (op_type == "Square") { - return NewPrimitiveC<Square>(prim, inputs, quantType); - } else if (op_type == "Sqrt") { - return NewPrimitiveC<Sqrt>(prim, inputs, quantType); - } else if (op_type == "Greater") { - return NewPrimitiveC<Greater>(prim, inputs, quantType); - } else if (op_type == "Switch") { - return NewPrimitiveC<Switch>(prim, inputs, quantType); - } else if (op_type == "Partial") { - return NewPrimitiveC<Partial>(prim, inputs, quantType); - } else if (op_type == "Merge") { - return NewPrimitiveC<Merge>(prim, inputs, quantType); - } else if (op_type == "LayerNorm") { - return NewPrimitiveC<LayerNorm>(prim, inputs, quantType); - } else if (op_type == "ArgMax" || op_type == "ArgMaxWithValue") { - return NewPrimitiveC<ArgMax>(prim, inputs, quantType); - } else if (op_type == "Gelu") { - return NewPrimitiveC<GeLU>(prim, inputs, quantType); - } else if (op_type == "SoftmaxCrossEntropyWithLogits") { - return NewPrimitiveC<SoftmaxCrossEntropy>(prim, inputs, quantType); - } else if (op_type == "SparseSoftmaxCrossEntropyWithLogits") { - return NewPrimitiveC<SparseSoftmaxCrossEntropy>(prim, inputs, quantType); - } else if (op_type == "BiasAddGrad") { - return NewPrimitiveC<BiasGrad>(prim, inputs, quantType); - } else if (op_type == "ApplyMomentum") { - return NewPrimitiveC<ApplyMomentum>(prim, inputs, quantType); - } else if (op_type == "Depend") { - return NewPrimitiveC<Depend>(prim, inputs, quantType); - } else if (op_type == "ControlDepend") { - return NewPrimitiveC<ControlDepend>(prim, inputs, quantType); - } else if ((op_type == "ReluGrad" || op_type == "ReLU6Grad" || op_type == "SigmoidGrad" || - op_type == "HSigmoidGrad" || op_type == "HSwishGrad")) { - return NewPrimitiveC<ActivationGrad>(prim, inputs, quantType); - } else if ((op_type == "MaxPoolGrad") || (op_type == "AvgPoolGrad") || (op_type == "AvgPoolGradGpu") || - (op_type == "AvgPoolGradCpu")) { - return NewPrimitiveC<PoolingGrad>(prim, inputs, quantType); - } else if (op_type == "Conv2DBackpropFilter") { - return NewPrimitiveC<Conv2DGradFilter>(prim, inputs, quantType); - } else if (op_type == "Conv2DBackpropInput" && train_flag) { - return NewPrimitiveC<Conv2DGradInput>(prim, inputs, quantType); - } else if ((op_type == "BatchNormGrad") || (op_type == "FusedBatchNormGradEx")) { - return NewPrimitiveC<BNGrad>(prim, inputs, quantType); - } else if (op_type == "FlattenGrad") { - return NewPrimitiveC<FlattenGrad>(prim, inputs, quantType); - } else if ((op_type == "FusedBatchNormGrad") || (op_type == 
"FusedBatchNormGradCpu")) { - return NewPrimitiveC<BNGrad>(prim, inputs, quantType); - } else if (op_type == "PowerGrad") { - return NewPrimitiveC<PowerGrad>(prim, inputs, quantType); - } else if (op_type == "SGD") { - return NewPrimitiveC<Sgd>(prim, inputs, quantType); - } else if (op_type == "Adam") { - return NewPrimitiveC<Adam>(prim, inputs, quantType); - } else if (op_type == "Assign") { - return NewPrimitiveC<Assign>(prim, inputs, quantType); - } else if (op_type == "DropoutGrad") { - return NewPrimitiveC<DropoutGrad>(prim, inputs, quantType); - } else if (op_type == "MaximumGrad") { - return NewPrimitiveC<MaximumGrad>(prim, inputs, quantType); - } else if (op_type == "MinimumGrad") { - return NewPrimitiveC<MinimumGrad>(prim, inputs, quantType); - } else if (op_type == "AssignAdd") { - return NewPrimitiveC<AssignAdd>(prim, inputs, quantType); - } else if (op_type == "BinaryCrossEntropy") { - return NewPrimitiveC<BinaryCrossEntropy>(prim, inputs, quantType); - } else if (op_type == "BinaryCrossEntropyGrad") { - return NewPrimitiveC<BinaryCrossEntropyGrad>(prim, inputs, quantType); - } else if (op_type == "SmoothL1Loss") { - return NewPrimitiveC<SmoothL1Loss>(prim, inputs, quantType); - } else if (op_type == "SmoothL1LossGrad") { - return NewPrimitiveC<SmoothL1LossGrad>(prim, inputs, quantType); - } else if (op_type == "SigmoidCrossEntropyWithLogits") { - return NewPrimitiveC<SigmoidCrossEntropyWithLogits>(prim, inputs, quantType); - } else if (op_type == "SigmoidCrossEntropyWithLogitsGrad") { - return NewPrimitiveC<SigmoidCrossEntropyWithLogitsGrad>(prim, inputs, quantType); - } else if (op_type == "Pad") { - return NewPrimitiveC<Pad>(prim, inputs, quantType); - } else if (op_type == "StridedSliceGrad") { - return NewPrimitiveC<StridedSliceGrad>(prim, inputs, quantType); - } else if (op_type == "AbsGrad") { - return NewPrimitiveC<AbsGrad>(prim, inputs, quantType); - } else if (op_type == "Conv2DBackpropInput" && !train_flag) { - return NewPrimitiveC<DeConv2D>(prim, inputs, quantType); - } else { - MS_LOG(ERROR) << "Unsupported primitive type in Create : " << op_type; - return nullptr; - } -} - -PrimitiveC *PrimitiveC::Create(mindspore::schema::PrimitiveT *primitive) { - MS_ASSERT(primitive != nullptr); - auto op_type = primitive->value.type; - switch (op_type) { - case schema::PrimitiveType_SoftMax: - return new (std::nothrow) SoftMax(primitive); - case schema::PrimitiveType_Activation: - return new (std::nothrow) Activation(primitive); - case schema::PrimitiveType_Conv2D: - return new (std::nothrow) Conv2D(primitive); - case schema::PrimitiveType_DeConv2D: - return new (std::nothrow) DeConv2D(primitive); - case schema::PrimitiveType_Reduce: - return new (std::nothrow) Reduce(primitive); - case schema::PrimitiveType_Pooling: - return new (std::nothrow) Pooling(primitive); - case schema::PrimitiveType_ROIPooling: - return new (std::nothrow) ROIPooling(primitive); - case schema::PrimitiveType_DepthwiseConv2D: - return new (std::nothrow) DepthwiseConv2D(primitive); - case schema::PrimitiveType_FusedBatchNorm: - return new (std::nothrow) FusedBatchNorm(primitive); - case schema::PrimitiveType_BatchNorm: - return new (std::nothrow) BatchNorm(primitive); - case schema::PrimitiveType_FullConnection: - return new (std::nothrow) FullConnection(primitive); - case schema::PrimitiveType_Power: - return new (std::nothrow) Power(primitive); - case schema::PrimitiveType_Pad: - return new (std::nothrow) Pad(primitive); - case schema::PrimitiveType_Range: - return new (std::nothrow) Range(primitive); 
- case schema::PrimitiveType_Mul: - return new (std::nothrow) Mul(primitive); - case schema::PrimitiveType_Add: - return new (std::nothrow) Add(primitive); - case schema::PrimitiveType_Sub: - return new (std::nothrow) Sub(primitive); - case schema::PrimitiveType_Div: - return new (std::nothrow) Div(primitive); - case schema::PrimitiveType_BiasAdd: - return new (std::nothrow) BiasAdd(primitive); - case schema::PrimitiveType_ExpandDims: - return new (std::nothrow) ExpandDims(primitive); - case schema::PrimitiveType_ArgMax: - return new (std::nothrow) ArgMax(primitive); - case schema::PrimitiveType_ArgMin: - return new (std::nothrow) ArgMin(primitive); - case schema::PrimitiveType_Cast: - return new (std::nothrow) Cast(primitive); - case schema::PrimitiveType_Reshape: - return new (std::nothrow) Reshape(primitive); - case schema::PrimitiveType_Scale: - return new (std::nothrow) Scale(primitive); - case schema::PrimitiveType_Eltwise: - return new (std::nothrow) Eltwise(primitive); - case schema::PrimitiveType_Ceil: - return new (std::nothrow) Ceil(primitive); - case schema::PrimitiveType_Concat: - return new (std::nothrow) Concat(primitive); - case schema::PrimitiveType_Fill: - return new (std::nothrow) Fill(primitive); - case schema::PrimitiveType_Nhwc2Nchw: - return new (std::nothrow) Nhwc2Nchw(primitive); - case schema::PrimitiveType_Nchw2Nhwc: - return new (std::nothrow) Nchw2Nhwc(primitive); - case schema::PrimitiveType_Transpose: - return new (std::nothrow) Transpose(primitive); - case schema::PrimitiveType_Slice: - return new (std::nothrow) Slice(primitive); - case schema::PrimitiveType_Squeeze: - return new (std::nothrow) Squeeze(primitive); - case schema::PrimitiveType_Flatten: - return new (std::nothrow) Flatten(primitive); - case schema::PrimitiveType_Stack: - return new (std::nothrow) Stack(primitive); - case schema::PrimitiveType_Crop: - return new (std::nothrow) Crop(primitive); - case schema::PrimitiveType_SquaredDifference: - return new (std::nothrow) SquaredDifference(primitive); - case schema::PrimitiveType_AddN: - return new (std::nothrow) AddN(primitive); - case schema::PrimitiveType_Abs: - return new (std::nothrow) Abs(primitive); - case schema::PrimitiveType_Sin: - return new (std::nothrow) Sin(primitive); - case schema::PrimitiveType_Cos: - return new (std::nothrow) Cos(primitive); - case schema::PrimitiveType_Log: - return new (std::nothrow) Log(primitive); - case schema::PrimitiveType_Sqrt: - return new (std::nothrow) Sqrt(primitive); - case schema::PrimitiveType_Rsqrt: - return new (std::nothrow) Rsqrt(primitive); - case schema::PrimitiveType_Square: - return new (std::nothrow) Square(primitive); - case schema::PrimitiveType_Exp: - return new (std::nothrow) Exp(primitive); - case schema::PrimitiveType_Gather: - return new (std::nothrow) Gather(primitive); - case schema::PrimitiveType_GatherNd: - return new (std::nothrow) GatherNd(primitive); - case schema::PrimitiveType_LocalResponseNormalization: - return new (std::nothrow) LocalResponseNormalization(primitive); - case schema::PrimitiveType_Maximum: - return new (std::nothrow) Maximum(primitive); - case schema::PrimitiveType_Minimum: - return new (std::nothrow) Minimum(primitive); - case schema::PrimitiveType_StridedSlice: - return new (std::nothrow) StridedSlice(primitive); - case schema::PrimitiveType_LeakyReLU: - return new (std::nothrow) LeakyReLU(primitive); - case schema::PrimitiveType_PReLU: - return new (std::nothrow) PReLU(primitive); - case schema::PrimitiveType_Round: - return new (std::nothrow) 
Round(primitive); - case schema::PrimitiveType_Reverse: - return new (std::nothrow) Reverse(primitive); - case schema::PrimitiveType_ReverseSequence: - return new (std::nothrow) ReverseSequence(primitive); - case schema::PrimitiveType_LogicalAnd: - return new (std::nothrow) LogicalAnd(primitive); - case schema::PrimitiveType_LogicalOr: - return new (std::nothrow) LogicalOr(primitive); - case schema::PrimitiveType_LogicalNot: - return new (std::nothrow) LogicalNot(primitive); - case schema::PrimitiveType_FloorDiv: - return new (std::nothrow) FloorDiv(primitive); - case schema::PrimitiveType_FloorMod: - return new (std::nothrow) FloorMod(primitive); - case schema::PrimitiveType_Mod: - return new (std::nothrow) Mod(primitive); - case schema::PrimitiveType_Equal: - return new (std::nothrow) Equal(primitive); - case schema::PrimitiveType_NotEqual: - return new (std::nothrow) NotEqual(primitive); - case schema::PrimitiveType_Less: - return new (std::nothrow) Less(primitive); - case schema::PrimitiveType_LessEqual: - return new (std::nothrow) LessEqual(primitive); - case schema::PrimitiveType_Greater: - return new (std::nothrow) Greater(primitive); - case schema::PrimitiveType_GreaterEqual: - return new (std::nothrow) GreaterEqual(primitive); - case schema::PrimitiveType_Floor: - return new (std::nothrow) Floor(primitive); - case schema::PrimitiveType_Split: - return new (std::nothrow) Split(primitive); - case schema::PrimitiveType_OneHot: - return new (std::nothrow) OneHot(primitive); - case schema::PrimitiveType_PriorBox: - return new (std::nothrow) PriorBox(primitive); - case schema::PrimitiveType_SpaceToDepth: - return new (std::nothrow) SpaceToDepth(primitive); - case schema::PrimitiveType_Tile: - return new (std::nothrow) Tile(primitive); - case schema::PrimitiveType_Resize: - return new (std::nothrow) Resize(primitive); - case schema::PrimitiveType_Unstack: - return new (std::nothrow) Unstack(primitive); - case schema::PrimitiveType_Unique: - return new (std::nothrow) Unique(primitive); - case schema::PrimitiveType_TopK: - return new (std::nothrow) TopK(primitive); - case schema::PrimitiveType_MatMul: - return new (std::nothrow) MatMul(primitive); - case schema::PrimitiveType_QuantDTypeCast: - return new (std::nothrow) QuantDTypeCast(primitive); - case schema::PrimitiveType_EmbeddingLookup: - return new (std::nothrow) EmbeddingLookup(primitive); - case schema::PrimitiveType_Elu: - return new (std::nothrow) Elu(primitive); - case schema::PrimitiveType_DeDepthwiseConv2D: - return new (std::nothrow) DeDepthwiseConv2D(primitive); - case schema::PrimitiveType_Shape: - return new (std::nothrow) Shape(primitive); - case schema::PrimitiveType_Unsqueeze: - return new (std::nothrow) Unsqueeze(primitive); - case schema::PrimitiveType_BatchToSpace: - case schema::PrimitiveType_BatchToSpaceND: - return new (std::nothrow) BatchToSpace(primitive); - case schema::PrimitiveType_SpaceToBatch: - return new (std::nothrow) SpaceToBatch(primitive); - case schema::PrimitiveType_SpaceToBatchND: - return new (std::nothrow) SpaceToBatchND(primitive); - case schema::PrimitiveType_BroadcastTo: - return new (std::nothrow) BroadcastTo(primitive); - case schema::PrimitiveType_DepthToSpace: - return new (std::nothrow) DepthToSpace(primitive); - case schema::PrimitiveType_Lstm: - return new (std::nothrow) Lstm(primitive); - case schema::PrimitiveType_ZerosLike: - return new (std::nothrow) ZerosLike(primitive); - case schema::PrimitiveType_MakeTuple: - return new (std::nothrow) MakeTuple(primitive); - case 
schema::PrimitiveType_Where: - return new (std::nothrow) Where(primitive); - case schema::PrimitiveType_ScatterND: - return new (std::nothrow) ScatterND(primitive); - case schema::PrimitiveType_ConstantOfShape: - return new (std::nothrow) ConstantOfShape(primitive); - case schema::PrimitiveType_L2Norm: - return new (std::nothrow) L2Norm(primitive); - case schema::PrimitiveType_SparseToDense: - return new (std::nothrow) SparseToDense(primitive); - case schema::PrimitiveType_DetectionPostProcess: - return new (std::nothrow) DetectionPostProcess(primitive); - case schema::PrimitiveType_Dropout: - return new (std::nothrow) Dropout(primitive); - case schema::PrimitiveType_Neg: - return new (std::nothrow) Neg(primitive); - case schema::PrimitiveType_RealDiv: - return new (std::nothrow) RealDiv(primitive); - case schema::PrimitiveType_LshProjection: - return new (std::nothrow) LshProjection(primitive); - case schema::PrimitiveType_HashtableLookup: - return new (std::nothrow) HashtableLookup(primitive); - case schema::PrimitiveType_SkipGram: - return new (std::nothrow) SkipGram(primitive); - case schema::PrimitiveType_Clip: - return new (std::nothrow) Clip(primitive); - case schema::PrimitiveType_Adder: - return new (std::nothrow) Adder(primitive); - case schema::PrimitiveType_CustomPredict: - return new (std::nothrow) CustomPredict(primitive); - case schema::PrimitiveType_CustomNormalize: - return new (std::nothrow) CustomNormalize(primitive); - case schema::PrimitiveType_CustomExtractFeatures: - return new (std::nothrow) CustomExtractFeatures(primitive); - case schema::PrimitiveType_Upsample: - return new (std::nothrow) Upsample(primitive); - case schema::PrimitiveType_LayerNorm: - return new (std::nothrow) LayerNorm(primitive); - case schema::PrimitiveType_NonMaxSuppression: - return new (std::nothrow) NonMaxSuppression(primitive); - case schema::PrimitiveType_Identity: - return new (std::nothrow) Identity(primitive); - case schema::PrimitiveType_Rfft: - return new (std::nothrow) Rfft(primitive); - case schema::PrimitiveType_FftReal: - return new (std::nothrow) FftReal(primitive); - case schema::PrimitiveType_FftImag: - return new (std::nothrow) FftImag(primitive); - case schema::PrimitiveType_AudioSpectrogram: - return new (std::nothrow) AudioSpectrogram(primitive); - case schema::PrimitiveType_Mfcc: - return new (std::nothrow) Mfcc(primitive); - case schema::PrimitiveType_InstanceNorm: - return new (std::nothrow) InstanceNorm(primitive); - case schema::PrimitiveType_While: - return new (std::nothrow) While(primitive); - case schema::PrimitiveType_OnnxInt8Quantize: - return new (std::nothrow) Quant(primitive); - case schema::PrimitiveType_OnnxInt8Dequantize: - return new (std::nothrow) Dequant(primitive); - case schema::PrimitiveType_Reciprocal: - return new (std::nothrow) Reciprocal(primitive); - case schema::PrimitiveType_Constant: - return new (std::nothrow) Constant(primitive); - case schema::PrimitiveType_TensorListFromTensor: - return new (std::nothrow) TensorListFromTensor(primitive); - case schema::PrimitiveType_TensorListGetItem: - return new (std::nothrow) TensorListGetItem(primitive); - case schema::PrimitiveType_TensorListSetItem: - return new (std::nothrow) TensorListSetItem(primitive); - case schema::PrimitiveType_TensorListReserve: - return new (std::nothrow) TensorListReserve(primitive); - case schema::PrimitiveType_TensorListStack: - return new (std::nothrow) TensorListStack(primitive); - case schema::PrimitiveType_Switch: - return new (std::nothrow) Switch(primitive); - case 
schema::PrimitiveType_Merge: - return new (std::nothrow) Merge(primitive); - case schema::PrimitiveType_Partial: - return new (std::nothrow) Partial(primitive); - case schema::PrimitiveType_Assert: - return new (std::nothrow) AssertOP(primitive); - case schema::PrimitiveType_GeLU: - return new (std::nothrow) GeLU(primitive); - case schema::PrimitiveType_If: - return new (std::nothrow) If(primitive); - case schema::PrimitiveType_Select: - return new (std::nothrow) Select(primitive); - case schema::PrimitiveType_Gru: - return new (std::nothrow) Gru(primitive); - case schema::PrimitiveType_Size: - return new (std::nothrow) Size(primitive); - case schema::PrimitiveType_InvertPermutation: - return new (std::nothrow) InvertPermutation(primitive); - case schema::PrimitiveType_RandomStandardNormal: - return new (std::nothrow) RandomStandardNormal(primitive); - case schema::PrimitiveType_CropAndResize: - return new (std::nothrow) CropAndResize(primitive); - case schema::PrimitiveType_NonZero: - return new (std::nothrow) NonZero(primitive); - case schema::PrimitiveType_Erf: - return new (std::nothrow) Erf(primitive); - case schema::PrimitiveType_IsFinite: - return new (std::nothrow) IsFinite(primitive); - case schema::PrimitiveType_LinSpace: - return new (std::nothrow) LinSpace(primitive); - case schema::PrimitiveType_UniformReal: - return new (std::nothrow) UniformReal(primitive); - case schema::PrimitiveType_Rank: - return new (std::nothrow) Rank(primitive); - case schema::PrimitiveType_ActivationGrad: - return new (std::nothrow) ActivationGrad(primitive); - case schema::PrimitiveType_PoolingGrad: - return new (std::nothrow) PoolingGrad(primitive); - case schema::PrimitiveType_Conv2DGradFilter: - return new (std::nothrow) Conv2DGradFilter(primitive); - case schema::PrimitiveType_Conv2DGradInput: - return new (std::nothrow) Conv2DGradInput(primitive); - case schema::PrimitiveType_GroupConv2DGradInput: - return new (std::nothrow) GroupConv2DGradInput(primitive); - case schema::PrimitiveType_BiasGrad: - return new (std::nothrow) BiasGrad(primitive); - case schema::PrimitiveType_ApplyMomentum: - return new (std::nothrow) ApplyMomentum(primitive); - case schema::PrimitiveType_BNGrad: - return new (std::nothrow) BNGrad(primitive); - case schema::PrimitiveType_AddGrad: - return new (std::nothrow) ArithmeticGrad(primitive); - case schema::PrimitiveType_SubGrad: - return new (std::nothrow) ArithmeticGrad(primitive); - case schema::PrimitiveType_MulGrad: - return new (std::nothrow) ArithmeticGrad(primitive); - case schema::PrimitiveType_DivGrad: - return new (std::nothrow) ArithmeticGrad(primitive); - case schema::PrimitiveType_SoftmaxCrossEntropy: - return new (std::nothrow) SoftmaxCrossEntropy(primitive); - case schema::PrimitiveType_SparseSoftmaxCrossEntropy: - return new (std::nothrow) SparseSoftmaxCrossEntropy(primitive); - case schema::PrimitiveType_PowerGrad: - return new (std::nothrow) PowerGrad(primitive); - case schema::PrimitiveType_Depend: - return new (std::nothrow) Depend(primitive); - case schema::PrimitiveType_ControlDepend: - return new (std::nothrow) ControlDepend(primitive); - case schema::PrimitiveType_FlattenGrad: - return new (std::nothrow) FlattenGrad(primitive); - case schema::PrimitiveType_NegGrad: - return new (std::nothrow) NegGrad(primitive); - case schema::PrimitiveType_LogGrad: - return new (std::nothrow) LogGrad(primitive); - case schema::PrimitiveType_AbsGrad: - return new (std::nothrow) AbsGrad(primitive); - case schema::PrimitiveType_Sgd: - return new (std::nothrow) 
Sgd(primitive); - case schema::PrimitiveType_Adam: - return new (std::nothrow) Adam(primitive); - case schema::PrimitiveType_Assign: - return new (std::nothrow) Assign(primitive); - case schema::PrimitiveType_AssignAdd: - return new (std::nothrow) AssignAdd(primitive); - case schema::PrimitiveType_OnesLike: - return new (std::nothrow) OnesLike(primitive); - case schema::PrimitiveType_UnsortedSegmentSum: - return new (std::nothrow) UnsortedSegmentSum(primitive); - case schema::PrimitiveType_BinaryCrossEntropyGrad: - return new (std::nothrow) BinaryCrossEntropyGrad(primitive); - case schema::PrimitiveType_BinaryCrossEntropy: - return new (std::nothrow) BinaryCrossEntropy(primitive); - case schema::PrimitiveType_DropoutGrad: - return new (std::nothrow) DropoutGrad(primitive); - case schema::PrimitiveType_MaximumGrad: - return new (std::nothrow) MaximumGrad(primitive); - case schema::PrimitiveType_MinimumGrad: - return new (std::nothrow) MinimumGrad(primitive); - case schema::PrimitiveType_SmoothL1Loss: - return new (std::nothrow) SmoothL1Loss(primitive); - case schema::PrimitiveType_SmoothL1LossGrad: - return new (std::nothrow) SmoothL1LossGrad(primitive); - case schema::PrimitiveType_SigmoidCrossEntropyWithLogits: - return new (std::nothrow) SigmoidCrossEntropyWithLogits(primitive); - case schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad: - return new (std::nothrow) SigmoidCrossEntropyWithLogitsGrad(primitive); - case schema::PrimitiveType_StridedSliceGrad: - return new (std::nothrow) StridedSliceGrad(primitive); - default: - MS_LOG(ERROR) << "Unsupported primitive type in Create : " << schema::EnumNamePrimitiveType(op_type); - break; - } - return nullptr; -} - -#else -void PrimitiveC::set_quant_type(schema::QuantType quant_type) { this->quant_type_ = quant_type; } -schema::QuantType PrimitiveC::quant_type() const { return quant_type_; } -#endif - -int PrimitiveC::Type() const { - if (this->primitive_ == nullptr && this->op_type_ == OP_TYPE_NOT_SET) { - return schema::PrimitiveType_NONE; - } -#ifdef PRIMITIVE_WRITEABLE - if (op_type_ != OP_TYPE_NOT_SET) { - return op_type_; - } - return this->primitive_->value.type; -#else - return this->primitive_->value_type(); -#endif -} -bool PrimitiveC::infer_flag() const { return this->infer_flag_; } - -void PrimitiveC::set_infer_flag(bool flag) { this->infer_flag_ = flag; } - -bool PrimitiveC::train_flag() const { return this->train_flag_; } - -void PrimitiveC::set_train_flag(bool flag) { this->train_flag_ = flag; } - -int PrimitiveC::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - auto input = inputs.front(); - MS_ASSERT(input != nullptr); - auto output = outputs.front(); - MS_ASSERT(output != nullptr); - output->set_shape(input->shape()); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - return 0; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/primitive_c.h b/mindspore/lite/src/ops/primitive_c.h deleted file mode 100644 index 469992a7b9..0000000000 --- a/mindspore/lite/src/ops/primitive_c.h +++ /dev/null @@ -1,259 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_PRIMITIVE_C_H_ -#define MINDSPORE_LITE_SRC_OPS_PRIMITIVE_C_H_ -#include <string> -#include <set> -#include <vector> -#include <memory> -#include <map> -#ifdef PRIMITIVE_WRITEABLE -#include "ir/primitive.h" -#include "schema/inner/model_generated.h" -#include "schema/inner/ops_generated.h" -#include "schema/ops_generated.h" -#include "tools/converter/ops/ops_def.h" -#else -#include "schema/model_generated.h" -#endif -#include "nnacl/op_base.h" -#include "src/tensor.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" - -namespace mindspore { -namespace lite { -constexpr const int OP_TYPE_NOT_SET = -1; -constexpr uint32_t kSingleNum = 1; -constexpr uint32_t kDoubleNum = 2; -constexpr uint32_t kTripleNum = 3; -constexpr uint32_t kQuadrupleNum = 4; - -const std::set<int> kSupportDataType = {kNumberTypeBool, kNumberTypeUInt8, kNumberTypeInt8, - kNumberTypeInt32, kNumberTypeFloat32, kNumberTypeFloat16}; - -#ifdef PRIMITIVE_WRITEABLE -using TensorPtr = std::shared_ptr<mindspore::tensor::Tensor>; -constexpr int kAnfPopulaterInputNumOne = 1; -constexpr int kAnfPopulaterInputNumTwo = 2; -constexpr int kAnfPopulaterInputNumThree = 3; -static std::map<std::string, schema::ActivationType> kActivationTypeMap{ - {"ReLU", schema::ActivationType_RELU}, - {"ReLU6", schema::ActivationType_RELU6}, - {"Sigmoid", schema::ActivationType_SIGMOID}, - {"HSwish", schema::ActivationType_HSWISH}, - {"HSigmoid", schema::ActivationType_HSIGMOID}, - {"Swish", schema::ActivationType_SWISH}, - {"LeakyRelu", schema::ActivationType_LEAKY_RELU}, - {"Tanh", schema::ActivationType_TANH}, - {"Logistic", schema::ActivationType_SIGMOID}}; -std::vector<int> CastToInt(const ValuePtr &value); -class PrimitiveC : public mindspore::Primitive { - public: - // Argument primitive is delivered into PrimitiveC and will be deleted in ~PrimitiveC(). - // Caller should not delete primitive. - explicit PrimitiveC(schema::PrimitiveT *primitive) : Primitive(""), primitive_(primitive) {} - - explicit PrimitiveC(const Primitive &prim) : Primitive(prim) {} - - // Argument primitive is delivered into PrimitiveC and will be deleted in ~PrimitiveC(). - // Caller should not delete primitive. 
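The comment above states an ownership contract the compiler cannot enforce: the raw schema::PrimitiveT pointer is adopted by PrimitiveC and freed in ~PrimitiveC(), so a caller that also deletes it would cause a double free. A sketch of the same contract, alongside the std::unique_ptr form that would make the transfer explicit in the signature (illustrative only; the class here keeps the raw pointer):

#include <memory>
#include <utility>

struct PrimitiveT {  // stand-in for schema::PrimitiveT
  int type = 0;
};

// Raw-pointer "sink": adopts the pointer and deletes it in the destructor,
// exactly the contract the comment above describes.
class Owner {
 public:
  explicit Owner(PrimitiveT *p) : p_(p) {}  // caller must NOT delete p afterwards
  ~Owner() { delete p_; }
  Owner(const Owner &) = delete;            // copying would double-free p_
  Owner &operator=(const Owner &) = delete;

 private:
  PrimitiveT *p_;
};

// The same contract stated in the type system: taking std::unique_ptr by
// value makes the ownership transfer visible at every call site.
class SaferOwner {
 public:
  explicit SaferOwner(std::unique_ptr<PrimitiveT> p) : p_(std::move(p)) {}

 private:
  std::unique_ptr<PrimitiveT> p_;
};

int main() {
  Owner a(new PrimitiveT);                       // adopted; freed by ~Owner
  SaferOwner b(std::make_unique<PrimitiveT>());  // ownership explicit in the signature
  return 0;
}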
- PrimitiveC(const std::string &name, schema::PrimitiveT *primitive) : Primitive(name), primitive_(primitive) {} - - PrimitiveC() : Primitive(""), primitive_(nullptr) {} - - MS_DECLARE_PARENT(PrimitiveC, Primitive); - - ~PrimitiveC() override { delete this->primitive_; } - - int Type() const; - - schema::PrimitiveT *primitiveT() const; - - void ClearPrimitiveT(); - - bool operator==(const Value &rhs) const override { - if (rhs.isa<PrimitiveC>()) { - auto other_prim = dynamic_cast<const PrimitiveC &>(rhs); - auto a = this->primitive_->value.type; - auto b = other_prim.primitive_->value.type; - return a == b; - } else { - return false; - } - } - - void set_input_quant_params(const std::vector<std::vector<schema::QuantParamT>> &input_quant_param); - - void set_input_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &input_quant_param); - - void set_output_quant_params(const std::vector<std::vector<schema::QuantParamT>> &output_quant_param); - - void set_output_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &output_quant_param); - - bool IsInputQuantParamsInited(); - - bool IsOutputQuantParamsInited(); - - void ClearInputOutputQuantParam(); - - void AddInputQuantParam(const std::vector<schema::QuantParamT> &quant_param); - - std::vector<std::vector<schema::QuantParamT>> input_quant_params() const; - - void AddOutputQuantParam(const std::vector<schema::QuantParamT> &quant_param); - - std::vector<std::vector<schema::QuantParamT>> output_quant_params() const; - - void set_quant_type(const schema::QuantType &quant_type); - - schema::QuantType quant_type() const; - - bool enable_huffman_code() const; - - void set_enable_huffman_code(bool enable_huffman_code); - - virtual int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); - - bool infer_flag() const; - - void set_infer_flag(bool flag); - - bool train_flag() const; - - void set_train_flag(bool flag); - - static PrimitiveC *Create(mindspore::schema::Primitive *primitive) { return Create(primitive->UnPack()); } - - static PrimitiveC *Create(mindspore::schema::PrimitiveT *primitive); - - static void GetAttrDataFromInput(const AnfNodePtr &inputNode, std::vector<int> *data); - - static std::shared_ptr<PrimitiveC> Create(const Primitive &prim, const std::vector<AnfNodePtr> &inputs, - const schema::QuantType &quantType, bool train_flag = false); - void PopulaterQuantParam(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); - void FillDefaultInputQuantParamIfNeed(const size_t &inputSize); - void PopulaterInputQuantParam(const Primitive &prim, const std::vector<AnfNodePtr> &inputs, - bool narrowRangeQuantParam, int32_t numbitsRangeQuantParam); - void PopulaterOutputQuantParam(const Primitive &prim, bool narrowRangeQuantParam, int32_t numbitsRangeQuantParam); - static void CalFloatScopeByMeanAndStddev(const double &mean, const double &stdDev, float *mMin, float *mMax); - - protected: - virtual int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { return RET_ERROR; } - - protected: - schema::PrimitiveT *primitive_ = nullptr; - std::vector<std::vector<schema::QuantParamT>> input_quant_param_; - std::vector<std::vector<schema::QuantParamT>> output_quant_param_; - schema::QuantType quant_type_{schema::QuantType_QUANT_NONE}; - bool infer_flag_ = true; - int op_type_ = OP_TYPE_NOT_SET; - bool enable_huffman_code_ = false; - bool train_flag_ = false; -}; -std::shared_ptr<PrimitiveC> GetReturnPrim(); - -std::shared_ptr<PrimitiveC> 
GetMakeTuplePrim(); - -std::shared_ptr<PrimitiveC> GetTupleGetItemPrim(); - -#else -class PrimitiveC { - public: - PrimitiveC() = default; - - virtual ~PrimitiveC() { free(this->primitive_buf_); } - - static PrimitiveC *Create(const schema::Primitive *primitive); - - bool infer_flag() const; - - void set_infer_flag(bool flag); - - bool train_flag() const; - - void set_train_flag(bool flag); - - virtual int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); - - int Type() const; - - void set_quant_type(schema::QuantType quant_type); - schema::QuantType quant_type() const; - - template <typename T, typename = std::enable_if<std::is_base_of<PrimitiveC, T>::value>> - static PrimitiveC *NewPrimitiveC(const schema::Primitive *primitive) { - auto primc = new (std::nothrow) T(); - if (primc == nullptr) { - MS_LOG(ERROR) << "new PrimitiveC failed"; - return nullptr; - } - auto ret = primc->UnPackSchemaPrimitive(primitive); - if (ret != RET_OK) { - delete primc; - MS_LOG(ERROR) << "UnPackSchemaPrimitive failed"; - return nullptr; - } - return primc; - } - - protected: - int UnPackSchemaPrimitive(const schema::Primitive *primitive) { - flatbuffers::FlatBufferBuilder fbb(1024); - if (UnPackToFlatBuilder(primitive, &fbb) != RET_OK) { - MS_LOG(ERROR) << "UnPackToFlatBuilder failed"; - fbb.Clear(); - return RET_ERROR; - } - auto buf = fbb.GetBufferPointer(); - if (buf == nullptr) { - MS_LOG(ERROR) << "GetBufferPointer return nullptr"; - fbb.Clear(); - return RET_ERROR; - } - primitive_buf_ = reinterpret_cast<char *>(malloc(fbb.GetSize())); - if (primitive_buf_ == nullptr) { - MS_LOG(ERROR) << "malloc primitive_buf_ failed"; - fbb.Clear(); - return RET_ERROR; - } - memcpy(primitive_buf_, buf, fbb.GetSize()); - this->primitive_ = flatbuffers::GetRoot<schema::Primitive>(primitive_buf_); - fbb.Clear(); - return RET_OK; - } - - virtual int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - return RET_ERROR; - } - - protected: - const schema::Primitive *primitive_ = nullptr; - char *primitive_buf_ = nullptr; - bool infer_flag_ = true; - schema::QuantType quant_type_{schema::QuantType_QUANT_NONE}; - int op_type_ = OP_TYPE_NOT_SET; - bool train_flag_ = false; -}; -using PrimitiveCPtr = std::shared_ptr<PrimitiveC>; -typedef PrimitiveC *(*PrimitiveCCreator)(const schema::Primitive *primitive); -#endif -typedef OpParameter *(*ParameterCreator)(const PrimitiveC *primitive); - -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_PRIMITIVE_C_H_ diff --git a/mindspore/lite/src/ops/prior_box.cc b/mindspore/lite/src/ops/prior_box.cc deleted file mode 100644 index 1d70ad4edf..0000000000 --- a/mindspore/lite/src/ops/prior_box.cc +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/prior_box.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> PriorBox::GetMinSizes() const { return this->primitive_->value.AsPriorBox()->min_sizes; } -std::vector<int> PriorBox::GetMaxSizes() const { return this->primitive_->value.AsPriorBox()->max_sizes; } -std::vector<float> PriorBox::GetAspectRatios() const { return this->primitive_->value.AsPriorBox()->aspect_ratios; } -std::vector<float> PriorBox::GetVariances() const { return this->primitive_->value.AsPriorBox()->variances; } -int PriorBox::GetImageSizeW() const { return this->primitive_->value.AsPriorBox()->image_size_w; } -int PriorBox::GetImageSizeH() const { return this->primitive_->value.AsPriorBox()->image_size_h; } -float PriorBox::GetStepW() const { return this->primitive_->value.AsPriorBox()->step_w; } -float PriorBox::GetStepH() const { return this->primitive_->value.AsPriorBox()->step_h; } -bool PriorBox::GetClip() const { return this->primitive_->value.AsPriorBox()->clip; } -bool PriorBox::GetFlip() const { return this->primitive_->value.AsPriorBox()->flip; } -float PriorBox::GetOffset() const { return this->primitive_->value.AsPriorBox()->offset; } - -void PriorBox::SetMinSizes(const std::vector<int> &min_sizes) { - this->primitive_->value.AsPriorBox()->min_sizes = min_sizes; -} -void PriorBox::SetMaxSizes(const std::vector<int> &max_sizes) { - this->primitive_->value.AsPriorBox()->max_sizes = max_sizes; -} -void PriorBox::SetAspectRatios(const std::vector<float> &aspect_ratios) { - this->primitive_->value.AsPriorBox()->aspect_ratios = aspect_ratios; -} -void PriorBox::SetVariances(const std::vector<float> &variances) { - this->primitive_->value.AsPriorBox()->variances = variances; -} -void PriorBox::SetImageSizeW(int image_size_w) { this->primitive_->value.AsPriorBox()->image_size_w = image_size_w; } -void PriorBox::SetImageSizeH(int image_size_h) { this->primitive_->value.AsPriorBox()->image_size_h = image_size_h; } -void PriorBox::SetStepW(float step_w) { this->primitive_->value.AsPriorBox()->step_w = step_w; } -void PriorBox::SetStepH(float step_h) { this->primitive_->value.AsPriorBox()->step_h = step_h; } -void PriorBox::SetClip(bool clip) { this->primitive_->value.AsPriorBox()->clip = clip; } -void PriorBox::SetFlip(bool flip) { this->primitive_->value.AsPriorBox()->flip = flip; } -void PriorBox::SetOffset(float offset) { this->primitive_->value.AsPriorBox()->offset = offset; } - -#else - -std::vector<int> PriorBox::GetMinSizes() const { - auto fb_vector = this->primitive_->value_as_PriorBox()->min_sizes(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> PriorBox::GetMaxSizes() const { - auto fb_vector = this->primitive_->value_as_PriorBox()->max_sizes(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<float> PriorBox::GetAspectRatios() const { - auto fb_vector = this->primitive_->value_as_PriorBox()->aspect_ratios(); - return std::vector<float>(fb_vector->begin(), fb_vector->end()); -} -std::vector<float> PriorBox::GetVariances() const { - auto fb_vector = this->primitive_->value_as_PriorBox()->variances(); - return std::vector<float>(fb_vector->begin(), fb_vector->end()); -} -int PriorBox::GetImageSizeW() const { return this->primitive_->value_as_PriorBox()->image_size_w(); } -int PriorBox::GetImageSizeH() const { return this->primitive_->value_as_PriorBox()->image_size_h(); } -float PriorBox::GetStepW() 
const { return this->primitive_->value_as_PriorBox()->step_w(); } -float PriorBox::GetStepH() const { return this->primitive_->value_as_PriorBox()->step_h(); } -bool PriorBox::GetClip() const { return this->primitive_->value_as_PriorBox()->clip(); } -bool PriorBox::GetFlip() const { return this->primitive_->value_as_PriorBox()->flip(); } -float PriorBox::GetOffset() const { return this->primitive_->value_as_PriorBox()->offset(); } - -int PriorBox::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_PriorBox(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_PriorBox return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> min_sizes; - if (attr->min_sizes() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->min_sizes()->size()); i++) { - min_sizes.push_back(attr->min_sizes()->data()[i]); - } - } - std::vector<int32_t> max_sizes; - if (attr->max_sizes() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->max_sizes()->size()); i++) { - max_sizes.push_back(attr->max_sizes()->data()[i]); - } - } - std::vector<float> aspect_ratios; - if (attr->aspect_ratios() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->aspect_ratios()->size()); i++) { - aspect_ratios.push_back(attr->aspect_ratios()->data()[i]); - } - } - std::vector<float> variances; - if (attr->variances() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->variances()->size()); i++) { - variances.push_back(attr->variances()->data()[i]); - } - } - auto val_offset = schema::CreatePriorBoxDirect(*fbb, &min_sizes, &max_sizes, &aspect_ratios, &variances); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_PriorBox, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *PriorBoxCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<PriorBox>(primitive); -} -Registry PriorBoxRegistry(schema::PrimitiveType_PriorBox, PriorBoxCreator); -#endif - -namespace { -constexpr int kPriorBoxPoints = 4; -constexpr int kPriorBoxN = 1; -constexpr int kPriorBoxW = 1; -constexpr int kPriorBoxC = 2; -} // namespace -int PriorBox::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto output = outputs_.at(0); - MS_ASSERT(output != nullptr); - output->set_data_type(kNumberTypeFloat32); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<float> different_aspect_ratios{1.0f}; - auto aspect_ratios = GetAspectRatios(); - for (size_t i = 0; i < aspect_ratios.size(); i++) { - float ratio = aspect_ratios[i]; - bool exist = std::any_of(different_aspect_ratios.begin(), different_aspect_ratios.end(), - [&](float v) { return abs(ratio - v) < 1e-6; }); - if (!exist) { - different_aspect_ratios.emplace_back(ratio); - if (GetFlip()) { - different_aspect_ratios.emplace_back(1.0f / ratio); - } - } - } - int32_t num_priors_box = GetMinSizes().size() * different_aspect_ratios.size() + GetMaxSizes().size(); - int32_t h = input->Height() * input->Width() * num_priors_box * kPriorBoxPoints; - std::vector<int> output_shape{kPriorBoxN, h, kPriorBoxW, kPriorBoxC}; - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/prior_box.h b/mindspore/lite/src/ops/prior_box.h 
deleted file mode 100644 index 4976ea425f..0000000000 --- a/mindspore/lite/src/ops/prior_box.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_PRIOR_BOX_H_ -#define LITE_MINDSPORE_LITE_C_OPS_PRIOR_BOX_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class PriorBox : public PrimitiveC { - public: - PriorBox() = default; - ~PriorBox() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(PriorBox, PrimitiveC); - explicit PriorBox(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetMinSizes(const std::vector<int> &min_sizes); - void SetMaxSizes(const std::vector<int> &max_sizes); - void SetAspectRatios(const std::vector<float> &aspect_ratios); - void SetVariances(const std::vector<float> &variances); - void SetImageSizeW(int image_size_w); - void SetImageSizeH(int image_size_h); - void SetStepW(float step_w); - void SetStepH(float step_h); - void SetClip(bool clip); - void SetFlip(bool flip); - void SetOffset(float offset); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetMinSizes() const; - std::vector<int> GetMaxSizes() const; - std::vector<float> GetAspectRatios() const; - std::vector<float> GetVariances() const; - int GetImageSizeW() const; - int GetImageSizeH() const; - float GetStepW() const; - float GetStepH() const; - bool GetClip() const; - bool GetFlip() const; - float GetOffset() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_PRIOR_BOX_H_ diff --git a/mindspore/lite/src/ops/quant.cc b/mindspore/lite/src/ops/quant.cc deleted file mode 100644 index 9df5c609bb..0000000000 --- a/mindspore/lite/src/ops/quant.cc +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/quant.h" -#include <vector> -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Quant::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_OnnxInt8Quantize; - } - if (this->primitive_->value.type != schema::PrimitiveType_OnnxInt8Quantize) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::OnnxInt8QuantizeT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/quant.h b/mindspore/lite/src/ops/quant.h deleted file mode 100644 index dd854768cf..0000000000 --- a/mindspore/lite/src/ops/quant.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_QUANT_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_QUANT_H_ -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Quant : public PrimitiveC { - public: - Quant() = default; - ~Quant() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Quant, PrimitiveC); - explicit Quant(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_QUANT_H_ diff --git a/mindspore/lite/src/ops/quant_dtype_cast.cc b/mindspore/lite/src/ops/quant_dtype_cast.cc deleted file mode 100644 index e7fa5a97c1..0000000000 --- a/mindspore/lite/src/ops/quant_dtype_cast.cc +++ /dev/null @@ -1,72 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/quant_dtype_cast.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int QuantDTypeCast::GetSrcT() const { return this->primitive_->value.AsQuantDTypeCast()->srcT; } -int QuantDTypeCast::GetDstT() const { return this->primitive_->value.AsQuantDTypeCast()->dstT; } - -void QuantDTypeCast::SetSrcT(int src_t) { this->primitive_->value.AsQuantDTypeCast()->srcT = src_t; } -void QuantDTypeCast::SetDstT(int dst_t) { this->primitive_->value.AsQuantDTypeCast()->dstT = dst_t; } - -#else - -int QuantDTypeCast::GetSrcT() const { return this->primitive_->value_as_QuantDTypeCast()->srcT(); } -int QuantDTypeCast::GetDstT() const { return this->primitive_->value_as_QuantDTypeCast()->dstT(); } -int QuantDTypeCast::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_QuantDTypeCast(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_QuantDTypeCast return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateQuantDTypeCast(*fbb, attr->srcT(), attr->dstT()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_QuantDTypeCast, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *QuantDTypeCastCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<QuantDTypeCast>(primitive); -} -Registry QuantDTypeCastRegistry(schema::PrimitiveType_QuantDTypeCast, QuantDTypeCastCreator); -#endif - -int QuantDTypeCast::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - MS_ASSERT(input->data_type() == this->GetSrcT()); - output->set_data_type(static_cast<TypeId>(GetDstT())); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/quant_dtype_cast.h b/mindspore/lite/src/ops/quant_dtype_cast.h deleted file mode 100644 index ec9f75c18f..0000000000 --- a/mindspore/lite/src/ops/quant_dtype_cast.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_QUANT_D_TYPE_CAST_H_ -#define LITE_MINDSPORE_LITE_C_OPS_QUANT_D_TYPE_CAST_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class QuantDTypeCast : public PrimitiveC { - public: - QuantDTypeCast() = default; - ~QuantDTypeCast() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(QuantDTypeCast, PrimitiveC); - explicit QuantDTypeCast(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetSrcT(int src_t); - void SetDstT(int dst_t); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetSrcT() const; - int GetDstT() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_QUANT_D_TYPE_CAST_H_ diff --git a/mindspore/lite/src/ops/random_standard_normal.cc b/mindspore/lite/src/ops/random_standard_normal.cc deleted file mode 100644 index d17b1984ed..0000000000 --- a/mindspore/lite/src/ops/random_standard_normal.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/random_standard_normal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int RandomStandardNormal::GetSeed() const { return this->primitive_->value.AsRandomStandardNormal()->seed; } - -int RandomStandardNormal::GetSeed2() const { return this->primitive_->value.AsRandomStandardNormal()->seed2; } - -int RandomStandardNormal::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_RandomStandardNormal; - } - if (this->primitive_->value.type != schema::PrimitiveType_RandomStandardNormal) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::RandomStandardNormalT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int RandomStandardNormal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_RandomStandardNormal(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_RandomStandardNormal return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateRandomStandardNormal(*fbb, attr->seed(), attr->seed2()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_RandomStandardNormal, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int RandomStandardNormal::GetSeed() const { return this->primitive_->value_as_RandomStandardNormal()->seed(); } - -int RandomStandardNormal::GetSeed2() const { return this->primitive_->value_as_RandomStandardNormal()->seed2(); } - -PrimitiveC *RandomStandardNormalCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<RandomStandardNormal>(primitive); -} -Registry RandomStandardNormalRegistry(schema::PrimitiveType_RandomStandardNormal, RandomStandardNormalCreator); -#endif - -int RandomStandardNormal::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_data = static_cast<int32_t *>(inputs_[0]->data_c()); - if (input_data == nullptr) { - return RET_INFER_INVALID; - } - auto input_num = inputs_[0]->ElementsNum(); - std::vector<int> output_shape = {}; - for (int i = 0; i < input_num; i++) { - output_shape.push_back(input_data[i]); - } - outputs_[0]->set_shape(output_shape); - outputs_[0]->set_data_type(kNumberTypeFloat32); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/random_standard_normal.h b/mindspore/lite/src/ops/random_standard_normal.h deleted file mode 100644 index 5cd60748aa..0000000000 --- a/mindspore/lite/src/ops/random_standard_normal.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RANDOM_STANDARD_NORMAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RANDOM_STANDARD_NORMAL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class RandomStandardNormal : public PrimitiveC { - public: - RandomStandardNormal() = default; - ~RandomStandardNormal() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(RandomStandardNormal, PrimitiveC); - explicit RandomStandardNormal(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetSeed() const; - int GetSeed2() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RANDOM_STANDARD_NORMAL_H_ diff --git a/mindspore/lite/src/ops/range.cc b/mindspore/lite/src/ops/range.cc deleted file mode 100644 index 8014d62cd0..0000000000 --- a/mindspore/lite/src/ops/range.cc +++ /dev/null @@ -1,149 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
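RandomStandardNormal::InferShape above is a data-dependent rule: the first input is a 1-D int32 tensor whose values are the output dimensions, and the output dtype is fixed to float32, so an input holding {2, 3, 4} yields a float32 output of shape {2, 3, 4}. The core loop in isolation, with an illustrative helper name:

#include <cstdint>
#include <vector>

// Read the output shape out of the input tensor's *values*, as the deleted
// InferShape does with inputs_[0]->data_c().
std::vector<int> ShapeFromInputValues(const int32_t *data, int element_num) {
  std::vector<int> out_shape;
  out_shape.reserve(element_num);
  for (int i = 0; i < element_num; ++i) out_shape.push_back(data[i]);
  return out_shape;
}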
- */ - -#include <algorithm> -#include "src/ops/range.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Range::GetDType() const { return this->primitive_->value.AsRange()->dType; } -int Range::GetStart() const { return this->primitive_->value.AsRange()->start; } -int Range::GetLimit() const { return this->primitive_->value.AsRange()->limit; } -int Range::GetDelta() const { return this->primitive_->value.AsRange()->delta; } - -void Range::SetDType(int d_type) { this->primitive_->value.AsRange()->dType = d_type; } -void Range::SetStart(int start) { this->primitive_->value.AsRange()->start = start; } -void Range::SetLimit(int limit) { this->primitive_->value.AsRange()->limit = limit; } -void Range::SetDelta(int delta) { this->primitive_->value.AsRange()->delta = delta; } -int Range::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Range; - } - if (this->primitive_->value.type != schema::PrimitiveType_Range) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::RangeT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - attr->dType = 0; - if (prim.GetAttr("start") != nullptr) { - attr->start = static_cast<int32_t>(GetValue<float>(prim.GetAttr("start"))); - } - if (prim.GetAttr("limit") != nullptr) { - attr->limit = static_cast<int32_t>(GetValue<float>(prim.GetAttr("limit"))); - } - if (prim.GetAttr("delta") != nullptr) { - attr->delta = static_cast<int32_t>(GetValue<float>(prim.GetAttr("delta"))); - } - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int Range::GetDType() const { return this->primitive_->value_as_Range()->dType(); } -int Range::GetStart() const { return this->primitive_->value_as_Range()->start(); } -int Range::GetLimit() const { return this->primitive_->value_as_Range()->limit(); } -int Range::GetDelta() const { return this->primitive_->value_as_Range()->delta(); } -int Range::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Range(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Range return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateRange(*fbb, attr->dType(), attr->start(), attr->limit(), attr->delta()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Range, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *RangeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Range>(primitive); } -Registry RangeRegistry(schema::PrimitiveType_Range, RangeCreator); -#endif - -int Range::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != 
nullptr); - - if (inputs_.size() == 3) { - output->set_data_type(input->data_type()); - } else { - output->set_data_type(mindspore::kNumberTypeInt32); - } - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - int shape_size = 0; - if (inputs_.size() == 3) { - if ((inputs_.at(0)->data_c() == nullptr) || (inputs_.at(1)->data_c() == nullptr) || - (inputs_.at(2)->data_c() == nullptr)) { - return RET_INFER_INVALID; - } - switch (inputs_.at(0)->data_type()) { - case kNumberTypeInt: - case kNumberTypeInt32: { - auto start = *reinterpret_cast<int *>(inputs_.at(0)->data_c()); - auto limit = *reinterpret_cast<int *>(inputs_.at(1)->data_c()); - auto delta = *reinterpret_cast<int *>(inputs_.at(2)->data_c()); - shape_size = std::max(static_cast<int>(std::ceil(static_cast<float>(limit - start) / delta)), 0); - } break; - case kNumberTypeFloat32: - case kNumberTypeFloat: { - auto start = *reinterpret_cast<float *>(inputs_.at(0)->data_c()); - auto limit = *reinterpret_cast<float *>(inputs_.at(1)->data_c()); - auto delta = *reinterpret_cast<float *>(inputs_.at(2)->data_c()); - shape_size = std::max(static_cast<int>(std::ceil(static_cast<float>(limit - start) / delta)), 0); - } break; - default: { - MS_LOG(ERROR) << "Range has unsupported dataType: " << inputs_.at(0)->data_type(); - return RET_INFER_ERR; - } - } - } else { - shape_size = std::ceil(static_cast<float>(GetLimit() - GetStart()) / GetDelta()); - } - - std::vector<int> in_shape = {shape_size}; - output->set_shape(in_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/range.h b/mindspore/lite/src/ops/range.h deleted file mode 100644 index 8f1adafcc6..0000000000 --- a/mindspore/lite/src/ops/range.h +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
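Whether Range::InferShape above reads start, limit, and delta from its three input tensors or from the stored attributes, the element count comes down to one expression. As a standalone check, with an illustrative helper name:

#include <algorithm>
#include <cmath>

// Element count of Range(start, limit, delta), matching the three-input
// path of the deleted InferShape: ceil((limit - start) / delta), clamped
// at zero.
int RangeSize(float start, float limit, float delta) {
  return std::max(static_cast<int>(std::ceil((limit - start) / delta)), 0);
}
// RangeSize(0, 10, 3) == 4   -> {0, 3, 6, 9}
// RangeSize(10, 0, 3) == 0   -> empty output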
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RANGE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RANGE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Range : public PrimitiveC { - public: - Range() = default; - ~Range() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Range, PrimitiveC); - explicit Range(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetDType(int d_type); - void SetStart(int start); - void SetLimit(int limit); - void SetDelta(int delta); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetDType() const; - int GetStart() const; - int GetLimit() const; - int GetDelta() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RANGE_H_ diff --git a/mindspore/lite/src/ops/rank.cc b/mindspore/lite/src/ops/rank.cc deleted file mode 100644 index c0633e2d92..0000000000 --- a/mindspore/lite/src/ops/rank.cc +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/rank.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int Rank::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateRank(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Rank, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *RankCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Rank>(primitive); } -Registry RankRegistry(schema::PrimitiveType_Rank, RankCreator); -#endif -int Rank::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> in_shape(1, 1); - output->set_shape(in_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/rank.h b/mindspore/lite/src/ops/rank.h deleted file mode 100644 index 4ee203ef88..0000000000 --- a/mindspore/lite/src/ops/rank.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RANK_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RANK_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Rank : public PrimitiveC { - public: - Rank() = default; - ~Rank() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Rank, PrimitiveC); - explicit Rank(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RANK_H_ diff --git a/mindspore/lite/src/ops/real_div.cc b/mindspore/lite/src/ops/real_div.cc deleted file mode 100644 index 2b36e748ed..0000000000 --- a/mindspore/lite/src/ops/real_div.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/real_div.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE - -int RealDiv::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_RealDiv; - } - if (this->primitive_->value.type != schema::PrimitiveType_RealDiv) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::RealDivT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int RealDiv::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateRealDiv(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_RealDiv, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *RealDivCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<RealDiv>(primitive); } -Registry RealDivRegistry(schema::PrimitiveType_RealDiv, RealDivCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/real_div.h b/mindspore/lite/src/ops/real_div.h deleted file mode 100644 index 97e1e8c74f..0000000000 --- a/mindspore/lite/src/ops/real_div.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_REAL_DIV_H_ -#define MINDSPORE_LITE_SRC_OPS_REAL_DIV_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class RealDiv : public Arithmetic { - public: - RealDiv() = default; - ~RealDiv() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(RealDiv, Arithmetic); - explicit RealDiv(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_REAL_DIV_H_ diff --git a/mindspore/lite/src/ops/reciprocal.cc b/mindspore/lite/src/ops/reciprocal.cc deleted file mode 100644 index 5944b1a2f1..0000000000 --- a/mindspore/lite/src/ops/reciprocal.cc +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/reciprocal.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Reciprocal::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Reciprocal; - } - if (this->primitive_->value.type != schema::PrimitiveType_Reciprocal) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::ReciprocalT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -PrimitiveC *ReciprocalCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Reciprocal>(primitive); -} -Registry ReciprocalRegistry(schema::PrimitiveType_Reciprocal, ReciprocalCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/reciprocal.h b/mindspore/lite/src/ops/reciprocal.h deleted file mode 100644 index 838a8d4ccd..0000000000 --- a/mindspore/lite/src/ops/reciprocal.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_RECIPROCAL_H_ -#define MINDSPORE_LITE_SRC_OPS_RECIPROCAL_H_ - -#include "src/ops/arithmetic_self.h" -#ifdef PRIMITIVE_WRITEABLE -#include <vector> -#endif - -namespace mindspore { -namespace lite { -class Reciprocal : public ArithmeticSelf { - public: - Reciprocal() = default; - ~Reciprocal() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Reciprocal, ArithmeticSelf); - explicit Reciprocal(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateReciprocal(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Reciprocal, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; - } -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_RECIPROCAL_H_ diff --git a/mindspore/lite/src/ops/reduce.cc b/mindspore/lite/src/ops/reduce.cc deleted file mode 100644 index ed9d2cff1c..0000000000 --- a/mindspore/lite/src/ops/reduce.cc +++ /dev/null @@ -1,221 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/reduce.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Reduce::GetAxes() const { return this->primitive_->value.AsReduce()->axes; } -int Reduce::GetKeepDims() const { return this->primitive_->value.AsReduce()->keepDims; } -int Reduce::GetMode() const { return this->primitive_->value.AsReduce()->mode; } -bool Reduce::GetReduceToEnd() const { return this->primitive_->value.AsReduce()->reduceToEnd; } -float Reduce::GetCoeff() const { return this->primitive_->value.AsReduce()->coeff; } - -void Reduce::SetAxes(const std::vector<int> &axes) { this->primitive_->value.AsReduce()->axes = axes; } -void Reduce::SetKeepDims(int keep_dims) { this->primitive_->value.AsReduce()->keepDims = keep_dims; } -void Reduce::SetMode(int mode) { this->primitive_->value.AsReduce()->mode = (schema::ReduceMode)mode; } -void Reduce::SetReduceToEnd(bool reduce_to_end) { this->primitive_->value.AsReduce()->reduceToEnd = reduce_to_end; } -void Reduce::SetCoeff(float coeff) { this->primitive_->value.AsReduce()->coeff = coeff; } - -int Reduce::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Reduce; - } - if (this->primitive_->value.type != schema::PrimitiveType_Reduce) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ReduceT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.name() == "ReduceMean") { - attr->mode = schema::ReduceMode_ReduceMean; - } else if (prim.name() == "ReduceSum") { - attr->mode = schema::ReduceMode_ReduceSum; - } else if (prim.name() == "ReduceMax") { - attr->mode = schema::ReduceMode_ReduceMax; - } else if (prim.name() == "ReduceMin") { - attr->mode = schema::ReduceMode_ReduceMin; - } else if (prim.name() == "ReduceProd") { - attr->mode = schema::ReduceMode_ReduceProd; - } else if (prim.name() == "ReduceSumSquare") { - attr->mode = schema::ReduceMode_ReduceSumSquare; - } else if (prim.name() == "ReduceAll") { - attr->mode = schema::ReduceMode_ReduceAll; - } else { - MS_LOG(ERROR) << "Not supported reduce mode: " << prim.name(); - return RET_ERROR; - } - - attr->keepDims = GetValue<bool>(prim.GetAttr("keep_dims")); - if (inputs.size() == kAnfPopulaterInputNumTwo) { - auto inputNode = inputs.at(kAnfPopulaterInputNumOne); - MS_ASSERT(inputNode != nullptr); - if (inputNode->isa<ValueNode>()) { - auto valueNode = inputNode->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto value = valueNode->value(); - MS_ASSERT(value != nullptr); - if (value->isa<ValueTuple>()) { - auto valTuplPtr = dyn_cast<ValueTuple>(value); - MS_ASSERT(valTuplPtr != nullptr); - for (size_t i = 0; i < valTuplPtr->size(); i++) { - auto elem = (*valTuplPtr)[i]; - MS_ASSERT(elem != nullptr); - attr->axes.emplace_back(CastToInt(elem).front()); - } - } else { - int axes_item = CastToInt(value).front(); - attr->axes.push_back(axes_item); - } - } - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is 
nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -std::vector<int> Reduce::GetAxes() const { - auto fb_vector = this->primitive_->value_as_Reduce()->axes(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Reduce::GetKeepDims() const { return this->primitive_->value_as_Reduce()->keepDims(); } -int Reduce::GetMode() const { return this->primitive_->value_as_Reduce()->mode(); } -bool Reduce::GetReduceToEnd() const { return this->primitive_->value_as_Reduce()->reduceToEnd(); } -float Reduce::GetCoeff() const { return this->primitive_->value_as_Reduce()->coeff(); } -int Reduce::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Reduce(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Reduce return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> axes; - if (attr->axes() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axes()->size()); i++) { - axes.push_back(attr->axes()->data()[i]); - } - } - auto val_offset = - schema::CreateReduceDirect(*fbb, &axes, attr->keepDims(), attr->mode(), attr->reduceToEnd(), attr->coeff()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Reduce, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ReduceCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Reduce>(primitive); } -Registry ReduceRegistry(schema::PrimitiveType_Reduce, ReduceCreator); -#endif - -namespace { -constexpr size_t kInputSize = 1; -constexpr size_t kOutputSize = 1; -} // namespace -int Reduce::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() < kInputSize || outputs_.size() != kOutputSize) { - return RET_ERROR; - } - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (this->primitive_ == nullptr) { - return RET_NULL_PTR; - } - - bool keep_dims = static_cast<bool>(GetKeepDims()); - std::vector<int> in_shape = input->shape(); - std::vector<int> out_shape; - const auto &axes = GetAxes(); - auto num_axes = axes.size(); - int rank = static_cast<int>(in_shape.size()); - std::vector<int> actual_axes(axes.begin(), axes.end()); - - if (GetReduceToEnd()) { - if (num_axes != 1) { - MS_LOG(ERROR) << "Reduce when reduce_to_end, num of axis should be 1, got " << num_axes; - return RET_ERROR; - } - - int begin_axis; - begin_axis = axes.at(0) < 0 ? 
axes.at(0) + rank : axes.at(0); - for (auto i = begin_axis + 1; i < rank; ++i) { - actual_axes.emplace_back(i); - } - num_axes = rank - begin_axis; - keep_dims = false; - } - // reduce on all axes - if (num_axes == 0) { - if (keep_dims) { - for (size_t i = 0; i < in_shape.size(); i++) { - out_shape.push_back(1); - } - } - output->set_shape(out_shape); - output->set_data_type(input->data_type()); - return RET_OK; - } - // reduce on selected axes - for (size_t i = 0; i < in_shape.size(); i++) { - bool reduce_axis = false; - for (size_t idx = 0; idx < num_axes; ++idx) { - if (static_cast<size_t>(actual_axes.at(idx)) == i || - static_cast<size_t>(actual_axes.at(idx) + in_shape.size()) == i) { - reduce_axis = true; - break; - } - } - if (reduce_axis) { - if (keep_dims) { - out_shape.push_back(1); - } - } else { - out_shape.push_back(in_shape.at(i)); - } - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/reduce.h b/mindspore/lite/src/ops/reduce.h deleted file mode 100644 index 321c942a2d..0000000000 --- a/mindspore/lite/src/ops/reduce.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_REDUCE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_REDUCE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" -#include "schema/model_generated.h" - -namespace mindspore { -namespace lite { -class Reduce : public PrimitiveC { - public: - Reduce() = default; - ~Reduce() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Reduce, PrimitiveC); - explicit Reduce(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetAxes(const std::vector<int> &axes); - void SetKeepDims(int keep_dims); - void SetMode(int mode); - void SetReduceToEnd(bool reduce_to_end); - void SetCoeff(float coeff); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetAxes() const; - int GetKeepDims() const; - int GetMode() const; - bool GetReduceToEnd() const; - float GetCoeff() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_REDUCE_H_ diff --git a/mindspore/lite/src/ops/reshape.cc b/mindspore/lite/src/ops/reshape.cc deleted file mode 100644 index afe2164105..0000000000 --- a/mindspore/lite/src/ops/reshape.cc +++ /dev/null @@ -1,242 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
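The per-axis bookkeeping in Reduce::InferShape above reduces to: every axis named in axes (negative values wrap around the rank) is dropped from the shape, or kept as 1 when keepDims is set, while all other dimensions pass through. The standalone restatement below covers that ordinary case only, not the reduceToEnd expansion or the reduce-over-all-axes branch handled above:

#include <vector>

// Output shape of a reduction over `axes` (negative axes wrap), mirroring
// the selected-axes branch of the deleted InferShape.
std::vector<int> ReducedShape(const std::vector<int> &in_shape,
                              const std::vector<int> &axes, bool keep_dims) {
  const int rank = static_cast<int>(in_shape.size());
  std::vector<int> out;
  for (int i = 0; i < rank; ++i) {
    bool reduced = false;
    for (int a : axes) reduced = reduced || ((a < 0 ? a + rank : a) == i);
    if (reduced) {
      if (keep_dims) out.push_back(1);  // reduced dim collapses to 1
    } else {
      out.push_back(in_shape[i]);       // untouched dims pass through
    }
  }
  return out;
}
// ReducedShape({4, 5, 6}, {-1}, false) == {4, 5}
// ReducedShape({4, 5, 6}, {1}, true)   == {4, 1, 6}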
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/reshape.h" -#include <memory> -#include <algorithm> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Reshape::GetFormat() const { return this->primitive_->value.AsReshape()->format; } -std::vector<int64_t> Reshape::GetShape() const { return this->primitive_->value.AsReshape()->shape; } - -void Reshape::SetFormat(int format) { this->primitive_->value.AsReshape()->format = (schema::Format)format; } -void Reshape::SetShape(const std::vector<int64_t> &shape) { this->primitive_->value.AsReshape()->shape = shape; } -int Reshape::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Reshape; - } - if (this->primitive_->value.type != schema::PrimitiveType_Reshape) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ReshapeT(); - MS_ASSERT(inputs.size() == kAnfPopulaterInputNumThree - 1); - auto inputNode = inputs.at(kAnfPopulaterInputNumTwo - 1); - if (inputNode->isa<ValueNode>()) { - auto valueNode = inputNode->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto val = valueNode->value(); - MS_ASSERT(val != nullptr); - if (val->isa<ValueTuple>()) { - auto tuple = val->cast<ValueTuplePtr>(); - MS_ASSERT(tuple != nullptr); - for (size_t i = 0; i < tuple->size(); ++i) { - auto elem = tuple->value().at(i); - MS_ASSERT(elem != nullptr); - attr->shape.emplace_back(CastToInt(elem).front()); - } - } else { - int dim = CastToInt(val).front(); - attr->shape = {dim}; - } - } - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -int Reshape::GetFormat() const { return this->primitive_->value_as_Reshape()->format(); } -std::vector<int64_t> Reshape::GetShape() const { - auto fb_vector = this->primitive_->value_as_Reshape()->shape(); - return std::vector<int64_t>(fb_vector->begin(), fb_vector->end()); -} -int Reshape::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Reshape(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Reshape return nullptr"; - return RET_ERROR; - } - std::vector<int64_t> shape; - if (attr->shape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->shape()->size()); i++) { - shape.push_back(attr->shape()->data()[i]); - } - } - auto 
val_offset = schema::CreateReshapeDirect(*fbb, attr->format(), &shape); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Reshape, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ReshapeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Reshape>(primitive); } -Registry ReshapeRegistry(schema::PrimitiveType_Reshape, ReshapeCreator); -#endif - -int Reshape::CalNewShape(const Tensor *in_tensor, std::vector<int> *out_shape) const { - size_t in_shape_size = 1; - for (size_t i = 0; i < in_tensor->shape().size(); i++) { - in_shape_size *= in_tensor->shape().at(i); - } - int64_t infer_index = -1; - size_t out_shape_size = 1; - for (size_t i = 0; i < out_shape->size(); i++) { - if (out_shape->at(i) == -1) { - if (infer_index == -1) { - infer_index = i; - } else { - MS_LOG(ERROR) << "output shape should has no more than one dim which need infer"; - return RET_INFER_ERR; - } - } else if (out_shape->at(i) < 0) { - MS_LOG(ERROR) << "output shape dim should be non-negative"; - return RET_INFER_ERR; - } else if (out_shape->at(i) == 0) { - if (in_tensor->ElementsNum() != 0) { - out_shape->at(i) = in_tensor->shape().at(i); - out_shape_size *= out_shape->at(i); - } else { - out_shape_size = 0; - break; - } - } else { - out_shape_size *= out_shape->at(i); - } - } - if (infer_index == -1 && out_shape_size != in_shape_size) { - MS_LOG(ERROR) << "output shapeSize: " << out_shape_size << " should be equal to input shapeSize: " << in_shape_size; - return RET_INFER_ERR; - } - if (infer_index != -1) { - out_shape->at(infer_index) = in_shape_size / out_shape_size; - } - return RET_OK; -} -template <typename T> -void CalShape(const T *data, const std::vector<Tensor *> &inputs, std::vector<int> *out_shape, int shape_size) { - int input_count = inputs[0]->ElementsNum(); - int index = 0; - int size = 1; - for (int i = 0; i < shape_size; i++) { - if (static_cast<int>(data[i]) == -1) { - index = i; - } else if (static_cast<int>(data[i]) == 0) { - size *= inputs[0]->shape().at(i); - } else { - size *= data[i]; - } - out_shape->push_back(data[i]); - } - if (static_cast<int>(data[index]) == -1) { - (*out_shape).at(index) = input_count / size; - } -} -int Reshape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - out_shape_.clear(); - if (inputs_.size() == kDoubleNum) { - auto shape_tensor = inputs_.at(1); - if (shape_tensor->IsConst()) { - if (shape_tensor->data_c() == nullptr || (shape_tensor->shape().size() == 1 && shape_tensor->shape()[0] == 0)) { - MS_LOG(DEBUG) << "reshape to a scalar."; - output->set_shape(out_shape_); - return RET_OK; - } - } - if (shape_tensor->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - size_t shape_size = shape_tensor->ElementsNum(); - switch (shape_tensor->data_type()) { - case kNumberTypeInt8: { - auto data = reinterpret_cast<int8_t *>(shape_tensor->MutableData()); - CalShape<int8_t>(data, inputs_, &out_shape_, shape_size); - } break; - case kNumberTypeInt32: { - auto data = reinterpret_cast<int32_t *>(shape_tensor->MutableData()); - CalShape<int32_t>(data, inputs_, &out_shape_, shape_size); - } break; - case 
kNumberTypeInt64: { - auto data = reinterpret_cast<int64_t *>(shape_tensor->MutableData()); - CalShape<int64_t>(data, inputs_, &out_shape_, shape_size); - } break; - case kNumberTypeFloat: { - auto data = reinterpret_cast<float *>(shape_tensor->MutableData()); - CalShape<float>(data, inputs_, &out_shape_, shape_size); - } break; - case kNumberTypeUInt32: { - auto data = reinterpret_cast<uint32_t *>(shape_tensor->MutableData()); - CalShape<uint32_t>(data, inputs_, &out_shape_, shape_size); - } break; - default: { - MS_LOG(ERROR) << "Reshape weight tensor has unsupported dataType: " << shape_tensor->data_type(); - return RET_INFER_ERR; - } - } - } else if (inputs_.size() == kSingleNum) { - for (size_t i = 0; i < GetShape().size(); ++i) { - out_shape_.push_back(GetShape().at(i)); - } - } else { - MS_LOG(ERROR) << "inputs tensor size invalid."; - return RET_INFER_ERR; - } - auto ret = CalNewShape(inputs_.front(), &out_shape_); - if (ret != RET_OK) { - MS_LOG(ERROR) << "CalNewShape error"; - return ret; - } - output->set_shape(out_shape_); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/reshape.h b/mindspore/lite/src/ops/reshape.h deleted file mode 100644 index 38bdcd7691..0000000000 --- a/mindspore/lite/src/ops/reshape.h +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
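The shape grammar that Reshape::CalNewShape above accepts: at most one -1, which is inferred from the element count; 0, which copies the corresponding input dimension; and positive sizes, whose product must match the input's element count. A compact standalone equivalent follows; it omits the zero-element-tensor special case the deleted code handles:

#include <vector>

// Resolve a requested reshape against in_shape, mirroring the deleted
// CalNewShape: one -1 may be inferred, 0 copies the input dim, and the
// element counts must agree. Returns false on an invalid request.
bool ResolveReshape(const std::vector<int> &in_shape, std::vector<int> *out_shape) {
  long long in_count = 1;
  for (int d : in_shape) in_count *= d;
  long long known = 1;
  int infer_index = -1;
  for (size_t i = 0; i < out_shape->size(); ++i) {
    int d = (*out_shape)[i];
    if (d == -1) {
      if (infer_index != -1) return false;  // only one dim may be inferred
      infer_index = static_cast<int>(i);
    } else if (d == 0) {
      (*out_shape)[i] = in_shape[i];        // 0 means "keep the input dim"
      known *= in_shape[i];
    } else if (d < 0) {
      return false;                         // other negatives are invalid
    } else {
      known *= d;
    }
  }
  if (infer_index >= 0) {
    (*out_shape)[infer_index] = static_cast<int>(in_count / known);
    return true;
  }
  return known == in_count;
}
// in_shape {2, 3, 4}, request {0, -1} -> resolved to {2, 12}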
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_RESHAPE_H_ -#define MINDSPORE_LITE_SRC_OPS_RESHAPE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Reshape : public PrimitiveC { - public: - Reshape() = default; - ~Reshape() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Reshape, PrimitiveC); - explicit Reshape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetFormat(int format); - void SetShape(const std::vector<int64_t> &shape); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - std::vector<int64_t> GetShape() const; - std::vector<int> GetOutputShape() { return out_shape_; } - - private: - int CalNewShape(const lite::Tensor *in_tensor, std::vector<int> *out_shape) const; - std::vector<int> out_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_RESHAPE_H_ diff --git a/mindspore/lite/src/ops/resize.cc b/mindspore/lite/src/ops/resize.cc deleted file mode 100644 index daff37aef1..0000000000 --- a/mindspore/lite/src/ops/resize.cc +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/resize.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Resize::GetFormat() const { return this->primitive_->value.AsResize()->format; } -int Resize::GetMethod() const { return this->primitive_->value.AsResize()->method; } -int64_t Resize::GetNewHeight() const { return this->primitive_->value.AsResize()->newHeight; } -int64_t Resize::GetNewWidth() const { return this->primitive_->value.AsResize()->newWidth; } -bool Resize::GetPreserveAspectRatio() const { return this->primitive_->value.AsResize()->preserveAspectRatio; } -int Resize::GetCoordinateTransformMode() const { return this->primitive_->value.AsResize()->coordinateTransformMode; } - -void Resize::SetFormat(int format) { this->primitive_->value.AsResize()->format = (schema::Format)format; } -void Resize::SetMethod(int method) { this->primitive_->value.AsResize()->method = (schema::ResizeMethod)method; } -void Resize::SetNewHeight(int64_t new_height) { this->primitive_->value.AsResize()->newHeight = new_height; } -void Resize::SetNewWidth(int64_t new_width) { this->primitive_->value.AsResize()->newWidth = new_width; } -void Resize::SetCoordinateTransformMode(int coordinate_transform_mode) { - this->primitive_->value.AsResize()->coordinateTransformMode = - static_cast<schema::CoordinateTransformMode>(coordinate_transform_mode); -} -void Resize::SetPreserveAspectRatio(bool preserve_aspect_ratio) { - this->primitive_->value.AsResize()->preserveAspectRatio = preserve_aspect_ratio; -} - -int Resize::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Resize; - } - if (this->primitive_->value.type != schema::PrimitiveType_Resize) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ResizeT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr value failed"; - return RET_ERROR; - } - if (prim.instance_name() == "ResizeNearestNeighbor") { - attr->method = schema::ResizeMethod_NEAREST; - } else if (prim.instance_name() == "ResizeBilinear") { - attr->method = schema::ResizeMethod_LINEAR; - } else { - delete attr; - MS_LOG(ERROR) << "wrong resize type"; - return RET_ERROR; - } - std::vector<int> targetSize = CastToInt(prim.GetAttr("size")); - attr->newHeight = targetSize.at(0); - attr->newWidth = targetSize.at(1); - attr->alignCorners = GetValue<bool>(prim.GetAttr("align_corners")); - if (attr->alignCorners) { - attr->coordinateTransformMode = schema::CoordinateTransformMode_ALIGN_CORNERS; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - if (attr != nullptr) { - delete attr; - } - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int Resize::GetFormat() const { return this->primitive_->value_as_Resize()->format(); } -int Resize::GetMethod() const { return this->primitive_->value_as_Resize()->method(); } -int64_t Resize::GetNewHeight() const { return this->primitive_->value_as_Resize()->newHeight(); } -int64_t Resize::GetNewWidth() const { return this->primitive_->value_as_Resize()->newWidth(); } 
-int Resize::GetCoordinateTransformMode() const { - return this->primitive_->value_as_Resize()->coordinateTransformMode(); -} -bool Resize::GetPreserveAspectRatio() const { return this->primitive_->value_as_Resize()->preserveAspectRatio(); } -int Resize::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Resize(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Resize return nullptr"; - return RET_ERROR; - } - auto val_offset = - schema::CreateResize(*fbb, attr->format(), attr->method(), attr->newHeight(), attr->newWidth(), - attr->alignCorners(), attr->preserveAspectRatio(), attr->coordinateTransformMode(), - attr->cubicCoeff(), attr->excludeOutside(), attr->extrapolationValue(), attr->nearestMode()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Resize, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ResizeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Resize>(primitive); } -Registry ResizeRegistry(schema::PrimitiveType_Resize, ResizeCreator); -#endif - -namespace { -constexpr int kInputRank = 4; -} // namespace -int64_t Resize::new_height() const { return new_height_; } -int64_t Resize::new_width() const { return new_width_; } -int Resize::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - if (input == nullptr) { - return RET_ERROR; - } - if (!input->shape().empty() && input->shape().size() != kInputRank) { - MS_LOG(ERROR) << "Size of input shape is wrong."; - return RET_ERROR; - } - - auto output = outputs_.front(); - if (output == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> output_shape; - output_shape.push_back(input->Batch()); - auto ret = CalculateNewHeightAndWidth(inputs_); - if (ret == RET_OK) { - output_shape.push_back(new_height_); - output_shape.push_back(new_width_); - output_shape.push_back(input->Channel()); - output->set_shape(output_shape); - } - return ret; -} - -int Resize::CalculateNewHeightAndWidth(const std::vector<lite::Tensor *> &inputs) { - auto input = inputs.front(); - if (inputs.size() == kDoubleNum) { - auto shape_tensor = inputs.at(1); - if (shape_tensor->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - size_t shape_size = shape_tensor->ElementsNum(); - switch (shape_size) { - case kQuadrupleNum: { - if (shape_tensor->data_type() == kNumberTypeInt32) { - auto data = reinterpret_cast<int32_t *>(shape_tensor->data_c()); - if (data == nullptr) { - MS_LOG(INFO) << "Resize op size can't cast int."; - return RET_INFER_INVALID; - } - switch (shape_tensor->format()) { - case schema::Format_NCHW: - new_height_ = data[2]; - new_width_ = data[3]; - break; - case schema::Format_NHWC: - new_height_ = data[1]; - new_width_ = data[2]; - break; - default: - MS_LOG(INFO) << "Resize don't support tensor format."; - return RET_INFER_INVALID; - } - } else if (shape_tensor->data_type() == kNumberTypeFloat32) { - auto data = reinterpret_cast<float *>(shape_tensor->data_c()); - if (data == nullptr) { - MS_LOG(INFO) << "Resize op size can't cast float."; - return RET_INFER_INVALID; - } - switch (shape_tensor->format()) { - case 
schema::Format_NCHW: - new_height_ = data[2] * input->Height(); - new_width_ = data[3] * input->Width(); - break; - case schema::Format_NHWC: - new_height_ = data[1] * input->Height(); - new_width_ = data[2] * input->Width(); - break; - default: - MS_LOG(INFO) << "Resize don't support tensor format."; - return RET_INFER_INVALID; - } - } - break; - } - case kDoubleNum: { - auto data = reinterpret_cast<int32_t *>(shape_tensor->data_c()); - if (data == nullptr) { - MS_LOG(INFO) << "Resize op size can't cast int."; - return RET_INFER_INVALID; - } - new_height_ = data[0]; - new_width_ = data[1]; - break; - } - case kSingleNum: { - // caffe zoom_factor - int scale; - if (shape_tensor->data_type() == kNumberTypeInt32) { - auto data = reinterpret_cast<int *>(shape_tensor->data_c()); - if (data == nullptr) { - MS_LOG(INFO) << "Resize op size can't cast int."; - return RET_INFER_INVALID; - } - scale = data[0]; - } else { - MS_LOG(ERROR) << "Unsupported data type:" << shape_tensor->data_type(); - return RET_INFER_ERR; - } - new_height_ = input->Height() + (input->Height() - 1) * (scale - 1); - new_width_ = input->Width() + (input->Width() - 1) * (scale - 1); - break; - } - default: { - MS_LOG(ERROR) << "Unsupported shape size:" << shape_size; - return RET_INFER_ERR; - } - } - } else if (inputs.size() == kSingleNum) { - new_height_ = GetNewHeight(); - new_width_ = GetNewWidth(); - } else if (inputs.size() == kQuadrupleNum) { - if (inputs[3]->data_c() == nullptr) { - return RET_INFER_INVALID; - } - new_height_ = static_cast<int *>(inputs.at(3)->data_c())[0]; - new_width_ = static_cast<int *>(inputs.at(3)->data_c())[1]; - } else { - MS_LOG(ERROR) << "inputs tensor size invalid."; - return RET_INFER_ERR; - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/resize.h b/mindspore/lite/src/ops/resize.h deleted file mode 100644 index 275ac07399..0000000000 --- a/mindspore/lite/src/ops/resize.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2020-2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RESIZE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RESIZE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Resize : public PrimitiveC { - public: - Resize() = default; - ~Resize() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Resize, PrimitiveC); - explicit Resize(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetMethod(int method); - void SetNewHeight(int64_t new_height); - void SetNewWidth(int64_t new_width); - void SetPreserveAspectRatio(bool preserve_aspect_ratio); - void SetCoordinateTransformMode(int coordinate_transform_mode); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - int GetMethod() const; - int64_t GetNewHeight() const; - int64_t GetNewWidth() const; - bool GetPreserveAspectRatio() const; - int GetCoordinateTransformMode() const; - - int64_t new_height() const; - int64_t new_width() const; - - private: - int CalculateNewHeightAndWidth(const std::vector<lite::Tensor *> &inputs); - int64_t new_height_; - int64_t new_width_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RESIZE_H_ diff --git a/mindspore/lite/src/ops/return.cc b/mindspore/lite/src/ops/return.cc deleted file mode 100644 index 401c886001..0000000000 --- a/mindspore/lite/src/ops/return.cc +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/return.h" -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Return::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Return; - } - if (this->primitive_->value.type != schema::PrimitiveType_Return) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::ReturnT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -PrimitiveC *ReturnCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Return>(primitive); } -Registry ReturnRegistry(schema::PrimitiveType_Return, ReturnCreator); -#endif - -namespace { -constexpr size_t kInputSize = 1; -constexpr size_t kOutputSize = 1; -} // namespace -int Return::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != kInputSize || outputs_.size() != kOutputSize) { - return RET_ERROR; - } - auto input = inputs_.front(); - auto output = outputs_.front(); - if (input == nullptr || output == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (this->primitive_ == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(input->data_type()); - output->set_shape(input->shape()); - output->set_format(input->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/return.h b/mindspore/lite/src/ops/return.h deleted file mode 100644 index f1c4c389c6..0000000000 --- a/mindspore/lite/src/ops/return.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RETURN_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RETURN_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Return : public PrimitiveC { - public: - Return() = default; - ~Return() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Return, PrimitiveC); - explicit Return(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RETURN_H_ diff --git a/mindspore/lite/src/ops/reverse.cc b/mindspore/lite/src/ops/reverse.cc deleted file mode 100644 index 26efd182a5..0000000000 --- a/mindspore/lite/src/ops/reverse.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/reverse.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Reverse::GetAxis() const { return this->primitive_->value.AsReverse()->axis; } - -void Reverse::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsReverse()->axis = axis; } - -#else - -std::vector<int> Reverse::GetAxis() const { - auto fb_vector = this->primitive_->value_as_Reverse()->axis(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Reverse::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Reverse(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Reverse return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> axis; - if (attr->axis() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) { - axis.push_back(attr->axis()->data()[i]); - } - } - auto val_offset = schema::CreateReverseDirect(*fbb, &axis); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Reverse, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ReverseCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Reverse>(primitive); } -Registry ReverseRegistry(schema::PrimitiveType_Reverse, ReverseCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/reverse.h b/mindspore/lite/src/ops/reverse.h deleted file mode 100644 index f29d3414a6..0000000000 --- a/mindspore/lite/src/ops/reverse.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_REVERSE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_REVERSE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Reverse : public PrimitiveC { - public: - Reverse() = default; - ~Reverse() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Reverse, PrimitiveC); - explicit Reverse(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(const std::vector<int> &axis); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - std::vector<int> GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_REVERSE_H_ diff --git a/mindspore/lite/src/ops/reverse_sequence.cc b/mindspore/lite/src/ops/reverse_sequence.cc deleted file mode 100644 index 08c52ebcd4..0000000000 --- a/mindspore/lite/src/ops/reverse_sequence.cc +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/reverse_sequence.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ReverseSequence::GetSeqAxis() const { return this->primitive_->value.AsReverseSequence()->seqAxis; } -int ReverseSequence::GetBatchAxis() const { return this->primitive_->value.AsReverseSequence()->batchAxis; } - -void ReverseSequence::SetSeqAxis(int seq_axis) { this->primitive_->value.AsReverseSequence()->seqAxis = seq_axis; } -void ReverseSequence::SetBatchAxis(int batch_axis) { - this->primitive_->value.AsReverseSequence()->batchAxis = batch_axis; -} - -#else - -int ReverseSequence::GetSeqAxis() const { return this->primitive_->value_as_ReverseSequence()->seqAxis(); } -int ReverseSequence::GetBatchAxis() const { return this->primitive_->value_as_ReverseSequence()->batchAxis(); } -int ReverseSequence::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_ReverseSequence(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ReverseSequence return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateReverseSequence(*fbb, attr->seqAxis(), attr->batchAxis()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ReverseSequence, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ReverseSequenceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ReverseSequence>(primitive); -} -Registry ReverseSequenceRegistry(schema::PrimitiveType_ReverseSequence, ReverseSequenceCreator); - -#endif - -int ReverseSequence::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - auto input = inputs.front(); - auto output = outputs.front(); - MS_ASSERT(input != nullptr); - MS_ASSERT(output != nullptr); - - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/reverse_sequence.h b/mindspore/lite/src/ops/reverse_sequence.h deleted file mode 100644 index dd473d1d6f..0000000000 --- a/mindspore/lite/src/ops/reverse_sequence.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_REVERSE_SEQUENCE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_REVERSE_SEQUENCE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ReverseSequence : public PrimitiveC { - public: - ReverseSequence() = default; - ~ReverseSequence() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ReverseSequence, PrimitiveC); - explicit ReverseSequence(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetSeqAxis(int seq_axis); - void SetBatchAxis(int batch_axis); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetSeqAxis() const; - int GetBatchAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_REVERSE_SEQUENCE_H_ diff --git a/mindspore/lite/src/ops/rfft.cc b/mindspore/lite/src/ops/rfft.cc deleted file mode 100644 index 0fe7734e7b..0000000000 --- a/mindspore/lite/src/ops/rfft.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/rfft.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Rfft::GetFftLength() const { return this->primitive_->value.AsRfft()->fftLength; } - -void Rfft::SetFftLength(int fft_length) { this->primitive_->value.AsRfft()->fftLength = fft_length; } - -#else -int Rfft::GetFftLength() const { return this->primitive_->value_as_Rfft()->fftLength(); } -int Rfft::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Rfft(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Add return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateRfft(*fbb, attr->fftLength()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Rfft, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *RfftCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Rfft>(primitive); } -Registry RfftRegistry(schema::PrimitiveType_Rfft, RfftCreator); -#endif -int Rfft::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(TypeId::kNumberTypeComplex64); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - input_shape.at(input_shape.size() - 1) = GetFftLength() / 2 + 1; - input_shape.push_back(2); - outputs_.front()->set_shape(input_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/rfft.h b/mindspore/lite/src/ops/rfft.h deleted file mode 100644 index 0ec0ccd877..0000000000 --- a/mindspore/lite/src/ops/rfft.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_RFFT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_RFFT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Rfft : public PrimitiveC { - public: - Rfft() = default; - ~Rfft() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Rfft, PrimitiveC); - explicit Rfft(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFftLength(int fft_length); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetFftLength() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_RFFT_H_ diff --git a/mindspore/lite/src/ops/roi_pooling.cc b/mindspore/lite/src/ops/roi_pooling.cc deleted file mode 100644 index 6a0704392e..0000000000 --- a/mindspore/lite/src/ops/roi_pooling.cc +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/roi_pooling.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int ROIPooling::GetPooledH() const { return this->primitive_->value.AsROIPooling()->pooledH; } -int ROIPooling::GetPooledW() const { return this->primitive_->value.AsROIPooling()->pooledW; } -float ROIPooling::GetScale() const { return this->primitive_->value.AsROIPooling()->scale; } - -void ROIPooling::SetPooledH(int pooled_h) { this->primitive_->value.AsROIPooling()->pooledH = pooled_h; } -void ROIPooling::SetPooledW(int pooled_w) { this->primitive_->value.AsROIPooling()->pooledW = pooled_w; } -void ROIPooling::SetScale(float scale) { this->primitive_->value.AsROIPooling()->scale = scale; } - -#else - -int ROIPooling::GetPooledH() const { return this->primitive_->value_as_ROIPooling()->pooledH(); } -int ROIPooling::GetPooledW() const { return this->primitive_->value_as_ROIPooling()->pooledW(); } -float ROIPooling::GetScale() const { return this->primitive_->value_as_ROIPooling()->scale(); } -int ROIPooling::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_ROIPooling(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_ROIPooling return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateROIPooling(*fbb, attr->pooledH(), attr->pooledW(), attr->scale()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ROIPooling, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ROIPoolingCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ROIPooling>(primitive); -} -Registry 
ROIPoolingRegistry(schema::PrimitiveType_ROIPooling, ROIPoolingCreator); -#endif - -int ROIPooling::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "inputs number is not equal to " << kDoubleNum; - return RET_ERROR; - } - auto input = inputs_.front(); - if (input == nullptr) { - return RET_NULL_PTR; - } - auto roi = inputs_.at(1); - if (roi == nullptr) { - return RET_NULL_PTR; - } - auto output = outputs_.front(); - if (output == nullptr) { - return RET_NULL_PTR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - auto new_h = GetPooledH(); - auto new_w = GetPooledW(); - auto shape_data = roi->shape(); - std::vector<int> output_shape; - output_shape.push_back(shape_data[0]); - output_shape.push_back(new_h); - output_shape.push_back(new_w); - output_shape.push_back(input->Channel()); - output->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/roi_pooling.h b/mindspore/lite/src/ops/roi_pooling.h deleted file mode 100644 index c1b942fb61..0000000000 --- a/mindspore/lite/src/ops/roi_pooling.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_R_O_I_POOLING_H_ -#define LITE_MINDSPORE_LITE_C_OPS_R_O_I_POOLING_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ROIPooling : public PrimitiveC { - public: - ROIPooling() = default; - ~ROIPooling() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ROIPooling, PrimitiveC); - explicit ROIPooling(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetPooledH(int pooled_h); - void SetPooledW(int pooled_w); - void SetScale(float scale); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetPooledH() const; - int GetPooledW() const; - float GetScale() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_R_O_I_POOLING_H_ diff --git a/mindspore/lite/src/ops/round.cc b/mindspore/lite/src/ops/round.cc deleted file mode 100644 index 35512ef604..0000000000 --- a/mindspore/lite/src/ops/round.cc +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/round.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int Round::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateRound(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Round, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *RoundCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Round>(primitive); } -Registry RoundRegistry(schema::PrimitiveType_Round, RoundCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/round.h b/mindspore/lite/src/ops/round.h deleted file mode 100644 index 9586a797fe..0000000000 --- a/mindspore/lite/src/ops/round.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Round : public ArithmeticSelf { - public: - Round() = default; - ~Round() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Round, ArithmeticSelf); - explicit Round(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ROUND_H_ diff --git a/mindspore/lite/src/ops/rsqrt.cc b/mindspore/lite/src/ops/rsqrt.cc deleted file mode 100644 index c2ae73ff52..0000000000 --- a/mindspore/lite/src/ops/rsqrt.cc +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/rsqrt.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Rsqrt::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Rsqrt; - } - if (this->primitive_->value.type != schema::PrimitiveType_Rsqrt) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::RsqrtT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Rsqrt::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateRsqrt(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Rsqrt, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *RsqrtCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Rsqrt>(primitive); } -Registry RsqrtRegistry(schema::PrimitiveType_Rsqrt, RsqrtCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/rsqrt.h b/mindspore/lite/src/ops/rsqrt.h deleted file mode 100644 index 720975bf2e..0000000000 --- a/mindspore/lite/src/ops/rsqrt.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_RSQRT_H_ -#define MINDSPORE_LITE_SRC_OPS_RSQRT_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Rsqrt : public ArithmeticSelf { - public: - Rsqrt() = default; - ~Rsqrt() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Rsqrt, ArithmeticSelf); - explicit Rsqrt(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_RSQRT_H_ diff --git a/mindspore/lite/src/ops/scale.cc b/mindspore/lite/src/ops/scale.cc deleted file mode 100644 index 26362b1d3c..0000000000 --- a/mindspore/lite/src/ops/scale.cc +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/scale.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Scale::GetAxis() const { return this->primitive_->value.AsScale()->axis; } -void Scale::SetAxis(int axis) { this->primitive_->value.AsScale()->axis = axis; } -int Scale::GetActivationType() const { return this->primitive_->value.AsScale()->activationType; } -void Scale::SetActivationType(int activation_type) { - this->primitive_->value.AsScale()->activationType = (schema::ActivationType)activation_type; -} - -#else - -int Scale::GetAxis() const { return this->primitive_->value_as_Scale()->axis(); } -int Scale::GetActivationType() const { return this->primitive_->value_as_Scale()->activationType(); } -int Scale::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Scale(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Scale return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateScale(*fbb, attr->axis(), attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Scale, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ScaleCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Scale>(primitive); } -Registry ScaleRegistry(schema::PrimitiveType_Scale, ScaleCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/scale.h b/mindspore/lite/src/ops/scale.h deleted file mode 100644 index b0d42762c1..0000000000 --- a/mindspore/lite/src/ops/scale.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in 
compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SCALE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SCALE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Scale : public PrimitiveC { - public: - Scale() = default; - ~Scale() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Scale, PrimitiveC); - explicit Scale(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - void SetActivationType(int activation_type); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetAxis() const; - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SCALE_H_ diff --git a/mindspore/lite/src/ops/scatter_nd.cc b/mindspore/lite/src/ops/scatter_nd.cc deleted file mode 100644 index fb5239fdd0..0000000000 --- a/mindspore/lite/src/ops/scatter_nd.cc +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/scatter_nd.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -namespace { -constexpr int kScatterNDInputNum = 3; -constexpr int kScatterNDOutputNum = 1; -constexpr int kScatterShapeIndex = 0; -constexpr int kScatterIndicesIndex = 1; -constexpr int kScatterUpdateIndex = 2; -} // namespace -int ScatterND::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != kScatterNDInputNum) { - MS_LOG(ERROR) << "inputs number is not equal to " << kScatterNDInputNum; - return RET_ERROR; - } - if (outputs_.size() != kScatterNDOutputNum) { - MS_LOG(ERROR) << "outputs number is not equal to " << kScatterNDInputNum; - return RET_ERROR; - } - auto shape = inputs_.at(kScatterShapeIndex); - if (shape == nullptr) { - MS_LOG(ERROR) << "shape null pointer dereferencing."; - return RET_ERROR; - } - auto indices = inputs_.at(kScatterIndicesIndex); - if (indices == nullptr) { - MS_LOG(ERROR) << "indices null pointer dereferencing."; - return RET_ERROR; - } - auto update = inputs_.at(kScatterUpdateIndex); - if (update == nullptr) { - MS_LOG(ERROR) << "update null pointer dereferencing."; - return RET_ERROR; - } - auto output = outputs_.front(); - if (output == nullptr) { - MS_LOG(ERROR) << "output null pointer dereferencing."; - return RET_ERROR; - } - output->set_data_type(update->data_type()); - output->set_format(update->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto shape_data = reinterpret_cast<int *>(shape->MutableData()); - std::vector<int> out_shape(shape_data, shape_data + shape->ElementsNum()); - output->set_shape(out_shape); - return RET_OK; -} -#ifdef PRIMITIVE_WRITEABLE -#else -int ScatterND::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateScatterND(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ScatterND, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/scatter_nd.h b/mindspore/lite/src/ops/scatter_nd.h deleted file mode 100644 index 35d33cb540..0000000000 --- a/mindspore/lite/src/ops/scatter_nd.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SCATTER_ND_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SCATTER_ND_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ScatterND : public PrimitiveC { - public: - ScatterND() = default; - ~ScatterND() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ScatterND, PrimitiveC); - explicit ScatterND(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SCATTER_ND_H_ diff --git a/mindspore/lite/src/ops/schema_def.h b/mindspore/lite/src/ops/schema_def.h deleted file mode 100644 index 2231471b4a..0000000000 --- a/mindspore/lite/src/ops/schema_def.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_SRC_OPS_SCHEMA_DEF_H_ -#define MINDSPORE_LITE_SRC_OPS_SCHEMA_DEF_H_ -#include <string> -#include "src/ops/schema_register.h" -#ifdef PRIMITIVE_WRITEABLE -#include "ops/conv2d.h" -#include "schema/inner/model_generated.h" -#endif - -#ifdef GEN_SCHEMA_DEF -#define OP_SCHEMA_DEF(OP) \ - namespace mindspore::lite::ops { \ - std::string Gen##OP##Def() { \ - std::string op_def = "table "; \ - op_def.append(#OP); \ - op_def.append(" {\n"); -#elif PRIMITIVE_WRITEABLE -#define OP_SCHEMA_DEF(OP) \ - namespace mindspore::lite::ops { \ - mindspore::schema::OP##T *PrimitiveOp2SchemaOp(const mindspore::OP *op) { \ - mindspore::schema::OP##T *result_op = new (std::nothrow) mindspore::schema::OP##T(); -#else -#define OP_SCHEMA_DEF(OP) -#endif - -#ifdef GEN_SCHEMA_DEF -#define OP_ATTR(key, type) op_def.append(#key).append(": ").append(#type).append(";\n"); -#elif PRIMITIVE_WRITEABLE -#define OP_ATTR(key, type) result_op->key = op->get_##key(); -#else -#define OP_ATTR(key, type) -#endif - -#ifdef GEN_SCHEMA_DEF -#define OP_ATTR_WITH_VALUE(key, type, value) \ - op_def.append(#key).append(": ").append(#type).append(" = ").append(#value).append(";\n"); -#elif PRIMITIVE_WRITEABLE -#define OP_ATTR_WITH_VALUE(key, type, value) result_op->key = op->get_##key(); -#else -#define OP_ATTR_WITH_VALUE(key, type, value) -#endif - -#ifdef GEN_SCHEMA_DEF -#define OP_SCHEMA_DEF_END(OP) \ - op_def.append("}\n\n"); \ - return op_def; \ - } \ - SchemaOpRegister g_schema_op_##OP(Gen##OP##Def); \ - } // namespace mindspore::lite::ops -#elif PRIMITIVE_WRITEABLE -#define OP_SCHEMA_DEF_END(OP) \ - return result_op; \ - } \ - } // namespace mindspore::lite::ops -#else -#define OP_SCHEMA_DEF_END(OP) -#endif -#endif // MINDSPORE_LITE_SRC_OPS_SCHEMA_DEF_H_ diff --git a/mindspore/lite/src/ops/schema_register.h b/mindspore/lite/src/ops/schema_register.h index 1f70762650..6c993be9ee 100644 
--- a/mindspore/lite/src/ops/schema_register.h +++ b/mindspore/lite/src/ops/schema_register.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -31,21 +31,26 @@ class SchemaRegisterImpl { void OpPush(GetSchemaDef func) { op_def_funcs_.push_back(func); } - void TypePush(GetSchemaDef func) { type_def_funcs_.push_back(func); } - const std::vector<GetSchemaDef> &GetAllOpDefCreateFuncs() const { return op_def_funcs_; } - const std::vector<GetSchemaDef> &GetAllTypeDefCreateFuncs() const { return type_def_funcs_; } + void SetPrimTypeGenFunc(GetSchemaDef func) { prim_type_gen_ = func; } + + GetSchemaDef GetPrimTypeGenFunc() const { return prim_type_gen_; } private: std::vector<GetSchemaDef> op_def_funcs_; - std::vector<GetSchemaDef> type_def_funcs_; + GetSchemaDef prim_type_gen_; }; class SchemaOpRegister { public: explicit SchemaOpRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->OpPush(func); } }; + +class PrimitiveTypeRegister { + public: + explicit PrimitiveTypeRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->SetPrimTypeGenFunc(func); } +}; } // namespace mindspore::lite::ops #endif // MINDSPORE_LITE_SRC_OPS_SCHEMA_REGISTER_H_ diff --git a/mindspore/lite/src/ops/select.cc b/mindspore/lite/src/ops/select.cc deleted file mode 100644 index 1bcb18dd67..0000000000 --- a/mindspore/lite/src/ops/select.cc +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/select.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/tensorlist.h" - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Select::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Select; - } - if (this->primitive_->value.type != schema::PrimitiveType_Select) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SelectT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Select::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Select(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Select return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSelect(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Select, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SelectCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Select>(primitive); } -Registry SelectRegistry(schema::PrimitiveType_Select, SelectCreator); -#endif - -int Select::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(inputs_.size() == 2 * outputs_.size() + 1); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - for (size_t i = 0; i < outputs_.size(); i++) { - auto *input = inputs_[i + 1]; - auto *output = outputs_[i]; - if (input == nullptr) { - MS_LOG(ERROR) << "input tensor is nullptr"; - return RET_ERROR; - } - if (output == nullptr) { - MS_LOG(ERROR) << "output tensor is nullptr"; - return RET_ERROR; - } - output->set_data_type(input->data_type()); - output->set_shape(input->shape()); - output->set_format(input->format()); - auto data_type = input->data_type(); - if (data_type == kObjectTypeTensorType) { - auto input_tensorlist = reinterpret_cast<TensorList *>(input); - auto output_tensorlist = reinterpret_cast<TensorList *>(output); - output_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - } - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/select.h b/mindspore/lite/src/ops/select.h deleted file mode 100644 index 02f8ec452d..0000000000 --- a/mindspore/lite/src/ops/select.h +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_SELECT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SELECT_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Select : public PrimitiveC { - public: - Select() = default; - ~Select() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Select, PrimitiveC); - explicit Select(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_SELECT_H_ diff --git a/mindspore/lite/src/ops/sgd.cc b/mindspore/lite/src/ops/sgd.cc deleted file mode 100644 index 1862db81f4..0000000000 --- a/mindspore/lite/src/ops/sgd.cc +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */
-#include "src/ops/sgd.h"
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-#ifdef PRIMITIVE_WRITEABLE
-float Sgd::GetWeightDecay() const { return this->primitive_->value.AsSgd()->weightDecay; }
-float Sgd::GetDampening() const { return this->primitive_->value.AsSgd()->dampening; }
-bool Sgd::GetUseNesterov() const { return this->primitive_->value.AsSgd()->useNesterov; }
-
-int Sgd::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) {
-  if (this->primitive_ == nullptr) {
-    this->primitive_ = new (std::nothrow) schema::PrimitiveT;
-    if (this->primitive_ == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT failed";
-      return RET_ERROR;
-    }
-    this->primitive_->value.type = schema::PrimitiveType_Sgd;
-  }
-  if (this->primitive_->value.type != schema::PrimitiveType_Sgd) {
-    MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type;
-    return RET_ERROR;
-  }
-  if (this->primitive_->value.value == nullptr) {
-    auto attr = std::make_unique<schema::SgdT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-    attr->weightDecay = GetValue<float>(prim.GetAttr("weight_decay"));
-    attr->dampening = GetValue<float>(prim.GetAttr("dampening"));
-    attr->useNesterov = GetValue<bool>(prim.GetAttr("nesterov"));
-
-    this->primitive_->value.value = attr.release();
-    if (this->primitive_->value.value == nullptr) {
-      MS_LOG(ERROR) << "new primitiveT value failed";
-      return RET_ERROR;
-    }
-  }
-  return RET_OK;
-}
-#else
-float Sgd::GetWeightDecay() const { return this->primitive_->value_as_Sgd()->weightDecay(); }
-float Sgd::GetDampening() const { return this->primitive_->value_as_Sgd()->dampening(); }
-bool Sgd::GetUseNesterov() const { return this->primitive_->value_as_Sgd()->useNesterov(); }
-
-int Sgd::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto attr = primitive->value_as_Sgd();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "value_as_Sgd return nullptr";
-    return RET_ERROR;
-  }
-  auto val_offset = schema::CreateSgd(*fbb, attr->weightDecay(), attr->dampening(), attr->useNesterov());
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Sgd, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-
-PrimitiveC *SgdCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Sgd>(primitive); }
-Registry SgdRegistry(schema::PrimitiveType_Sgd, SgdCreator);
-
-#endif
-
-int Sgd::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) {
-  if (6 != inputs.size()) {
-    MS_LOG(ERROR) << "Sgd should have 6 input tensors";
-    return RET_ERROR;
-  }
-
-  if (inputs.at(0)->ElementsNum() != inputs.at(1)->ElementsNum() ||
-      inputs.at(0)->ElementsNum() != inputs.at(3)->ElementsNum() || inputs.at(2)->ElementsNum() != 1 ||
-      inputs.at(4)->ElementsNum() != 1) {
-    MS_LOG(ERROR) << "error input data size!";
-    return RET_ERROR;
-  }
-  if (!outputs.empty()) {
-    auto *out = outputs.front();
-    MS_ASSERT(out != nullptr);
-    out->set_data_type(inputs.at(0)->data_type());
-    out->set_format(inputs.at(0)->format());
-    out->set_shape({1});
-  }
-
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/sgd.h b/mindspore/lite/src/ops/sgd.h
deleted file mode 100644
index 6d4903d77a..0000000000
--- a/mindspore/lite/src/ops/sgd.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SGD_H_ -#define MINDSPORE_LITE_SRC_OPS_SGD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Sgd : public PrimitiveC { - public: - Sgd() = default; - ~Sgd() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Sgd, PrimitiveC); - explicit Sgd(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetWeightDecay() const; - float GetDampening() const; - bool GetUseNesterov() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SGD_H_ diff --git a/mindspore/lite/src/ops/shape.cc b/mindspore/lite/src/ops/shape.cc deleted file mode 100644 index 944824da76..0000000000 --- a/mindspore/lite/src/ops/shape.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/shape.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -namespace { -constexpr int kShapeInputNum = 1; -constexpr int kShapeOutputNum = 1; -} // namespace -int Shape::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != kShapeInputNum) { - MS_LOG(ERROR) << "inputs to Shape operator should be 1, but " << inputs_.size() << " is given."; - return RET_ERROR; - } - if (outputs_.size() != kShapeOutputNum) { - MS_LOG(ERROR) << "outputs to Shape operator should be 1, but " << outputs_.size() << " is given."; - return RET_ERROR; - } - auto in_tensor = inputs_.front(); - auto out_tensor = outputs_.front(); - out_tensor->set_data_type(kNumberTypeInt32); - out_tensor->set_format(in_tensor->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> out_shape; - out_shape.push_back(static_cast<int>(in_tensor->shape().size())); - out_tensor->set_shape(out_shape); - return RET_OK; -} -#ifdef PRIMITIVE_WRITEABLE -#else -int Shape::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateShape(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Shape, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *ShapeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Shape>(primitive); } -Registry ShapeRegistry(schema::PrimitiveType_Shape, ShapeCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/shape.h b/mindspore/lite/src/ops/shape.h deleted file mode 100644 index b38efd28b4..0000000000 --- a/mindspore/lite/src/ops/shape.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
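A hedged sketch of the static-shape rule in the Shape::InferShape deleted above: the output is a 1-D int32 tensor whose single dimension equals the input's rank; the actual dimension values are only written at run time. The helper name is illustrative.

#include <vector>

// e.g. an input of shape {2, 3, 4} yields an output of static shape {3};
// the values 2, 3, 4 themselves are filled in by the kernel at run time.
std::vector<int> ShapeOpOutputShape(const std::vector<int> &input_shape) {
  return {static_cast<int>(input_shape.size())};
}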
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SHAPE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SHAPE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Shape : public PrimitiveC { - public: - Shape() = default; - ~Shape() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Shape, PrimitiveC); - explicit Shape(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SHAPE_H_ diff --git a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.cc b/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.cc deleted file mode 100644 index c1d6b2124d..0000000000 --- a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.cc +++ /dev/null @@ -1,100 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "src/ops/sigmoid_cross_entropy_with_logits.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SigmoidCrossEntropyWithLogits::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SigmoidCrossEntropyWithLogits; - } - if (this->primitive_->value.type != schema::PrimitiveType_SigmoidCrossEntropyWithLogits) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::SigmoidCrossEntropyWithLogitsT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int SigmoidCrossEntropyWithLogits::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SigmoidCrossEntropyWithLogits(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SigmoidCrossEntropyWithLogits return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSigmoidCrossEntropyWithLogits(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SigmoidCrossEntropyWithLogits, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} 
- -PrimitiveC *SigmoidCrossEntropyWithLogitsCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SigmoidCrossEntropyWithLogits>(primitive); -} -Registry SigmoidCrossEntropyWithLogitsRegistry(schema::PrimitiveType_SigmoidCrossEntropyWithLogits, - SigmoidCrossEntropyWithLogitsCreator); -#endif - -int SigmoidCrossEntropyWithLogits::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (inputs.size() != 2) { - MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogits should have 2 input tensors"; - return RET_ERROR; - } - - if (outputs.size() != 1) { - MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogits should have 1 output tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum()) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape(inputs[0]->shape()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.h b/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.h deleted file mode 100644 index f7148f4bab..0000000000 --- a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ -#define MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SigmoidCrossEntropyWithLogits : public PrimitiveC { - public: - SigmoidCrossEntropyWithLogits() = default; - ~SigmoidCrossEntropyWithLogits() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SigmoidCrossEntropyWithLogits, PrimitiveC); - explicit SigmoidCrossEntropyWithLogits(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_H_ diff --git a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.cc deleted file mode 100644 index 3ab39fa5f1..0000000000 --- a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.cc +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/sigmoid_cross_entropy_with_logits_grad.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SigmoidCrossEntropyWithLogitsGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::SigmoidCrossEntropyWithLogitsGradT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int SigmoidCrossEntropyWithLogitsGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SigmoidCrossEntropyWithLogitsGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SigmoidCrossEntropyWithLogitsGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSigmoidCrossEntropyWithLogitsGrad(*fbb); - auto prim_offset = - schema::CreatePrimitive(*fbb, schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SigmoidCrossEntropyWithLogitsGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SigmoidCrossEntropyWithLogitsGrad>(primitive); -} -Registry SigmoidCrossEntropyWithLogitsGradRegistry(schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, - SigmoidCrossEntropyWithLogitsGradCreator); -#endif - -int SigmoidCrossEntropyWithLogitsGrad::InferShape(std::vector<lite::Tensor *> inputs, - std::vector<lite::Tensor *> outputs) { - if (inputs.size() != 3) { - MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogitsGrad should have 3 input tensors"; - return RET_ERROR; - } - - if (outputs.size() != 1) { - MS_LOG(ERROR) << "SigmoidCrossEntropyWithLogitsGrad should have 1 output tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[2]->ElementsNum()) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape(inputs[0]->shape()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.h b/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.h deleted file mode 100644 index 716edd949f..0000000000 --- a/mindspore/lite/src/ops/sigmoid_cross_entropy_with_logits_grad.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file 
except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SigmoidCrossEntropyWithLogitsGrad : public PrimitiveC { - public: - SigmoidCrossEntropyWithLogitsGrad() = default; - ~SigmoidCrossEntropyWithLogitsGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SigmoidCrossEntropyWithLogitsGrad, PrimitiveC); - explicit SigmoidCrossEntropyWithLogitsGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SIGMOID_CROSS_ENTROPY_WITH_LOGITS_GRAD_H_ diff --git a/mindspore/lite/src/ops/sin.cc b/mindspore/lite/src/ops/sin.cc deleted file mode 100644 index 4d39682bd2..0000000000 --- a/mindspore/lite/src/ops/sin.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/sin.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Sin::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Sin; - } - if (this->primitive_->value.type != schema::PrimitiveType_Sin) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - this->primitive_->value.value = new (std::nothrow) schema::SinT(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else -int Sin::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateSin(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Sin, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SinCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Sin>(primitive); } -Registry SinRegistry(schema::PrimitiveType_Sin, SinCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sin.h b/mindspore/lite/src/ops/sin.h deleted file mode 100644 index b8a00527ab..0000000000 --- a/mindspore/lite/src/ops/sin.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SIN_H_ -#define MINDSPORE_LITE_SRC_OPS_SIN_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Sin : public ArithmeticSelf { - public: - Sin() = default; - ~Sin() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Sin, ArithmeticSelf); - explicit Sin(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SIN_H_ diff --git a/mindspore/lite/src/ops/size.cc b/mindspore/lite/src/ops/size.cc deleted file mode 100644 index 104b92afb4..0000000000 --- a/mindspore/lite/src/ops/size.cc +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
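The SinCreator/SinRegistry pair above follows the file-static self-registration idiom used throughout these deleted ops. Below is a minimal sketch of how such a Registry can work; this is an assumption about the mechanism, with hypothetical names, not the actual src/ops/ops_register.h implementation.

#include <functional>
#include <map>

struct PrimitiveC;  // stands in for the real base class

using Creator = std::function<PrimitiveC *(const void *primitive)>;

// Function-local static table avoids the static initialization order problem.
std::map<int, Creator> &CreatorTable() {
  static std::map<int, Creator> table;
  return table;
}

// Constructing a file-scope Registry object at static-init time inserts the
// creator, so merely linking the .cc file registers the op for its type id.
struct Registry {
  Registry(int type, Creator creator) { CreatorTable()[type] = creator; }
};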
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/ops/size.h"
-#include "src/common/common.h"
-
-#ifndef PRIMITIVE_WRITEABLE
-#include "src/ops/ops_register.h"
-#endif
-
-namespace mindspore {
-namespace lite {
-constexpr int kShapeInputNum = 1;
-constexpr int kShapeOutputNum = 1;
-#ifdef PRIMITIVE_WRITEABLE
-#else
-int Size::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) {
-  MS_ASSERT(nullptr != primitive);
-  MS_ASSERT(nullptr != fbb);
-  auto val_offset = schema::CreateSize(*fbb);
-  auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Size, val_offset.o);
-  fbb->Finish(prim_offset);
-  return RET_OK;
-}
-PrimitiveC *SizeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Size>(primitive); }
-Registry SizeRegistry(schema::PrimitiveType_Size, SizeCreator);
-#endif
-
-int Size::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) {
-  if (inputs_.size() != kShapeInputNum) {
-    MS_LOG(ERROR) << "inputs to Size operator should be 1, but " << inputs_.size() << " is given.";
-    return RET_ERROR;
-  }
-  if (outputs_.size() != kShapeOutputNum) {
-    MS_LOG(ERROR) << "outputs to Size operator should be 1, but " << outputs_.size() << " is given.";
-    return RET_ERROR;
-  }
-  auto in_tensor = inputs_.front();
-  auto out_tensor = outputs_.front();
-  out_tensor->set_data_type(kNumberTypeInt32);
-  out_tensor->set_format(in_tensor->format());
-  if (!infer_flag()) {
-    return RET_INFER_INVALID;
-  }
-  std::vector<int> out_shape;
-  out_shape.push_back(1);
-  out_tensor->set_shape(out_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/size.h b/mindspore/lite/src/ops/size.h
deleted file mode 100644
index 48a3ac2152..0000000000
--- a/mindspore/lite/src/ops/size.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2021 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SIZE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SIZE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Size : public PrimitiveC { - public: - Size() = default; - ~Size() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Size, PrimitiveC); - explicit Size(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SIZE_H_ diff --git a/mindspore/lite/src/ops/skip_gram.cc b/mindspore/lite/src/ops/skip_gram.cc deleted file mode 100644 index 253cce09a7..0000000000 --- a/mindspore/lite/src/ops/skip_gram.cc +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/skip_gram.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SkipGram::GetNgramSize() const { return this->primitive_->value.AsSkipGram()->ngramSize; } -int SkipGram::GetMaxSkipSize() const { return this->primitive_->value.AsSkipGram()->maxSkipSize; } -bool SkipGram::GetIncludeAllNgrams() const { return this->primitive_->value.AsSkipGram()->includeAllGrams; } - -void SkipGram::SetNgramSize(int ngram_size) { this->primitive_->value.AsSkipGram()->ngramSize = ngram_size; } -void SkipGram::SetMaxSkipSize(int max_skip_size) { this->primitive_->value.AsSkipGram()->maxSkipSize = max_skip_size; } -void SkipGram::SetIncludeAllNgrams(bool include_all_ngrams) { - this->primitive_->value.AsSkipGram()->includeAllGrams = include_all_ngrams; -} - -#else -int SkipGram::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_SkipGram(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SkipGram return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateSkipGram(*fbb, attr->includeAllGrams(), attr->maxSkipSize(), attr->ngramSize()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SkipGram, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int SkipGram::GetNgramSize() const { return this->primitive_->value_as_SkipGram()->ngramSize(); } -int SkipGram::GetMaxSkipSize() const { return this->primitive_->value_as_SkipGram()->maxSkipSize(); } -bool SkipGram::GetIncludeAllNgrams() const { return this->primitive_->value_as_SkipGram()->includeAllGrams(); } - -PrimitiveC *SkipGramCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SkipGram>(primitive); -} -Registry 
SkipGramRegistry(schema::PrimitiveType_SkipGram, SkipGramCreator);
-#endif
-
-int SkipGram::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) {
-  MS_ASSERT(this->primitive_ != nullptr);
-  if (inputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "Skip Gram should have one input";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  if (outputs_.size() != kSingleNum) {
-    MS_LOG(ERROR) << "Skip Gram should have one output";
-    return RET_INPUT_TENSOR_ERROR;
-  }
-  auto input = inputs_.front();
-  auto output = outputs_.front();
-  MS_ASSERT(input != nullptr);
-  output->set_format(input->format());
-  output->set_data_type(input->data_type());
-
-  if (input->data_c() == nullptr) {
-    MS_LOG(INFO) << "Do infer shape in runtime.";
-    return RET_INFER_INVALID;
-  }
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/skip_gram.h b/mindspore/lite/src/ops/skip_gram.h
deleted file mode 100644
index b2a7a570d7..0000000000
--- a/mindspore/lite/src/ops/skip_gram.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef LITE_MINDSPORE_LITE_C_OPS_SKIP_GRAM_H_
-#define LITE_MINDSPORE_LITE_C_OPS_SKIP_GRAM_H_
-
-#include <vector>
-#include <set>
-#include <cmath>
-#include "src/ops/primitive_c.h"
-
-namespace mindspore {
-namespace lite {
-class SkipGram : public PrimitiveC {
- public:
-  SkipGram() = default;
-  ~SkipGram() = default;
-#ifdef PRIMITIVE_WRITEABLE
-  MS_DECLARE_PARENT(SkipGram, PrimitiveC);
-  explicit SkipGram(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {}
-  void SetNgramSize(int ngram_size);
-  void SetMaxSkipSize(int max_skip_size);
-  void SetIncludeAllNgrams(bool include_all_ngrams);
-
-#else
-  int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override;
-#endif
-  int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override;
-  int GetNgramSize() const;
-  int GetMaxSkipSize() const;
-  bool GetIncludeAllNgrams() const;
-};
-}  // namespace lite
-}  // namespace mindspore
-
-#endif  // LITE_MINDSPORE_LITE_C_OPS_SKIP_GRAM_H_
diff --git a/mindspore/lite/src/ops/slice.cc b/mindspore/lite/src/ops/slice.cc
deleted file mode 100644
index 734cf22ad0..0000000000
--- a/mindspore/lite/src/ops/slice.cc
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -#include "src/ops/slice.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -namespace { -constexpr int kSliceInputNum = 1; -constexpr int kSliceOutputNum = 1; -constexpr int kSliceMaxInputNum = 5; -} // namespace -#ifdef PRIMITIVE_WRITEABLE -int Slice::GetFormat() const { return this->primitive_->value.AsSlice()->format; } -std::vector<int> Slice::GetBegin() const { return this->primitive_->value.AsSlice()->begin; } -std::vector<int> Slice::GetSize() const { return this->primitive_->value.AsSlice()->size; } -std::vector<int> Slice::GetAxes() const { return this->primitive_->value.AsSlice()->axes; } - -void Slice::SetFormat(int format) { this->primitive_->value.AsSlice()->format = (schema::Format)format; } -void Slice::SetBegin(const std::vector<int> &begin) { this->primitive_->value.AsSlice()->begin = begin; } -void Slice::SetSize(const std::vector<int> &size) { this->primitive_->value.AsSlice()->size = size; } - -int Slice::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Slice; - } - if (this->primitive_->value.type != schema::PrimitiveType_Slice) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SliceT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (inputs.size() >= kAnfPopulaterInputNumThree) { - auto beginNode = inputs[kAnfPopulaterInputNumOne]; - MS_ASSERT(beginNode != nullptr); - if (beginNode->isa<ValueNode>()) { - auto valueNode = beginNode->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto value = valueNode->value(); - MS_ASSERT(value != nullptr); - if (value->isa<ValueTuple>()) { - auto valTuplPtr = dyn_cast<ValueTuple>(value); - MS_ASSERT(valTuplPtr != nullptr); - for (size_t i = 0; i < valTuplPtr->size(); i++) { - auto elem = (*valTuplPtr)[i]; - MS_ASSERT(elem != nullptr); - attr->begin.emplace_back(CastToInt(elem).front()); - } - } - } - auto sizeNode = inputs.at(kAnfPopulaterInputNumTwo); - MS_ASSERT(sizeNode != nullptr); - if (sizeNode->isa<ValueNode>()) { - auto valueNode = sizeNode->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto value = valueNode->value(); - MS_ASSERT(value != nullptr); - if (value->isa<ValueTuple>()) { - auto valTuplPtr = dyn_cast<ValueTuple>(value); - MS_ASSERT(valTuplPtr != nullptr); - for (size_t i = 0; i < valTuplPtr->size(); i++) { - auto elem = (*valTuplPtr)[i]; - MS_ASSERT(elem != nullptr); - attr->size.emplace_back(CastToInt(elem).front()); - } - } - } - std::vector<int> axes; - axes.clear(); - for (size_t i = 0; i < attr->begin.size(); i++) { - axes.push_back(i); - } - attr->axes = axes; - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -int Slice::GetFormat() const { return this->primitive_->value_as_Slice()->format(); } -std::vector<int> Slice::GetBegin() const { - auto fb_vector = this->primitive_->value_as_Slice()->begin(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> Slice::GetSize() const { - 
auto fb_vector = this->primitive_->value_as_Slice()->size(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -std::vector<int> Slice::GetAxes() const { - auto fb_vector = this->primitive_->value_as_Slice()->axes(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -int Slice::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto attr = primitive->value_as_Slice(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Slice return nullptr"; - return RET_ERROR; - } - - std::vector<int32_t> axes; - if (attr->axes() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axes()->size()); i++) { - axes.push_back(attr->axes()->data()[i]); - } - } - std::vector<int32_t> begin; - if (attr->begin() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->begin()->size()); i++) { - begin.push_back(attr->begin()->data()[i]); - } - } - std::vector<int32_t> size; - if (attr->size() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->size()->size()); i++) { - size.push_back(attr->size()->data()[i]); - } - } - - auto val_offset = schema::CreateSliceDirect(*fbb, attr->format(), &axes, &begin, &size); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Slice, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SliceCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Slice>(primitive); } -Registry SliceRegistry(schema::PrimitiveType_Slice, SliceCreator); - -#endif - -std::vector<int> Slice::GetPostProcessBegin() const { return this->begin; } -std::vector<int> Slice::GetPostProcessSize() const { return this->size; } -int Slice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs.size() < kSliceInputNum || outputs.size() != kSliceOutputNum) { - MS_LOG(ERROR) << "input size:" << inputs.size() << ",output size:" << outputs.size(); - return RET_PARAM_INVALID; - } - auto input = inputs.at(0); - outputs.at(0)->set_data_type(input->data_type()); - outputs.at(0)->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - std::vector<int32_t> slice_begin(GetBegin()); - std::vector<int32_t> slice_size(GetSize()); - std::vector<int32_t> slice_axes(GetAxes()); - std::vector<int32_t> output_shape(input_shape.size()); - if (inputs.size() > kSliceInputNum && inputs.size() <= kSliceMaxInputNum) { - if (slice_begin.empty() && inputs.size() >= 2 && inputs.at(1)->data_c() != nullptr) { - for (int i = 0; i < inputs.at(1)->ElementsNum(); i++) { - slice_begin.emplace_back(static_cast<int *>(inputs.at(1)->data_c())[i]); - } - } - if (slice_size.empty() && inputs.size() >= 3 && inputs.at(2)->data_c() != nullptr) { - for (int i = 0; i < inputs.at(2)->ElementsNum(); i++) { - auto end = static_cast<int *>(inputs.at(2)->data_c())[i]; - auto size = end < 0 ? end : (end == INT32_MAX ? 
-1 : end - slice_begin.at(i));
-        slice_size.emplace_back(size);
-      }
-    }
-    if (slice_axes.empty() && inputs.size() >= 4 && inputs.at(3)->data_c() != nullptr) {
-      for (int i = 0; i < inputs.at(3)->ElementsNum(); i++) {
-        slice_axes.emplace_back(static_cast<int *>(inputs.at(3)->data_c())[i]);
-      }
-    }
-  }
-  if (slice_begin.empty() || slice_size.empty() || slice_axes.empty()) {
-    MS_LOG(ERROR) << "InferShape failed.";
-    return RET_INFER_INVALID;
-  }
-  begin.assign(input_shape.size(), 0);
-  size.assign(input_shape.size(), -1);
-  for (size_t i = 0; i < slice_axes.size(); ++i) {
-    begin.at(slice_axes.at(i)) = slice_begin.at(i);
-    size.at(slice_axes.at(i)) = slice_size.at(i);
-  }
-  for (size_t i = 0; i < input_shape.size(); ++i) {
-    if (size.at(i) < 0 && size.at(i) != -1) {
-      MS_LOG(ERROR) << "Invalid size input! size[" << i << "]=" << size.at(i);
-      return RET_PARAM_INVALID;
-    }
-    if (begin.at(i) < 0) {
-      MS_LOG(ERROR) << "Invalid begin input " << begin.at(i) << " which should be >= 0";
-      return RET_PARAM_INVALID;
-    }
-    if (input_shape.at(i) != 0 && input_shape.at(i) <= begin.at(i)) {
-      MS_LOG(ERROR) << "Invalid begin input! begin[" << i << "]=" << begin.at(i) << " which should be < "
-                    << input_shape.at(i);
-      return RET_PARAM_INVALID;
-    }
-    if (input_shape.at(i) != 0 && size.at(i) > (input_shape.at(i) - begin.at(i))) {
-      MS_LOG(ERROR) << "Invalid size input " << size.at(i) << " which should be <= " << input_shape.at(i) - begin.at(i);
-      return RET_PARAM_INVALID;
-    }
-
-    output_shape.at(i) = size.at(i) < 0 ? input_shape.at(i) - begin.at(i) : size.at(i);
-  }
-
-  outputs.at(0)->set_shape(output_shape);
-  return RET_OK;
-}
-}  // namespace lite
-}  // namespace mindspore
diff --git a/mindspore/lite/src/ops/slice.h b/mindspore/lite/src/ops/slice.h
deleted file mode 100644
index 73c26c49be..0000000000
--- a/mindspore/lite/src/ops/slice.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Copyright 2019-2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
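A hedged sketch of the final output-shape rule in the Slice::InferShape deleted above, where a size entry of -1 means "to the end of the axis". For example, input {4, 10} with begin {0, 2} and size {-1, 5} yields {4, 5}. The helper name is illustrative, not part of the source.

#include <vector>

std::vector<int> SliceOutputShape(const std::vector<int> &input_shape,
                                  const std::vector<int> &begin,
                                  const std::vector<int> &size) {
  std::vector<int> out(input_shape.size());
  for (size_t i = 0; i < input_shape.size(); ++i) {
    // size < 0 (i.e. -1 after validation) selects everything from begin on.
    out[i] = size[i] < 0 ? input_shape[i] - begin[i] : size[i];
  }
  return out;
}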
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SLICE_H_ -#define MINDSPORE_LITE_SRC_OPS_SLICE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Slice : public PrimitiveC { - public: - Slice() = default; - ~Slice() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Slice, PrimitiveC); - explicit Slice(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetFormat(int format); - void SetBegin(const std::vector<int> &begin); - void SetSize(const std::vector<int> &size); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetFormat() const; - std::vector<int> GetBegin() const; - std::vector<int> GetSize() const; - std::vector<int> GetAxes() const; - // due to difference between tflite and onnx, when inferring shape, construct new parameters of begin and size. - // when running graph, we need to obtain new begins and sizes using the two function as below. - std::vector<int> GetPostProcessBegin() const; - std::vector<int> GetPostProcessSize() const; - - protected: - std::vector<int> begin = {0}; - std::vector<int> size = {-1}; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_SRC_OPS_SLICE_H_ diff --git a/mindspore/lite/src/ops/smooth_l1_loss.cc b/mindspore/lite/src/ops/smooth_l1_loss.cc deleted file mode 100644 index d3cb5c65ea..0000000000 --- a/mindspore/lite/src/ops/smooth_l1_loss.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/smooth_l1_loss.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float SmoothL1Loss::GetBeta() const { return this->primitive_->value.AsSmoothL1Loss()->beta; } -int SmoothL1Loss::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SmoothL1Loss; - } - if (this->primitive_->value.type != schema::PrimitiveType_SmoothL1Loss) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::SmoothL1LossT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->beta = GetValue<float>(prim.GetAttr("beta")); - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -float SmoothL1Loss::GetBeta() const { return this->primitive_->value_as_SmoothL1Loss()->beta(); } -int SmoothL1Loss::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SmoothL1Loss(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SmoothL1Loss return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSmoothL1Loss(*fbb, attr->beta()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SmoothL1Loss, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SmoothL1LossCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SmoothL1Loss>(primitive); -} -Registry SmoothL1LossRegistry(schema::PrimitiveType_SmoothL1Loss, SmoothL1LossCreator); -#endif - -int SmoothL1Loss::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (inputs.size() != 2) { - MS_LOG(ERROR) << "SmoothL1Loss should have 2 input tensors"; - return RET_ERROR; - } - - if (outputs.size() != 1) { - MS_LOG(ERROR) << "SmoothL1Loss should have 1 output tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum()) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape(inputs[0]->shape()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/smooth_l1_loss.h b/mindspore/lite/src/ops/smooth_l1_loss.h deleted file mode 100644 index 4e63fdacc1..0000000000 --- a/mindspore/lite/src/ops/smooth_l1_loss.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_H_ -#define MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SmoothL1Loss : public PrimitiveC { - public: - SmoothL1Loss() = default; - ~SmoothL1Loss() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SmoothL1Loss, PrimitiveC); - explicit SmoothL1Loss(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetBeta() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_H_ diff --git a/mindspore/lite/src/ops/smooth_l1_loss_grad.cc b/mindspore/lite/src/ops/smooth_l1_loss_grad.cc deleted file mode 100644 index fcf3e91273..0000000000 --- a/mindspore/lite/src/ops/smooth_l1_loss_grad.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "src/ops/smooth_l1_loss_grad.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -float SmoothL1LossGrad::GetBeta() const { return this->primitive_->value.AsSmoothL1LossGrad()->beta; } -int SmoothL1LossGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SmoothL1LossGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_SmoothL1LossGrad) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = std::make_unique<schema::SmoothL1LossGradT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->beta = GetValue<float>(prim.GetAttr("beta")); - - this->primitive_->value.value = attr.release(); - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -float SmoothL1LossGrad::GetBeta() const { return this->primitive_->value_as_SmoothL1LossGrad()->beta(); } -int SmoothL1LossGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SmoothL1LossGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SmoothL1LossGrad return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSmoothL1LossGrad(*fbb, attr->beta()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SmoothL1LossGrad, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SmoothL1LossGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SmoothL1LossGrad>(primitive); -} -Registry SmoothL1LossGradRegistry(schema::PrimitiveType_SmoothL1LossGrad, SmoothL1LossGradCreator); -#endif - -int SmoothL1LossGrad::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (inputs.size() != 3) { - MS_LOG(ERROR) << "SmoothL1LossGrad should have 3 input tensors"; - return RET_ERROR; - } - - if (outputs.size() != 1) { - MS_LOG(ERROR) << "SmoothL1LossGrad should have 1 output tensors"; - return RET_ERROR; - } - - if (inputs[0]->ElementsNum() != inputs[1]->ElementsNum() || inputs[0]->ElementsNum() != inputs[2]->ElementsNum()) { - MS_LOG(ERROR) << "error input data size!"; - return RET_ERROR; - } - - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - out->set_data_type(inputs[0]->data_type()); - out->set_format(inputs[0]->format()); - out->set_shape(inputs[0]->shape()); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/smooth_l1_loss_grad.h b/mindspore/lite/src/ops/smooth_l1_loss_grad.h deleted file mode 100644 index 2bdc73f788..0000000000 --- a/mindspore/lite/src/ops/smooth_l1_loss_grad.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SmoothL1LossGrad : public PrimitiveC { - public: - SmoothL1LossGrad() = default; - ~SmoothL1LossGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SmoothL1LossGrad, PrimitiveC); - explicit SmoothL1LossGrad(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - float GetBeta() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SMOOTH_L1_LOSS_GRAD_H_ diff --git a/mindspore/lite/src/ops/softmax.cc b/mindspore/lite/src/ops/softmax.cc deleted file mode 100644 index e8ae684939..0000000000 --- a/mindspore/lite/src/ops/softmax.cc +++ /dev/null @@ -1,99 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/softmax.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SoftMax::GetAxis() const { return this->primitive_->value.AsSoftMax()->axis; } - -int SoftMax::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SoftMax; - } - if (this->primitive_->value.type != schema::PrimitiveType_SoftMax) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SoftMaxT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - auto prim_axis = CastToInt(prim.GetAttr("axis")).front(); - attr->axis = prim_axis; - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} - -void SoftMax::SetAxis(int axis) { this->primitive_->value.AsSoftMax()->axis = axis; } - -#else - -int SoftMax::GetAxis() const { return this->primitive_->value_as_SoftMax()->axis(); } -int SoftMax::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SoftMax(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SoftMax return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSoftMax(*fbb, attr->axis()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SoftMax, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SoftMaxCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<SoftMax>(primitive); } -Registry SoftMaxRegistry(schema::PrimitiveType_SoftMax, SoftMaxCreator); -#endif - -int SoftMax::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (input->shape().size() > 5) { - MS_LOG(ERROR) << "Softmax input dim should not exceed 5, got " << input->shape().size(); - return RET_ERROR; - } - output->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/softmax.h b/mindspore/lite/src/ops/softmax.h deleted file mode 100644 index 656cbbc1cb..0000000000 --- a/mindspore/lite/src/ops/softmax.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SOFT_MAX_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SOFT_MAX_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SoftMax : public PrimitiveC { - public: - SoftMax() = default; - ~SoftMax() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SoftMax, PrimitiveC); - explicit SoftMax(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetAxis(int axis); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SOFT_MAX_H_ diff --git a/mindspore/lite/src/ops/softmax_cross_entropy.cc b/mindspore/lite/src/ops/softmax_cross_entropy.cc deleted file mode 100644 index 483bd7363b..0000000000 --- a/mindspore/lite/src/ops/softmax_cross_entropy.cc +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/softmax_cross_entropy.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SoftmaxCrossEntropy::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SoftmaxCrossEntropy; - } - if (this->primitive_->value.type != schema::PrimitiveType_SoftmaxCrossEntropy) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SoftmaxCrossEntropyT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int SoftmaxCrossEntropy::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SoftmaxCrossEntropy(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SoftmaxCrossEntropy return nullptr"; - return RET_ERROR; - } - - auto val_offset = schema::CreateSoftmaxCrossEntropy(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SoftmaxCrossEntropy, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SoftmaxCrossEntropyCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SoftmaxCrossEntropy>(primitive); -} -Registry SoftmaxCrossEntropyRegistry(schema::PrimitiveType_SoftmaxCrossEntropy, SoftmaxCrossEntropyCreator); -#endif - -int SoftmaxCrossEntropy::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - if (1 > outputs.size()) { - MS_LOG(ERROR) << "SoftmaxCrossEntropy should have at least one output"; - return RET_ERROR; - } - auto *in0 = inputs.front(); - MS_ASSERT(in0 != nullptr); - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - - std::vector<int> outshape; - outshape.push_back(in0->shape()[0]); - outshape.push_back(1); - out->set_shape(outshape); - out->set_data_type(in0->data_type()); - out->set_format(in0->format()); - - if (1 < outputs.size()) { - auto *grads = outputs.at(1); - MS_ASSERT(grads != nullptr); - grads->set_shape(in0->shape()); - grads->set_data_type(in0->data_type()); - grads->set_format(in0->format()); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/softmax_cross_entropy.h b/mindspore/lite/src/ops/softmax_cross_entropy.h deleted file mode 100644 index 5eb028dd91..0000000000 --- a/mindspore/lite/src/ops/softmax_cross_entropy.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SOFTMAX_CROSS_ENTROPY_H_ -#define MINDSPORE_LITE_SRC_OPS_SOFTMAX_CROSS_ENTROPY_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SoftmaxCrossEntropy : public PrimitiveC { - public: - SoftmaxCrossEntropy() = default; - ~SoftmaxCrossEntropy() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SoftmaxCrossEntropy, PrimitiveC); - explicit SoftmaxCrossEntropy(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(const std::vector<int> &axis); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - - std::vector<int> GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SOFTMAX_CROSS_ENTROPY_H_ diff --git a/mindspore/lite/src/ops/space_to_batch.cc b/mindspore/lite/src/ops/space_to_batch.cc deleted file mode 100644 index 5a31ba90d7..0000000000 --- a/mindspore/lite/src/ops/space_to_batch.cc +++ /dev/null @@ -1,150 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
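Shape note (illustrative, not part of the patch): the SoftmaxCrossEntropy::InferShape removed above emits one loss value per batch row, plus an optional second output (the gradient) that mirrors the logits shape. A minimal standalone sketch of that rule, with hypothetical names:

    // For logits of shape {N, C}: loss is {N, 1}; the optional grads output is {N, C}.
    #include <vector>
    std::vector<int> SoftmaxCrossEntropyLossShape(const std::vector<int> &logits_shape) {
      return {logits_shape[0], 1};  // one loss element per batch row
    }
    // e.g. logits {32, 10} -> loss {32, 1}, grads (if requested) {32, 10}.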
- */ - -#include "src/ops/space_to_batch.h" -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> SpaceToBatch::GetBlockShape() const { return this->primitive_->value.AsSpaceToBatch()->blockShape; } -std::vector<int> SpaceToBatch::GetPaddings() const { return this->primitive_->value.AsSpaceToBatch()->paddings; } - -void SpaceToBatch::SetBlockShape(const std::vector<int> &block_shape) { - this->primitive_->value.AsSpaceToBatch()->blockShape = block_shape; -} -void SpaceToBatch::SetPaddings(const std::vector<int> &paddings) { - this->primitive_->value.AsSpaceToBatch()->paddings = paddings; -} - -#else - -std::vector<int> SpaceToBatch::GetBlockShape() const { - auto fb_vector = this->primitive_->value_as_SpaceToBatch()->blockShape(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> SpaceToBatch::GetPaddings() const { - auto fb_vector = this->primitive_->value_as_SpaceToBatch()->paddings(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int SpaceToBatch::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SpaceToBatch(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SpaceToBatch return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> blockShape; - if (attr->blockShape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->blockShape()->size()); i++) { - blockShape.push_back(attr->blockShape()->data()[i]); - } - } - std::vector<int32_t> paddings; - if (attr->paddings() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->paddings()->size()); i++) { - paddings.push_back(attr->paddings()->data()[i]); - } - } - auto val_offset = schema::CreateSpaceToBatchDirect(*fbb, &blockShape, &paddings); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SpaceToBatch, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SpaceToBatchCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SpaceToBatch>(primitive); -} -Registry SpaceToBatchRegistry(schema::PrimitiveType_SpaceToBatch, SpaceToBatchCreator); - -#endif - -namespace { -constexpr int kSpaceToBatchNDOutputNum = 1; -constexpr int kSpaceToBatchNDInputNum = 1; -} // namespace - -int SpaceToBatch::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kSpaceToBatchNDOutputNum || inputs.size() != kSpaceToBatchNDInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_ERROR; - } - - auto input = inputs.at(0); - if (input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "space_to_batch only support NHWC now!"; - return RET_ERROR; - } - outputs[0]->set_data_type(input->data_type()); - outputs[0]->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "Space_to_batch op only supports 4D input currently, but got " << input_shape.size() - << "D input."; - return RET_ERROR; - } - - auto block_shape_vector = GetBlockShape(); - for (int &iter : block_shape_vector) { - block_sizes_.emplace_back(iter); - } - - auto paddings_vector = GetPaddings(); - in_shape_.clear(); - padded_in_shape_.clear(); - paddings_.clear(); - in_shape_.emplace_back(input_shape.at(NHWC_N)); - padded_in_shape_.emplace_back(input_shape.at(NHWC_N)); - auto block_shape_size = block_shape_vector.size(); - for (size_t i = 0; i < block_shape_size; i++) { - in_shape_.emplace_back(input_shape.at(i + 1)); - paddings_.emplace_back(paddings_vector.at(2 * i)); - paddings_.emplace_back(paddings_vector.at(2 * i + 1)); - padded_in_shape_.emplace_back(input_shape.at(i + 1) + paddings_.at(2 * i) + paddings_.at(2 * i + 1)); - if (padded_in_shape_.back() % block_sizes_.at(i)) { - MS_LOG(ERROR) << "Padded shape is not divisible by block size " << block_sizes_.at(i); - return RET_ERROR; - } - } - in_shape_.emplace_back(input_shape.at(NHWC_C)); - padded_in_shape_.emplace_back(input_shape.at(NHWC_C)); - int padding_left = 0; - int padding_right = 0; - int block_w = 1; - if (block_shape_size == 2) { - padding_left = paddings_[2]; - padding_right = paddings_[3]; - block_w = block_sizes_[1]; - } - - std::vector<int32_t> output_shape(input_shape.size()); - output_shape[NHWC_N] = input_shape[NHWC_N] * (block_sizes_[0] * block_w); - output_shape[NHWC_H] = (input_shape[NHWC_H] + paddings_[0] + paddings_[1]) / block_sizes_[0]; - output_shape[NHWC_W] = (input_shape[NHWC_W] + padding_left + padding_right) / block_w; - output_shape[NHWC_C] = input_shape[NHWC_C]; - outputs[0]->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/space_to_batch.h b/mindspore/lite/src/ops/space_to_batch.h deleted file mode 100644 index 982120dfc1..0000000000 --- a/mindspore/lite/src/ops/space_to_batch.h +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
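Shape note (illustrative, not part of the patch): the SpaceToBatch::InferShape removed above implements the usual NHWC space-to-batch rule — the batch dimension is multiplied by the block area, and the padded spatial dimensions are divided by the block sizes. A standalone sketch with hypothetical names:

    // Sketch of the shape rule in the removed SpaceToBatch::InferShape (NHWC).
    #include <array>
    std::array<int, 4> SpaceToBatchOutShape(const std::array<int, 4> &in,     // {N, H, W, C}
                                            const std::array<int, 2> &block,  // {block_h, block_w}
                                            const std::array<int, 4> &pads) { // {top, bottom, left, right}
      return {in[0] * block[0] * block[1],
              (in[1] + pads[0] + pads[1]) / block[0],
              (in[2] + pads[2] + pads[3]) / block[1],
              in[3]};
    }
    // e.g. {1, 4, 4, 3} with block {2, 2} and zero padding -> {4, 2, 2, 3};
    // the padded H and W must be divisible by the block sizes.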
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SpaceToBatch : public PrimitiveC { - public: - SpaceToBatch() = default; - ~SpaceToBatch() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SpaceToBatch, PrimitiveC); - explicit SpaceToBatch(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBlockShape(const std::vector<int> &block_shape); - void SetPaddings(const std::vector<int> &paddings); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) override; - - std::vector<int> GetBlockShape() const; - std::vector<int> GetPaddings() const; - - std::vector<int> BlockSizes() { return block_sizes_; } - std::vector<int> Paddings() { return paddings_; } - std::vector<int> InShape() { return in_shape_; } - std::vector<int> PaddedInShape() { return padded_in_shape_; } - - private: - std::vector<int> block_sizes_; - std::vector<int> paddings_; - std::vector<int> in_shape_; - std::vector<int> padded_in_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_H_ diff --git a/mindspore/lite/src/ops/space_to_batch_nd.cc b/mindspore/lite/src/ops/space_to_batch_nd.cc deleted file mode 100644 index ef2d33534e..0000000000 --- a/mindspore/lite/src/ops/space_to_batch_nd.cc +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- */ - -#include "src/ops/space_to_batch_nd.h" -#include <limits> -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -namespace { -constexpr int kSpaceToBatchNDOutputNum = 1; -constexpr int kSpaceToBatchNDOneInput = 1; -constexpr int kSpaceToBatchNDThreeInput = 3; -} // namespace - -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> SpaceToBatchND::GetBlockShape() const { - return this->primitive_->value.AsSpaceToBatchND()->blockShape; -} -std::vector<int> SpaceToBatchND::GetPaddings() const { return this->primitive_->value.AsSpaceToBatchND()->paddings; } - -void SpaceToBatchND::SetBlockShape(const std::vector<int> &block_shape) { - this->primitive_->value.AsSpaceToBatchND()->blockShape = block_shape; -} -void SpaceToBatchND::SetPaddings(const std::vector<int> &paddings) { - this->primitive_->value.AsSpaceToBatchND()->paddings = paddings; -} - -#else - -std::vector<int> SpaceToBatchND::GetBlockShape() const { - auto fb_vector = this->primitive_->value_as_SpaceToBatchND()->blockShape(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> SpaceToBatchND::GetPaddings() const { - auto fb_vector = this->primitive_->value_as_SpaceToBatchND()->paddings(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -int SpaceToBatchND::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SpaceToBatchND(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SpaceToBatch return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> blockShape; - if (attr->blockShape() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->blockShape()->size()); i++) { - blockShape.push_back(attr->blockShape()->data()[i]); - } - } - std::vector<int32_t> paddings; - if (attr->paddings() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->paddings()->size()); i++) { - paddings.push_back(attr->paddings()->data()[i]); - } - } - auto val_offset = schema::CreateSpaceToBatchDirect(*fbb, &blockShape, &paddings); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SpaceToBatchND, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SpaceToBatchNDCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SpaceToBatchND>(primitive); -} -Registry SpaceToBatchNDRegistry(schema::PrimitiveType_SpaceToBatchND, SpaceToBatchNDCreator); - -#endif // PRIMITIVE_WRITEABLE - -int SpaceToBatchND::SetOutputShapeFromParam(const std::vector<lite::Tensor *> inputs, - std::vector<lite::Tensor *> outputs) { - auto input_shape = inputs[0]->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size only support " << kQuadrupleNum << " now!"; - return RET_ERROR; - } - auto block_shape = GetBlockShape(); - auto padding = GetPaddings(); - int padding_left = 0; - int padding_right = 0; - int block_w = 1; - if (block_shape.size() == 2) { - padding_left = padding.at(2); - padding_right = padding.at(3); - block_w = block_shape.at(1); - } - std::vector<int32_t> output_shape(input_shape.size()); - if (block_shape.at(0) * block_w > std::numeric_limits<int>::max() / input_shape.at(NHWC_N)) { - MS_LOG(ERROR) << "The value of block_shape.at(0) * block_w is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_N) = input_shape.at(NHWC_N) * block_shape.at(0) * 
block_w; - if (padding.at(0) + padding.at(1) > std::numeric_limits<int>::max() - input_shape.at(NHWC_H)) { - MS_LOG(ERROR) << "The value of padding.at(0) + padding.at(1) is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_H) = (input_shape.at(NHWC_H) + padding.at(0) + padding.at(1)) / block_shape.at(0); - if (padding_left + padding_right > std::numeric_limits<int>::max() - input_shape.at(NHWC_W)) { - MS_LOG(ERROR) << "The value of padding_left + padding_right is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_W) = (input_shape.at(NHWC_W) + padding_left + padding_right) / block_w; - if (input_shape.size() > 3) { - output_shape.at(NHWC_C) = input_shape.at(NHWC_C); - } - outputs.at(0)->set_shape(output_shape); - return RET_OK; -} - -int SpaceToBatchND::SetOutputShapeFromInput(const std::vector<lite::Tensor *> inputs, - std::vector<lite::Tensor *> outputs) { - auto input_shape = inputs[0]->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size only support " << kQuadrupleNum << " now!"; - return RET_ERROR; - } - MS_ASSERT(inputs[2]->ElementsNum() == 4); - auto block_shape_data = inputs[1]->data_c(); - auto block_shape = static_cast<int *>(block_shape_data); - auto padding_data = inputs[2]->data_c(); - auto padding = static_cast<int *>(padding_data); - int padding_left = 0; - int padding_right = 0; - int block_w = 1; - if (inputs[1]->ElementsNum() == 2) { - padding_left = padding[2]; - padding_right = padding[3]; - block_w = block_shape[1]; - } - std::vector<int32_t> output_shape(input_shape.size()); - if (block_shape[0] * block_w > std::numeric_limits<int>::max() / input_shape.at(NHWC_N)) { - MS_LOG(ERROR) << "The value of block_shape.at(0) * block_w is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_N) = input_shape.at(NHWC_N) * block_shape[0] * block_w; - if (padding[0] + padding[1] > std::numeric_limits<int>::max() - input_shape.at(NHWC_H)) { - MS_LOG(ERROR) << "The value of padding.at(0) + padding.at(1) is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_H) = (input_shape.at(NHWC_H) + padding[0] + padding[1]) / block_shape[0]; - if (padding_left + padding_right > std::numeric_limits<int>::max() - input_shape.at(NHWC_W)) { - MS_LOG(ERROR) << "The value of padding_left + padding_right is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_W) = (input_shape.at(NHWC_W) + padding_left + padding_right) / block_w; - if (input_shape.size() > 3) { - output_shape.at(NHWC_C) = input_shape.at(NHWC_C); - } - outputs.at(0)->set_shape(output_shape); - return RET_OK; -} - -int SpaceToBatchND::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - if (outputs.size() != kSpaceToBatchNDOutputNum || - (inputs.size() != kSpaceToBatchNDOneInput && inputs.size() != kSpaceToBatchNDThreeInput)) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return 1; - } - - auto input = inputs.at(0); - if (input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "space_to_batch_nd only support NHWC now!"; - return RET_ERROR; - } - outputs.at(0)->set_data_type(input->data_type()); - outputs.at(0)->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - if (inputs.size() == kSpaceToBatchNDOneInput) { - auto ret = SetOutputShapeFromParam(inputs, outputs); - if (ret != RET_OK) { - MS_LOG(ERROR) << "SetOutputShapeFromParam failed"; - return ret; - } - } - if (inputs.size() == kSpaceToBatchNDThreeInput) { - if (inputs[0]->data_c() == nullptr) { - return RET_INFER_INVALID; - } - MS_ASSERT(inputs[1]->data_c() != nullptr); - MS_ASSERT(inputs[2]->data_c() != nullptr); - auto ret = SetOutputShapeFromInput(inputs, outputs); - if (ret != RET_OK) { - MS_LOG(ERROR) << "SetOutputShapeFromInput failed"; - return ret; - } - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/space_to_batch_nd.h b/mindspore/lite/src/ops/space_to_batch_nd.h deleted file mode 100644 index f77780b909..0000000000 --- a/mindspore/lite/src/ops/space_to_batch_nd.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_N_D_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_N_D_H_ - -#include <vector> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SpaceToBatchND : public PrimitiveC { - public: - SpaceToBatchND() = default; - ~SpaceToBatchND() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SpaceToBatchND, PrimitiveC); - explicit SpaceToBatchND(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBlockShape(const std::vector<int> &block_shape); - void SetPaddings(const std::vector<int> &paddings); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - std::vector<int> GetBlockShape() const; - std::vector<int> GetPaddings() const; - int InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) override; - int SetOutputShapeFromParam(const std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); - int SetOutputShapeFromInput(const std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs); -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_BATCH_N_D_H_ diff --git a/mindspore/lite/src/ops/space_to_depth.cc b/mindspore/lite/src/ops/space_to_depth.cc deleted file mode 100644 index e6c5eddcf3..0000000000 --- a/mindspore/lite/src/ops/space_to_depth.cc +++ /dev/null @@ -1,109 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/space_to_depth.h" -#include <limits> -#include "src/common/common.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SpaceToDepth::GetBlockSize() const { return this->primitive_->value.AsSpaceToDepth()->blockSize; } -int SpaceToDepth::GetFormat() const { return this->primitive_->value.AsSpaceToDepth()->format; } - -void SpaceToDepth::SetBlockSize(int block_size) { this->primitive_->value.AsSpaceToDepth()->blockSize = block_size; } -void SpaceToDepth::SetFormat(int format) { this->primitive_->value.AsSpaceToDepth()->format = (schema::Format)format; } - -#else - -int SpaceToDepth::GetBlockSize() const { return this->primitive_->value_as_SpaceToDepth()->blockSize(); } -int SpaceToDepth::GetFormat() const { return this->primitive_->value_as_SpaceToDepth()->format(); } -int SpaceToDepth::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SpaceToDepth(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SpaceToDepth return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSpaceToDepth(*fbb, attr->blockSize(), attr->format()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SpaceToDepth, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SpaceToDepthCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SpaceToDepth>(primitive); -} -Registry SpaceToDepthRegistry(schema::PrimitiveType_SpaceToDepth, SpaceToDepthCreator); -#endif - -namespace { -constexpr int kSpaceToDepthOutputNum = 1; -constexpr int kSpaceToDepthInputNum = 1; -} // namespace - -int SpaceToDepth::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kSpaceToDepthOutputNum || inputs.size() != kSpaceToDepthInputNum) { - MS_LOG(ERROR) << "Invalid output/input size! 
output size: " << outputs.size() << ",input size: " << inputs.size(); - return RET_ERROR; - } - - auto input = inputs.at(0); - if (input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "space_to_depth only support NHWC now!"; - return RET_ERROR; - } - outputs.at(0)->set_format(input->format()); - outputs.at(0)->set_data_type(input->data_type()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - if (input_shape.size() != kQuadrupleNum) { - MS_LOG(ERROR) << "input shape dimension size should == " << kQuadrupleNum; - return RET_ERROR; - } - - int32_t block_size = GetBlockSize(); - if (block_size == 0) { - MS_LOG(ERROR) << "block_size is zero"; - return RET_ERROR; - } - if (input_shape.at(NHWC_H) % block_size != 0 || input_shape.at(NHWC_H) == 0 || - input_shape.at(NHWC_W) % block_size != 0 || input_shape.at(NHWC_W) == 0) { - MS_LOG(ERROR) << "input dimension h or w size error!"; - return RET_ERROR; - } - std::vector<int32_t> output_shape(input_shape.size()); - output_shape.at(NHWC_N) = input_shape.at(NHWC_N); - output_shape.at(NHWC_H) = input_shape.at(NHWC_H) / block_size; - output_shape.at(NHWC_W) = input_shape.at(NHWC_W) / block_size; - if (block_size * block_size > std::numeric_limits<int32_t>::max() / input_shape.at(NHWC_C)) { - MS_LOG(ERROR) << "The value of block_size * block_size is too big"; - return RET_ERROR; - } - output_shape.at(NHWC_C) = input_shape.at(NHWC_C) * (block_size * block_size); - outputs.at(0)->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/space_to_depth.h b/mindspore/lite/src/ops/space_to_depth.h deleted file mode 100644 index 3c85c2d272..0000000000 --- a/mindspore/lite/src/ops/space_to_depth.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_DEPTH_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_DEPTH_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SpaceToDepth : public PrimitiveC { - public: - SpaceToDepth() = default; - ~SpaceToDepth() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SpaceToDepth, PrimitiveC); - explicit SpaceToDepth(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBlockSize(int block_size); - void SetFormat(int format); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetBlockSize() const; - int GetFormat() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SPACE_TO_DEPTH_H_ diff --git a/mindspore/lite/src/ops/sparse_softmax_cross_entropy.cc b/mindspore/lite/src/ops/sparse_softmax_cross_entropy.cc deleted file mode 100644 index 751afb084d..0000000000 --- a/mindspore/lite/src/ops/sparse_softmax_cross_entropy.cc +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/sparse_softmax_cross_entropy.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int SparseSoftmaxCrossEntropy::GetIsGrad() const { - return this->primitive_->value.AsSparseSoftmaxCrossEntropy()->isGrad; -} - -void SparseSoftmaxCrossEntropy::SetIsGrad(int isGrad) { - this->primitive_->value.AsSparseSoftmaxCrossEntropy()->isGrad = isGrad; -} - -int SparseSoftmaxCrossEntropy::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_SparseSoftmaxCrossEntropy; - } - if (this->primitive_->value.type != schema::PrimitiveType_SparseSoftmaxCrossEntropy) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SparseSoftmaxCrossEntropyT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - attr->isGrad = GetValue<bool>(prim.GetAttr("is_grad")); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int SparseSoftmaxCrossEntropy::GetIsGrad() const { - return this->primitive_->value_as_SparseSoftmaxCrossEntropy()->isGrad(); -} -int SparseSoftmaxCrossEntropy::UnPackToFlatBuilder(const schema::Primitive *primitive, - flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SparseSoftmaxCrossEntropy(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SparseSoftmaxCrossEntropy return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSparseSoftmaxCrossEntropy(*fbb, attr->isGrad()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SparseSoftmaxCrossEntropy, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SparseSoftmaxCrossEntropyCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SparseSoftmaxCrossEntropy>(primitive); -} -Registry SparseSoftmaxCrossEntropyRegistry(schema::PrimitiveType_SparseSoftmaxCrossEntropy, - SparseSoftmaxCrossEntropyCreator); -#endif - -int SparseSoftmaxCrossEntropy::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - if (2 != inputs.size()) { - MS_LOG(ERROR) << "SparseSoftmaxCrossEntropy should have two inputs"; - return RET_ERROR; - } - - if (1 != outputs.size()) { - MS_LOG(ERROR) << "SparseSoftmaxCrossEntropy should have one output"; - return RET_ERROR; - } - auto *in0 = inputs.front(); - MS_ASSERT(in0 != nullptr); - auto *out = outputs.front(); - MS_ASSERT(out != nullptr); - - if (GetIsGrad() != 0) { - out->set_shape(in0->shape()); - out->set_data_type(in0->data_type()); - out->set_format(in0->format()); - } else { - std::vector<int> outshape; - outshape.push_back(1); - out->set_shape(outshape); - out->set_data_type(in0->data_type()); - out->set_format(in0->format()); - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sparse_softmax_cross_entropy.h
b/mindspore/lite/src/ops/sparse_softmax_cross_entropy.h deleted file mode 100644 index 21cfbad3ef..0000000000 --- a/mindspore/lite/src/ops/sparse_softmax_cross_entropy.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ -#define MINDSPORE_LITE_SRC_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SparseSoftmaxCrossEntropy : public PrimitiveC { - public: - SparseSoftmaxCrossEntropy() = default; - ~SparseSoftmaxCrossEntropy() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SparseSoftmaxCrossEntropy, PrimitiveC); - explicit SparseSoftmaxCrossEntropy(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetIsGrad(int isGrad); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - - int GetIsGrad() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SPARSE_SOFTMAX_CROSS_ENTROPY_H_ diff --git a/mindspore/lite/src/ops/sparse_to_dense.cc b/mindspore/lite/src/ops/sparse_to_dense.cc deleted file mode 100644 index c92dd5ac76..0000000000 --- a/mindspore/lite/src/ops/sparse_to_dense.cc +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
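Shape note (illustrative, not part of the patch): the SparseSoftmaxCrossEntropy::InferShape removed above keys on the is_grad attribute — in gradient mode the output mirrors the logits shape, otherwise the loss is reduced to a single element. A standalone sketch with hypothetical names:

    // Sketch of the removed SparseSoftmaxCrossEntropy::InferShape shape rule.
    #include <vector>
    std::vector<int> SparseSceOutShape(const std::vector<int> &logits_shape, bool is_grad) {
      return is_grad ? logits_shape : std::vector<int>{1};  // grad keeps shape; loss is a single element
    }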
- */ - -#include "src/ops/sparse_to_dense.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int SparseToDense::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_SparseToDense(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_SparseToDense return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSparseToDense(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SparseToDense, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SparseToDenseCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SparseToDense>(primitive); -} -Registry SparseToDenseRegistry(schema::PrimitiveType_SparseToDense, SparseToDenseCreator); -#endif - -int SparseToDense::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto output = outputs_.front(); - if (output == nullptr) { - MS_LOG(ERROR) << "output null pointer dereferencing."; - return RET_ERROR; - } - auto input2 = inputs_.at(2); - outputs_.at(0)->set_data_type(input2->data_type()); - outputs_.at(0)->set_format(input2->format()); - - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (this->primitive_ == nullptr) { - return RET_NULL_PTR; - } - - auto input1 = inputs_.at(1); - int *input1_data = reinterpret_cast<int *>(input1->MutableData()); - std::vector<int> output_shape; - for (int i = 0; i < input1->ElementsNum(); i++) { - output_shape.push_back(input1_data[i]); - } - outputs_.at(0)->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sparse_to_dense.h b/mindspore/lite/src/ops/sparse_to_dense.h deleted file mode 100644 index 0a5e4429c3..0000000000 --- a/mindspore/lite/src/ops/sparse_to_dense.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
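Note (illustrative, not part of the patch): as the SparseToDense::InferShape removed above shows, the dense output shape is not an attribute — it is read element by element from the data buffer of the second input tensor at infer time. In essence:

    // Sketch: building the output shape from an int32 shape tensor's buffer.
    #include <vector>
    std::vector<int> DenseShapeFromTensor(const int *data, int element_num) {
      return std::vector<int>(data, data + element_num);  // one output dim per element
    }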
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SPARSE_TO_DENSE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SPARSE_TO_DENSE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class SparseToDense : public PrimitiveC { - public: - SparseToDense() = default; - ~SparseToDense() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SparseToDense, PrimitiveC); - explicit SparseToDense(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetOutputShape(const std::vector<int> &output_shape); - void SetSparseValue(const std::vector<int> &sparse_value); - void SetDefaultValue(const std::vector<int> &default_value); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - std::vector<int> GetOutputShape() const; - std::vector<int> GetSparseValue() const; - std::vector<int> GetDefaultValue() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SPARSE_TO_DENSE_H_ diff --git a/mindspore/lite/src/ops/split.cc b/mindspore/lite/src/ops/split.cc deleted file mode 100644 index 45cf029488..0000000000 --- a/mindspore/lite/src/ops/split.cc +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/split.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Split::GetNumberSplit() const { return this->primitive_->value.AsSplit()->numberSplit; } -std::vector<int> Split::GetSizeSplit() const { return this->primitive_->value.AsSplit()->sizeSplits; } -int Split::GetSplitDim() const { return this->primitive_->value.AsSplit()->splitDim; } - -void Split::SetNumberSplit(int number_split) { this->primitive_->value.AsSplit()->numberSplit = number_split; } -void Split::SetSizeSplits(const std::vector<int> &size_splits) { - this->primitive_->value.AsSplit()->sizeSplits = size_splits; -} -void Split::SetSplitDim(int split_dim) { this->primitive_->value.AsSplit()->splitDim = split_dim; } - -int Split::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Split; - } - if (this->primitive_->value.type != schema::PrimitiveType_Split) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SplitT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->splitDim = CastToInt(prim.GetAttr("axis")).front(); - attr->numberSplit = CastToInt(prim.GetAttr("output_num")).front(); - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - - return RET_OK; -} - -#else - -int Split::GetNumberSplit() const { return this->primitive_->value_as_Split()->numberSplit(); } -std::vector<int> Split::GetSizeSplit() const { - auto fb_vector = this->primitive_->value_as_Split()->sizeSplits(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Split::GetSplitDim() const { return this->primitive_->value_as_Split()->splitDim(); } - -int Split::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Split(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Split return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> sizeSplits; - if (attr->sizeSplits() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->sizeSplits()->size()); i++) { - sizeSplits.push_back(attr->sizeSplits()->data()[i]); - } - } - auto val_offset = schema::CreateSplitDirect(*fbb, attr->numberSplit(), &sizeSplits, attr->splitDim()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Split, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SplitCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Split>(primitive); } -Registry SplitRegistry(schema::PrimitiveType_Split, SplitCreator); -#endif - -namespace { -constexpr int kSplitInputNum = 1; -} // namespace -int Split::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - if (inputs_.size() < kSplitInputNum) { - MS_LOG(ERROR) << "inputs number 
is less to " << kSplitInputNum; - return RET_ERROR; - } - if (outputs_.empty()) { - MS_LOG(ERROR) << "split has no output."; - return RET_ERROR; - } - for (auto &output : outputs_) { - output->set_data_type(input->data_type()); - output->set_format(input->format()); - } - size_splits_ = GetSizeSplit(); - num_split_ = GetNumberSplit() == 0 ? static_cast<int>(outputs_.size()) : GetNumberSplit(); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - size_t split_dim = GetSplitDim() < 0 ? input->shape().size() + GetSplitDim() : GetSplitDim(); - std::vector<int> input_shape = input->shape(); - if (split_dim > input_shape.size()) { - MS_LOG(ERROR) << "split dim is out of range, which is " << input_shape.size(); - return RET_INPUT_PARAM_INVALID; - } - if (static_cast<int>(outputs_.size()) != num_split_) { - MS_LOG(ERROR) << "outputs number is not equal to " << num_split_; - return RET_ERROR; - } - if (size_splits_.empty()) { - if (input_shape[split_dim] % num_split_ != 0) { - MS_LOG(ERROR) << "cannot split to equal size, which dim is " << input_shape[split_dim] << ", num split is " - << num_split_; - return RET_INPUT_PARAM_INVALID; - } - for (int i = 0; i < num_split_; ++i) { - size_splits_.push_back(input_shape[split_dim] / num_split_); - } - } - for (int i = 0; i < num_split_; ++i) { - std::vector<int> output_shape; - output_shape.insert(output_shape.begin(), input_shape.begin(), input_shape.end()); - int split_dim_i = input_shape.at(split_dim); - // support split size is -1 in the end. - if (i == num_split_ - 1 && size_splits_[i] == -1) { - for (size_t j = 0; j < size_splits_.size() - 1; ++j) { - split_dim_i -= size_splits_[j]; - } - size_splits_[i] = split_dim_i; - } else { - split_dim_i = size_splits_[i]; - } - output_shape.at(split_dim) = split_dim_i; - outputs_.at(i)->set_shape(output_shape); - outputs_.at(i)->set_data_type(input->data_type()); - outputs_.at(i)->set_format(input->format()); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/split.h b/mindspore/lite/src/ops/split.h deleted file mode 100644 index bbdf7515d3..0000000000 --- a/mindspore/lite/src/ops/split.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SPLIT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SPLIT_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Split : public PrimitiveC { - public: - Split() = default; - ~Split() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Split, PrimitiveC); - explicit Split(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetNumberSplit(int number_split); - void SetSizeSplits(const std::vector<int> &size_splits); - void SetSplitDim(int split_dim); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetNumberSplit() const; - std::vector<int> GetSizeSplit() const; - int GetSplitDim() const; - int num_split() const { return num_split_; } - std::vector<int> size_splits() const { return size_splits_; } - - protected: - int num_split_ = 0; - std::vector<int> size_splits_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SPLIT_H_ diff --git a/mindspore/lite/src/ops/sqrt.cc b/mindspore/lite/src/ops/sqrt.cc deleted file mode 100644 index 099cad8ec9..0000000000 --- a/mindspore/lite/src/ops/sqrt.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/sqrt.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Sqrt::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Sqrt; - } - if (this->primitive_->value.type != schema::PrimitiveType_Sqrt) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SqrtT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Sqrt::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateSqrt(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Sqrt, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SqrtCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Sqrt>(primitive); } -Registry SqrtRegistry(schema::PrimitiveType_Sqrt, SqrtCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sqrt.h b/mindspore/lite/src/ops/sqrt.h deleted file mode 100644 index 6f6ca94369..0000000000 --- a/mindspore/lite/src/ops/sqrt.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SQRT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SQRT_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Sqrt : public ArithmeticSelf { - public: - Sqrt() = default; - ~Sqrt() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Sqrt, ArithmeticSelf); - explicit Sqrt(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SQRT_H_ diff --git a/mindspore/lite/src/ops/square.cc b/mindspore/lite/src/ops/square.cc deleted file mode 100644 index 8a126389c1..0000000000 --- a/mindspore/lite/src/ops/square.cc +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/square.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Square::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Square; - } - if (this->primitive_->value.type != schema::PrimitiveType_Square) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SquareT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Square::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateSquare(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Square, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SquareCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Square>(primitive); } -Registry SquareRegistry(schema::PrimitiveType_Square, SquareCreator); -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/square.h b/mindspore/lite/src/ops/square.h deleted file mode 100644 index b86e2bc9bc..0000000000 --- a/mindspore/lite/src/ops/square.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 
2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_SQUARE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SQUARE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic_self.h" - -namespace mindspore { -namespace lite { -class Square : public ArithmeticSelf { - public: - Square() = default; - ~Square() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Square, ArithmeticSelf); - explicit Square(schema::PrimitiveT *primitive) : ArithmeticSelf(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SQUARE_H_ diff --git a/mindspore/lite/src/ops/squared_difference.cc b/mindspore/lite/src/ops/squared_difference.cc deleted file mode 100644 index 5ef7c43f2c..0000000000 --- a/mindspore/lite/src/ops/squared_difference.cc +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/squared_difference.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -#else -int SquaredDifference::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateSquaredDifference(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_SquaredDifference, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SquaredDifferenceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<SquaredDifference>(primitive); -} -Registry SquaredDifferenceRegistry(schema::PrimitiveType_SquaredDifference, SquaredDifferenceCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/squared_difference.h b/mindspore/lite/src/ops/squared_difference.h deleted file mode 100644 index 1847979bb4..0000000000 --- a/mindspore/lite/src/ops/squared_difference.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SQUARED_DIFFERENCE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SQUARED_DIFFERENCE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class SquaredDifference : public Arithmetic { - public: - SquaredDifference() = default; - ~SquaredDifference() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(SquaredDifference, Arithmetic); - explicit SquaredDifference(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SQUARED_DIFFERENCE_H_ diff --git a/mindspore/lite/src/ops/squeeze.cc b/mindspore/lite/src/ops/squeeze.cc deleted file mode 100644 index abae035d29..0000000000 --- a/mindspore/lite/src/ops/squeeze.cc +++ /dev/null @@ -1,139 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
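SquaredDifferenceRegistry above, like SqrtRegistry and SquareRegistry before it, is a file-scope object whose constructor runs during static initialization and records a creator callback keyed by primitive type; the real table lives behind src/ops/ops_register.h and is not shown in this patch. A minimal sketch of the mechanism, with std::map standing in for the project's registry (illustrative only):

  #include <functional>
  #include <map>
  #include <utility>

  struct Primitive;    // opaque flatbuffer view
  struct PrimitiveC;   // base class of the wrappers in this patch
  using Creator = std::function<PrimitiveC *(const Primitive *)>;

  std::map<int, Creator> &CreatorTable() {
    static std::map<int, Creator> table;  // one process-wide table
    return table;
  }

  struct Registry {
    Registry(int type, Creator creator) { CreatorTable()[type] = std::move(creator); }
  };

  // A file-scope definition such as
  //   Registry SqrtRegistry(schema::PrimitiveType_Sqrt, SqrtCreator);
  // therefore installs SqrtCreator before main() ever runs.

The function-local static accessor sidesteps the static-initialization-order problem: the map is constructed on first use, no matter which translation unit registers first.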
- */ - -#include "src/ops/squeeze.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Squeeze::GetAxis() const { return this->primitive_->value.AsSqueeze()->axis; } - -void Squeeze::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsSqueeze()->axis = axis; } - -int Squeeze::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Squeeze; - } - if (this->primitive_->value.type != schema::PrimitiveType_Squeeze) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SqueezeT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("axis") == nullptr) { - MS_LOG(INFO) << "Squeeze's attr axis is set to default"; - attr->axis = {0}; - } else { - attr->axis = CastToInt(prim.GetAttr("axis")); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -std::vector<int> Squeeze::GetAxis() const { - auto fb_vector = this->primitive_->value_as_Squeeze()->axis(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Squeeze::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Squeeze(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Squeeze return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> axis; - if (attr->axis() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) { - axis.push_back(attr->axis()->data()[i]); - } - } - auto val_offset = schema::CreateSqueezeDirect(*fbb, &axis); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Squeeze, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SqueezeCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Squeeze>(primitive); } -Registry SqueezeRegistry(schema::PrimitiveType_Squeeze, SqueezeCreator); -#endif - -namespace { -constexpr int kSqueezeInputNum = 1; -constexpr int kSqueezeOutputNum = 1; -} // namespace -int Squeeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (kSqueezeInputNum != inputs_.size()) { - MS_LOG(ERROR) << "Squeeze should have " << kSqueezeInputNum << " inputs"; - return -1; - } - if (kSqueezeOutputNum != outputs_.size()) { - MS_LOG(ERROR) << "Squeeze should have " << kSqueezeOutputNum << " outputs"; - return -1; - } - auto *in_tensor = inputs_.front(); - outputs_.front()->set_data_type(in_tensor->data_type()); - outputs_.front()->set_format(in_tensor->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto in_shape = in_tensor->shape(); - std::vector<int> out_shape; - - auto axis = GetAxis(); - std::vector<int> axes; - std::transform(axis.begin(), axis.end(), std::back_inserter(axes), - [in_shape](int a) { return a >= 0 ?
a : a + in_shape.size(); }); - if (axes.size() == 0) { - for (size_t i = 0; i < in_shape.size(); i++) { - if (in_shape.at(i) != 1) { - out_shape.push_back(in_shape.at(i)); - } - } - } else { - size_t axisIdx = 0; - for (size_t i = 0; i < in_shape.size(); i++) { - if (axisIdx < axes.size() && axes.at(axisIdx) == static_cast<int>(i)) { - MS_ASSERT(in_shape.at(i) == 1); - axisIdx++; - continue; - } else { - out_shape.push_back(in_shape.at(i)); - } - } - } - outputs_.front()->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/squeeze.h b/mindspore/lite/src/ops/squeeze.h deleted file mode 100644 index 16f95eaddc..0000000000 --- a/mindspore/lite/src/ops/squeeze.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SQUEEZE_H_ -#define MINDSPORE_LITE_SRC_OPS_SQUEEZE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Squeeze : public PrimitiveC { - public: - Squeeze() = default; - ~Squeeze() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Squeeze, PrimitiveC); - explicit Squeeze(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(const std::vector<int> &axis); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SQUEEZE_H_ diff --git a/mindspore/lite/src/ops/stack.cc b/mindspore/lite/src/ops/stack.cc deleted file mode 100644 index 222217b530..0000000000 --- a/mindspore/lite/src/ops/stack.cc +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
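Squeeze::InferShape above normalizes negative axes, then drops the listed size-1 dimensions, or every size-1 dimension when the axis list is empty. The same rule restated as a standalone helper (SqueezeShape is illustrative, not part of the patch):

  #include <algorithm>
  #include <cassert>
  #include <vector>

  std::vector<int> SqueezeShape(const std::vector<int> &in, std::vector<int> axes) {
    for (auto &a : axes) {
      if (a < 0) a += static_cast<int>(in.size());  // negative axes wrap around
    }
    std::vector<int> out;
    for (int i = 0; i < static_cast<int>(in.size()); ++i) {
      if (std::find(axes.begin(), axes.end(), i) != axes.end()) {
        assert(in[i] == 1);  // only size-1 dims may be squeezed
        continue;
      }
      if (axes.empty() && in[i] == 1) continue;  // empty list: drop all 1s
      out.push_back(in[i]);
    }
    return out;
  }
  // SqueezeShape({3, 1, 5}, {1}) -> {3, 5};  SqueezeShape({3, 1, 5}, {}) -> {3, 5}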
- */ - -#include "src/ops/stack.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Stack::GetAxis() const { return this->primitive_->value.AsStack()->axis; } -int Stack::GetN() const { return this->primitive_->value.AsStack()->n; } -std::vector<int> Stack::GetIsScale() const { return this->primitive_->value.AsStack()->isScale; } - -void Stack::SetAxis(int axis) { this->primitive_->value.AsStack()->axis = axis; } -void Stack::SetN(int n) { this->primitive_->value.AsStack()->n = n; } -void Stack::SetIsScale(const std::vector<int> &is_scale) { this->primitive_->value.AsStack()->isScale = is_scale; } - -#else - -int Stack::GetAxis() const { return this->primitive_->value_as_Stack()->axis(); } -int Stack::GetN() const { return this->primitive_->value_as_Stack()->n(); } -std::vector<int> Stack::GetIsScale() const { - auto fb_vector = this->primitive_->value_as_Stack()->isScale(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Stack::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Stack(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Stack return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> isScale; - if (attr->isScale() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->isScale()->size()); i++) { - isScale.push_back(attr->isScale()->data()[i]); - } - } - auto val_offset = schema::CreateStackDirect(*fbb, attr->axis(), attr->n(), &isScale); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Stack, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *StackCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Stack>(primitive); } -Registry StackRegistry(schema::PrimitiveType_Stack, StackCreator); - -#endif - -namespace { -constexpr int kStackOutputNum = 1; -constexpr int kStackMinInputNum = 1; -} // namespace -int Stack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kStackOutputNum) { - MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); - return RET_PARAM_INVALID; - } - if (inputs.size() < kStackMinInputNum) { - MS_LOG(ERROR) << "Invalid input size " << inputs.size(); - return RET_PARAM_INVALID; - } - auto input = inputs.at(0); - auto input0_data_type = input->data_type(); - outputs.at(0)->set_data_type(input0_data_type); - outputs.at(0)->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_shape = input->shape(); - - std::vector<int32_t> output_shape = input_shape; - auto axis = GetAxis() < 0 ? 
GetAxis() + input_shape.size() + 1 : GetAxis(); - if (axis < 0 || axis > input_shape.size()) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis(); - return RET_PARAM_INVALID; - } - - for (size_t i = 1; i < inputs.size(); ++i) { - auto input_shape_tmp = inputs.at(i)->shape(); - if (input_shape_tmp.size() != input_shape.size()) { - MS_LOG(ERROR) << "All input shapes should have the same rank!"; - return RET_PARAM_INVALID; - } - for (size_t j = 0; j < input_shape.size(); ++j) { - if (input_shape_tmp.at(j) != input_shape.at(j)) { - MS_LOG(ERROR) << "All input shapes should be the same!"; - return RET_PARAM_INVALID; - } - } - if (inputs.at(i)->data_type() != input0_data_type) { - MS_LOG(ERROR) << "All inputs should have the same data type! input[" << i - << "] data type = " << inputs.at(i)->data_type(); - return RET_PARAM_INVALID; - } - } - output_shape.insert(output_shape.begin() + axis, inputs.size()); - outputs.at(0)->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/stack.h b/mindspore/lite/src/ops/stack.h deleted file mode 100644 index dab5637028..0000000000 --- a/mindspore/lite/src/ops/stack.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_STACK_H_ -#define LITE_MINDSPORE_LITE_C_OPS_STACK_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Stack : public PrimitiveC { - public: - Stack() = default; - ~Stack() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Stack, PrimitiveC); - explicit Stack(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); - void SetN(int n); - void SetIsScale(const std::vector<int> &is_scale); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; - int GetN() const; - std::vector<int> GetIsScale() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_STACK_H_ diff --git a/mindspore/lite/src/ops/strided_slice.cc b/mindspore/lite/src/ops/strided_slice.cc deleted file mode 100644 index 08ea8a90de..0000000000 --- a/mindspore/lite/src/ops/strided_slice.cc +++ /dev/null @@ -1,454 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
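Stack::InferShape above insists that every input agree in shape and dtype, then inserts a new dimension of size inputs.size() at the normalized axis. The shape rule in isolation (StackShape is an illustrative helper, not part of the patch):

  #include <vector>

  // A negative axis counts from the back, with rank + 1 valid positions.
  std::vector<int> StackShape(std::vector<int> in_shape, int axis, int n_inputs) {
    const int rank = static_cast<int>(in_shape.size());
    if (axis < 0) axis += rank + 1;  // e.g. axis = -1 appends a trailing dim
    in_shape.insert(in_shape.begin() + axis, n_inputs);
    return in_shape;
  }
  // StackShape({4, 5}, 0, 3) -> {3, 4, 5};  StackShape({4, 5}, -1, 3) -> {4, 5, 3}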
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/strided_slice.h" -#include "src/ops/populate/strided_slice_populate.h" -#include <algorithm> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int StridedSlice::GetBeginMask() const { return this->primitive_->value.AsStridedSlice()->beginMask; } -int StridedSlice::GetEndMask() const { return this->primitive_->value.AsStridedSlice()->endMask; } -int StridedSlice::GetEllipsisMask() const { return this->primitive_->value.AsStridedSlice()->ellipsisMask; } -int StridedSlice::GetNewAxisMask() const { return this->primitive_->value.AsStridedSlice()->newAxisMask; } -int StridedSlice::GetShrinkAxisMask() const { return this->primitive_->value.AsStridedSlice()->shrinkAxisMask; } -std::vector<int> StridedSlice::GetBegin() const { return this->primitive_->value.AsStridedSlice()->begin; } -std::vector<int> StridedSlice::GetEnd() const { return this->primitive_->value.AsStridedSlice()->end; } -std::vector<int> StridedSlice::GetStride() const { return this->primitive_->value.AsStridedSlice()->stride; } -std::vector<int> StridedSlice::GetIsScale() const { return this->primitive_->value.AsStridedSlice()->isScale; } - -void StridedSlice::SetBeginMask(int begin_mask) { this->primitive_->value.AsStridedSlice()->beginMask = begin_mask; } -void StridedSlice::SetEndMask(int end_mask) { this->primitive_->value.AsStridedSlice()->endMask = end_mask; } -void StridedSlice::SetEllipsisMask(int ellipsis_mask) { - this->primitive_->value.AsStridedSlice()->ellipsisMask = ellipsis_mask; -} -void StridedSlice::SetNewAxisMask(int new_axis_mask) { - this->primitive_->value.AsStridedSlice()->newAxisMask = new_axis_mask; -} -void StridedSlice::SetShrinkAxisMask(int shrink_axis_mask) { - this->primitive_->value.AsStridedSlice()->shrinkAxisMask = shrink_axis_mask; -} -void StridedSlice::SetBegin(const std::vector<int> &begin) { this->primitive_->value.AsStridedSlice()->begin = begin; } -void StridedSlice::SetEnd(const std::vector<int> &end) { this->primitive_->value.AsStridedSlice()->end = end; } -void StridedSlice::SetStride(const std::vector<int> &stride) { - this->primitive_->value.AsStridedSlice()->stride = stride; -} -void StridedSlice::SetIsScale(const std::vector<int> &is_scale) { - this->primitive_->value.AsStridedSlice()->isScale = is_scale; -} - -int StridedSlice::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_StridedSlice; - } - if (this->primitive_->value.type != schema::PrimitiveType_StridedSlice) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::StridedSliceT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new StridedSlice failed"; - return RET_ERROR; - } 
- attr->beginMask = CastToInt(prim.GetAttr("begin_mask")).front(); - attr->endMask = CastToInt(prim.GetAttr("end_mask")).front(); - attr->ellipsisMask = CastToInt(prim.GetAttr("ellipsis_mask")).front(); - attr->newAxisMask = CastToInt(prim.GetAttr("new_axis_mask")).front(); - attr->shrinkAxisMask = CastToInt(prim.GetAttr("shrink_axis_mask")).front(); - auto inputNodeFirst = inputs[kAnfPopulaterInputNumOne]; - std::vector<int> beginVec; - GetAttrDataFromInput(inputNodeFirst, &beginVec); - attr->begin = beginVec; - - auto inputNodeSecond = inputs[kAnfPopulaterInputNumTwo]; - std::vector<int> endVec; - GetAttrDataFromInput(inputNodeSecond, &endVec); - attr->end = endVec; - - auto inputNodeThird = inputs[kAnfPopulaterInputNumThree]; - std::vector<int> strideVec; - GetAttrDataFromInput(inputNodeThird, &strideVec); - attr->stride = strideVec; - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -int StridedSlice::GetBeginMask() const { return this->primitive_->value_as_StridedSlice()->beginMask(); } -int StridedSlice::GetEndMask() const { return this->primitive_->value_as_StridedSlice()->endMask(); } -int StridedSlice::GetEllipsisMask() const { return this->primitive_->value_as_StridedSlice()->ellipsisMask(); } -int StridedSlice::GetNewAxisMask() const { return this->primitive_->value_as_StridedSlice()->newAxisMask(); } -int StridedSlice::GetShrinkAxisMask() const { return this->primitive_->value_as_StridedSlice()->shrinkAxisMask(); } -std::vector<int> StridedSlice::GetBegin() const { - auto fb_vector = this->primitive_->value_as_StridedSlice()->begin(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSlice::GetEnd() const { - auto fb_vector = this->primitive_->value_as_StridedSlice()->end(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSlice::GetStride() const { - auto fb_vector = this->primitive_->value_as_StridedSlice()->stride(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSlice::GetIsScale() const { - auto fb_vector = this->primitive_->value_as_StridedSlice()->isScale(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int StridedSlice::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_StridedSlice(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_StridedSlice return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> begin; - if (attr->begin() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->begin()->size()); i++) { - begin.push_back(attr->begin()->data()[i]); - } - } - std::vector<int32_t> end; - if (attr->end() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->end()->size()); i++) { - end.push_back(attr->end()->data()[i]); - } - } - std::vector<int32_t> stride; - if (attr->stride() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->stride()->size()); i++) { - stride.push_back(attr->stride()->data()[i]); - } - } - std::vector<int32_t> isScale; - if (attr->isScale() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->isScale()->size()); i++) { - isScale.push_back(attr->isScale()->data()[i]); - } - } - auto val_offset = - schema::CreateStridedSliceDirect(*fbb, attr->beginMask(), attr->endMask(), 
attr->ellipsisMask(), - attr->newAxisMask(), attr->shrinkAxisMask(), &begin, &end, &stride, &isScale); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_StridedSlice, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *StridedSliceCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<StridedSlice>(primitive); -} -Registry StridedSliceRegistry(schema::PrimitiveType_StridedSlice, StridedSliceCreator); -#endif - -namespace { -constexpr size_t kStridedSliceOutputNum = 1; -constexpr size_t kStridedSliceInputNum = 1; -constexpr size_t kStridedSliceMultiInputNumMin = 3; -constexpr size_t kStridedSliceMultiInputNumMax = 5; -} // namespace -bool StridedSlice::CheckInputs(std::vector<lite::Tensor *> inputs_) { - for (size_t i = 1; i < inputs_.size(); ++i) { - if (inputs_.at(i)->data_c() == nullptr) { - MS_LOG(DEBUG) << "strided_slice has input from other node, which can only be obtained at runtime."; - return false; - } - } - - return ndim_ <= in_shape_.size(); -} - -void StridedSlice::ApplyNewAxisMask() { - for (size_t i = 0; i < new_axis_mask_.size(); i++) { - if (new_axis_mask_.at(i)) { - ndim_ += 1; - in_shape_.insert(in_shape_.begin() + i, 1); - begins_.at(i) = 0; - ends_.at(i) = 1; - strides_.at(i) = 1; - - begins_.emplace_back(0); - ends_.emplace_back(in_shape_.at(ndim_ - 1)); - strides_.emplace_back(1); - - begins_mask_.at(i) = false; - ends_mask_.at(i) = false; - ellipsis_mask_.at(i) = false; - shrink_axis_mask_.at(i) = false; - } - } -} - -std::vector<int> StridedSlice::ApplyShrinkMask(std::vector<int> out_shape) { - auto old_out_shape = out_shape; - out_shape.clear(); - for (size_t i = 0; i < shrink_axis_mask_.size(); i++) { - if (shrink_axis_mask_.at(i)) { - ends_.at(i) = begins_.at(i) + 1; - strides_.at(i) = 1; - } else { - out_shape.emplace_back(old_out_shape.at(i)); - } - } - for (size_t i = shrink_axis_mask_.size(); i < old_out_shape.size(); i++) { - out_shape.emplace_back(old_out_shape.at(i)); - } - return out_shape; -} - -/* only one bit will be used if multiple bits are true. */ -void StridedSlice::ApplyEllipsisMask() { - for (size_t i = 0; i < ellipsis_mask_.size(); i++) { - if (ellipsis_mask_.at(i)) { - begins_.at(i) = 0; - ends_.at(i) = in_shape_.at(i); - break; - } - } -} - -void StridedSlice::ApplyBeginMask() { - for (size_t i = 0; i < ndim_; i++) { - if (begins_mask_.at(i)) { - begins_.at(i) = 0; - } - } -} - -void StridedSlice::ApplyEndMask() { - for (size_t i = 0; i < ndim_; i++) { - if (ends_mask_.at(i)) { - ends_.at(i) = in_shape_.at(i); - } - } -} - -void StridedSlice::TransIndexToPositive() { - for (int i = 0; i < static_cast<int>(begins_.size()); ++i) { - if (begins_.at(i) < 0) { - begins_.at(i) += in_shape_.at(i); - } - if (ends_.at(i) < 0) { - ends_.at(i) += in_shape_.at(i); - } - } -} - -int StridedSlice::HandleAxesInputExist(const std::vector<lite::Tensor *> &inputs) { - // when the axes input exists: - // input order: data, begin, end, axes(opt), stride(opt) - auto input_tensor = inputs.at(0); - MS_ASSERT(input_tensor != nullptr); - auto begin_tensor = inputs.at(1); - MS_ASSERT(begin_tensor != nullptr); - int *begin_data = reinterpret_cast<int *>(begin_tensor->MutableData()); - auto end_tensor = inputs.at(2); - MS_ASSERT(end_tensor != nullptr); - int *end_data = reinterpret_cast<int *>(end_tensor->MutableData()); - if (begin_data == nullptr || end_data == nullptr) { - return RET_INFER_ERR; - } - // when the input contains axes, begins, ends, strides will be expanded to the same length as the input rank
- ndim_ = static_cast<int>(input_tensor->shape().size()); - int begin_ndim = begin_tensor->ElementsNum(); - - int *axes_data = nullptr; - auto axes_tensor = inputs.at(3); - if (axes_tensor->ElementsNum() != 0) { - MS_ASSERT(axes_tensor->ElementsNum() == begin_ndim); - axes_data = reinterpret_cast<int *>(axes_tensor->MutableData()); - if (axes_data == nullptr) { - return RET_INFER_ERR; - } - } - - int *stride_data = nullptr; - auto stride_tensor = inputs.at(4); - if (stride_tensor->ElementsNum() != 0) { - MS_ASSERT(stride_tensor->ElementsNum() == begin_ndim); - stride_data = reinterpret_cast<int *>(stride_tensor->MutableData()); - if (stride_data == nullptr) { - return RET_INFER_ERR; - } - } - - std::vector<int> axes; - if (axes_data == nullptr) { - for (int i = 0; i < begin_ndim; ++i) { - axes.push_back(i); - } - } else { - axes.assign(axes_data, axes_data + begin_ndim); - for (int i = 0; i < begin_ndim; ++i) { - if (axes.at(i) < 0) { - axes.at(i) += ndim_; - } - } - } - - in_shape_.assign(ndim_, 0); - begins_.assign(ndim_, 0); - ends_.assign(ndim_, 0); - strides_.assign(ndim_, 0); - auto input_shape = input_tensor->shape(); - for (size_t i = 0; i < ndim_; ++i) { - in_shape_.at(i) = input_shape.at(i); - } - for (size_t i = 0; i < ndim_; ++i) { - auto axes_it = std::find(axes.begin(), axes.end(), i); - if (axes_it != axes.end()) { - auto axis = axes_it - axes.begin(); - // begins or ends that exceed the limits are set to the limits - begins_.at(i) = std::max(std::min(begin_data[axis], input_shape.at(i) - 1), -input_shape.at(i)); - ends_.at(i) = std::max(std::min(end_data[axis], input_shape.at(i)), -input_shape.at(i) - 1); - strides_.at(i) = stride_data[axis]; - } else { - begins_.at(i) = 0; - ends_.at(i) = input_shape.at(i); - strides_.at(i) = 1; - } - } - return RET_OK; -} - -// note: begin, end, and stride have the same length, which may be less than the rank of the input -int StridedSlice::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kStridedSliceOutputNum) { - MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); - return RET_PARAM_INVALID; - } - if (inputs.size() != kStridedSliceInputNum && - !(inputs.size() <= kStridedSliceMultiInputNumMax && inputs.size() >= kStridedSliceMultiInputNumMin)) { - MS_LOG(ERROR) << "Invalid input size " << inputs.size(); - return RET_PARAM_INVALID; - } - auto input = inputs.at(0); - outputs.front()->set_data_type(input->data_type()); - outputs.at(0)->set_format(input->format()); - MS_ASSERT(input != nullptr); - auto input_shape = input->shape(); - auto inferflag = infer_flag(); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - in_shape_.clear(); - if (inferflag) { - in_shape_.assign(input_shape.begin(), input_shape.end()); - } - begins_.clear(); - ends_.clear(); - strides_.clear(); - if (inputs.size() == kStridedSliceInputNum) { - ndim_ = static_cast<int>(GetBegin().size()); - - for (size_t i = 0; i < ndim_; i++) { - begins_.emplace_back((GetBegin()).at(i)); - ends_.emplace_back((GetEnd()).at(i)); - strides_.emplace_back((GetStride()).at(i)); - } - } - if (!CheckInputs(inputs)) { - MS_LOG(DEBUG) << "Do infer shape at runtime."; - return RET_INFER_INVALID; - } - if (inputs.size() == 4) { - // input order: input, begins, ends, strides.
- auto begin_tensor = inputs.at(1); - int *begin_data = reinterpret_cast<int *>(begin_tensor->MutableData()); - auto end_tensor = inputs.at(2); - int *end_data = reinterpret_cast<int *>(end_tensor->MutableData()); - auto stride_tensor = inputs.at(3); - int *stride_data = reinterpret_cast<int *>(stride_tensor->MutableData()); - if (begin_data == nullptr || end_data == nullptr || stride_data == nullptr) { - return RET_INFER_ERR; - } - ndim_ = begin_tensor->ElementsNum(); - for (size_t i = 0; i < ndim_; ++i) { - begins_.emplace_back(begin_data[i]); - ends_.emplace_back(end_data[i]); - strides_.emplace_back(stride_data[i]); - } - } - if (inputs.size() == 5) { - // input order: input, begins, end, axes, strides - auto ret = HandleAxesInputExist(inputs); - if (ret != RET_OK) { - return ret; - } - } - - // set all mask to original input shape - begins_mask_.resize(ndim_); - ends_mask_.resize(ndim_); - ellipsis_mask_.resize(ndim_); - new_axis_mask_.resize(ndim_); - shrink_axis_mask_.resize(ndim_); - - // convert bit to vector - for (size_t i = 0; i < ndim_; i++) { - begins_mask_.at(i) = static_cast<uint32_t>(GetBeginMask()) & (1 << i); - ends_mask_.at(i) = static_cast<uint32_t>(GetEndMask()) & (1 << i); - ellipsis_mask_.at(i) = static_cast<uint32_t>(GetEllipsisMask()) & (1 << i); - new_axis_mask_.at(i) = static_cast<uint32_t>(GetNewAxisMask()) & (1 << i); - shrink_axis_mask_.at(i) = static_cast<uint32_t>(GetShrinkAxisMask()) & (1 << i); - } - - ApplyNewAxisMask(); - ApplyBeginMask(); - ApplyEndMask(); - ApplyEllipsisMask(); - - if (!inferflag) { - return RET_OK; - } - std::vector<int> output_shape(in_shape_); - - TransIndexToPositive(); - for (size_t i = 0; i < ndim_; i++) { - if (strides_.at(i) == 0) { - MS_LOG(ERROR) << "strides should not be 0."; - return RET_INFER_ERR; - } - output_shape.at(i) = - (ends_.at(i) - begins_.at(i) + strides_.at(i) + (strides_.at(i) < 0 ? 1 : -1)) / strides_.at(i); - } - - output_shape = ApplyShrinkMask(output_shape); - - outputs.front()->set_shape(output_shape); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/strided_slice.h b/mindspore/lite/src/ops/strided_slice.h deleted file mode 100644 index 1e6d2652de..0000000000 --- a/mindspore/lite/src/ops/strided_slice.h +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
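Two pieces of arithmetic in the StridedSlice code above are worth stating on their own. HandleAxesInputExist clamps begin into [-dim, dim - 1] and end into [-dim - 1, dim], and InferShape then sizes each output dimension with a truncating division nudged so it rounds away from zero. A sketch of that dimension formula (SliceLen is illustrative, not part of the patch):

  // Elements produced along one dimension; mirrors the expression in
  // StridedSlice::InferShape. The caller has already rejected stride == 0.
  int SliceLen(int begin, int end, int stride) {
    // Adding stride - 1 (or stride + 1 for negative strides) turns the
    // truncating division into a ceiling over the half-open range [begin, end).
    return (end - begin + stride + (stride < 0 ? 1 : -1)) / stride;
  }
  // SliceLen(0, 5, 2) -> 3 (indices 0, 2, 4);  SliceLen(4, -1, -2) -> 3 (4, 2, 0)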
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_STRIDED_SLICE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_STRIDED_SLICE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class StridedSlice : public PrimitiveC { - public: - StridedSlice() = default; - ~StridedSlice() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(StridedSlice, PrimitiveC); - explicit StridedSlice(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetBeginMask(int begin_mask); - void SetEndMask(int end_mask); - void SetEllipsisMask(int ellipsis_mask); - void SetNewAxisMask(int new_axis_mask); - void SetShrinkAxisMask(int shrink_axis_mask); - void SetBegin(const std::vector<int> &begin); - void SetEnd(const std::vector<int> &end); - void SetStride(const std::vector<int> &stride); - void SetIsScale(const std::vector<int> &is_scale); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - bool CheckInputs(std::vector<lite::Tensor *> inputs_); - int GetBeginMask() const; - int GetEndMask() const; - int GetEllipsisMask() const; - int GetNewAxisMask() const; - int GetShrinkAxisMask() const; - std::vector<int> GetBegin() const; - std::vector<int> GetEnd() const; - std::vector<int> GetStride() const; - std::vector<int> GetIsScale() const; - - int NDims() { return this->ndim_; } - void ApplyNewAxisMask(); - std::vector<int> ApplyShrinkMask(std::vector<int> out_shape); - void ApplyBeginMask(); - void ApplyEndMask(); - void ApplyEllipsisMask(); - std::vector<int> GetInShape() { return this->in_shape_; } - std::vector<int> GetBegins() { return this->begins_; } - std::vector<int> GetEnds() { return this->ends_; } - std::vector<int> GetStrides() { return this->strides_; } - - protected: - size_t ndim_ = 0; - std::vector<int> in_shape_; - std::vector<int> begins_; - std::vector<int> ends_; - std::vector<int> strides_; - std::vector<bool> begins_mask_; - std::vector<bool> ends_mask_; - std::vector<bool> ellipsis_mask_; - std::vector<bool> new_axis_mask_; - std::vector<bool> shrink_axis_mask_; - void TransIndexToPositive(); - int HandleAxesInputExist(const std::vector<lite::Tensor *> &inputs); -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_STRIDED_SLICE_H_ diff --git a/mindspore/lite/src/ops/strided_slice_grad.cc b/mindspore/lite/src/ops/strided_slice_grad.cc deleted file mode 100644 index ea4ca037c4..0000000000 --- a/mindspore/lite/src/ops/strided_slice_grad.cc +++ /dev/null @@ -1,266 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/strided_slice_grad.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -#ifdef PRIMITIVE_WRITEABLE -int StridedSliceGrad::GetBeginMask() const { return this->primitive_->value.AsStridedSliceGrad()->beginMask; } -int StridedSliceGrad::GetEndMask() const { return this->primitive_->value.AsStridedSliceGrad()->endMask; } -int StridedSliceGrad::GetEllipsisMask() const { return this->primitive_->value.AsStridedSliceGrad()->ellipsisMask; } -int StridedSliceGrad::GetNewAxisMask() const { return this->primitive_->value.AsStridedSliceGrad()->newAxisMask; } -int StridedSliceGrad::GetShrinkAxisMask() const { return this->primitive_->value.AsStridedSliceGrad()->shrinkAxisMask; } -std::vector<int> StridedSliceGrad::GetBegin() const { return this->primitive_->value.AsStridedSliceGrad()->begin; } -std::vector<int> StridedSliceGrad::GetEnd() const { return this->primitive_->value.AsStridedSliceGrad()->end; } -std::vector<int> StridedSliceGrad::GetStride() const { return this->primitive_->value.AsStridedSliceGrad()->stride; } -std::vector<int> StridedSliceGrad::GetIsScale() const { return this->primitive_->value.AsStridedSliceGrad()->isScale; } - -void StridedSliceGrad::SetBeginMask(int begin_mask) { - this->primitive_->value.AsStridedSliceGrad()->beginMask = begin_mask; -} -void StridedSliceGrad::SetEndMask(int end_mask) { this->primitive_->value.AsStridedSliceGrad()->endMask = end_mask; } -void StridedSliceGrad::SetEllipsisMask(int ellipsis_mask) { - this->primitive_->value.AsStridedSliceGrad()->ellipsisMask = ellipsis_mask; -} -void StridedSliceGrad::SetNewAxisMask(int new_axis_mask) { - this->primitive_->value.AsStridedSliceGrad()->newAxisMask = new_axis_mask; -} -void StridedSliceGrad::SetShrinkAxisMask(int shrink_axis_mask) { - this->primitive_->value.AsStridedSliceGrad()->shrinkAxisMask = shrink_axis_mask; -} -void StridedSliceGrad::SetBegin(const std::vector<int> &begin) { - this->primitive_->value.AsStridedSliceGrad()->begin = begin; -} -void StridedSliceGrad::SetEnd(const std::vector<int> &end) { this->primitive_->value.AsStridedSliceGrad()->end = end; } -void StridedSliceGrad::SetStride(const std::vector<int> &stride) { - this->primitive_->value.AsStridedSliceGrad()->stride = stride; -} -void StridedSliceGrad::SetIsScale(const std::vector<int> &is_scale) { - this->primitive_->value.AsStridedSliceGrad()->isScale = is_scale; -} - -int StridedSliceGrad::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_StridedSliceGrad; - } - if (this->primitive_->value.type != schema::PrimitiveType_StridedSliceGrad) { - MS_LOG(ERROR) << "primitive_ type is error:" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::StridedSliceGradT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new StridedSliceGrad failed"; - return RET_ERROR; - } - attr->beginMask = CastToInt(prim.GetAttr("begin_mask")).front(); - attr->endMask = CastToInt(prim.GetAttr("end_mask")).front(); - attr->ellipsisMask = CastToInt(prim.GetAttr("ellipsis_mask")).front(); - attr->newAxisMask = CastToInt(prim.GetAttr("new_axis_mask")).front(); - attr->shrinkAxisMask = 
CastToInt(prim.GetAttr("shrink_axis_mask")).front(); - auto inputNodeFirst = inputs[kAnfPopulaterInputNumOne]; - std::vector<int> beginVec; - GetAttrDataFromInput(inputNodeFirst, &beginVec); - attr->begin = beginVec; - - auto inputNodeSecond = inputs[kAnfPopulaterInputNumTwo]; - std::vector<int> endVec; - GetAttrDataFromInput(inputNodeSecond, &endVec); - attr->end = endVec; - - auto inputNodeThird = inputs[kAnfPopulaterInputNumThree]; - std::vector<int> strideVec; - GetAttrDataFromInput(inputNodeThird, &strideVec); - attr->stride = strideVec; - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -int StridedSliceGrad::GetBeginMask() const { return this->primitive_->value_as_StridedSliceGrad()->beginMask(); } -int StridedSliceGrad::GetEndMask() const { return this->primitive_->value_as_StridedSliceGrad()->endMask(); } -int StridedSliceGrad::GetEllipsisMask() const { return this->primitive_->value_as_StridedSliceGrad()->ellipsisMask(); } -int StridedSliceGrad::GetNewAxisMask() const { return this->primitive_->value_as_StridedSliceGrad()->newAxisMask(); } -int StridedSliceGrad::GetShrinkAxisMask() const { - return this->primitive_->value_as_StridedSliceGrad()->shrinkAxisMask(); -} -std::vector<int> StridedSliceGrad::GetBegin() const { - auto fb_vector = this->primitive_->value_as_StridedSliceGrad()->begin(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSliceGrad::GetEnd() const { - auto fb_vector = this->primitive_->value_as_StridedSliceGrad()->end(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSliceGrad::GetStride() const { - auto fb_vector = this->primitive_->value_as_StridedSliceGrad()->stride(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -std::vector<int> StridedSliceGrad::GetIsScale() const { - auto fb_vector = this->primitive_->value_as_StridedSliceGrad()->isScale(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int StridedSliceGrad::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_StridedSliceGrad(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_StridedSliceGrad return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> begin; - if (attr->begin() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->begin()->size()); i++) { - begin.push_back(attr->begin()->data()[i]); - } - } - std::vector<int32_t> end; - if (attr->end() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->end()->size()); i++) { - end.push_back(attr->end()->data()[i]); - } - } - std::vector<int32_t> stride; - if (attr->stride() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->stride()->size()); i++) { - stride.push_back(attr->stride()->data()[i]); - } - } - std::vector<int32_t> isScale; - if (attr->isScale() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->isScale()->size()); i++) { - isScale.push_back(attr->isScale()->data()[i]); - } - } - auto val_offset = - schema::CreateStridedSliceGradDirect(*fbb, attr->beginMask(), attr->endMask(), attr->ellipsisMask(), - attr->newAxisMask(), attr->shrinkAxisMask(), &begin, &end, &stride, &isScale); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_StridedSliceGrad, val_offset.o); - 
fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *StridedSliceGradCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<StridedSliceGrad>(primitive); -} -Registry StridedSliceGradRegistry(schema::PrimitiveType_StridedSliceGrad, StridedSliceGradCreator); -#endif - -namespace { -constexpr size_t kStridedSliceGradOutputNum = 1; -constexpr size_t kStridedSliceGradMultiInputNumMax = 5; -} // namespace - -int StridedSliceGrad::InferShape(std::vector<lite::Tensor *> inputs, std::vector<lite::Tensor *> outputs) { - MS_ASSERT(this->primitive_ != nullptr); - if (outputs.size() != kStridedSliceGradOutputNum) { - MS_LOG(ERROR) << "Invalid output size:" << outputs.size(); - return RET_PARAM_INVALID; - } - if (inputs.size() != kStridedSliceGradMultiInputNumMax) { - MS_LOG(ERROR) << "Invalid input size " << inputs.size(); - return RET_PARAM_INVALID; - } - auto input = inputs.at(0); - outputs.front()->set_data_type(input->data_type()); - outputs.at(0)->set_format(input->format()); - MS_ASSERT(input != nullptr); - auto input_shape = input->shape(); - auto inferflag = infer_flag(); - - in_shape_.clear(); - if (inferflag) { - in_shape_.assign(input_shape.begin(), input_shape.end()); - } - begins_.clear(); - ends_.clear(); - strides_.clear(); - - if (!CheckInputs(inputs)) { - MS_LOG(DEBUG) << "Do infer shape at runtime."; - return RET_INFER_INVALID; - } - - // input order: dy, shapex, begins, ends, strides. - auto begin_tensor = inputs.at(2); - int *begin_data = reinterpret_cast<int *>(begin_tensor->MutableData()); - auto end_tensor = inputs.at(3); - int *end_data = reinterpret_cast<int *>(end_tensor->MutableData()); - auto stride_tensor = inputs.at(4); - int *stride_data = reinterpret_cast<int *>(stride_tensor->MutableData()); - if (begin_data == nullptr || end_data == nullptr || stride_data == nullptr) { - return RET_INFER_ERR; - } - ndim_ = begin_tensor->ElementsNum(); - for (size_t i = 0; i < ndim_; ++i) { - begins_.emplace_back(begin_data[i]); - ends_.emplace_back(end_data[i]); - strides_.emplace_back(stride_data[i]); - } - - // set all mask to original input shape - begins_mask_.resize(ndim_); - ends_mask_.resize(ndim_); - ellipsis_mask_.resize(ndim_); - new_axis_mask_.resize(ndim_); - shrink_axis_mask_.resize(ndim_); - - for (size_t i = 0; i < ndim_; i++) { - begins_mask_.at(i) = static_cast<uint32_t>(GetBeginMask()) & (1 << i); - ends_mask_.at(i) = static_cast<uint32_t>(GetEndMask()) & (1 << i); - ellipsis_mask_.at(i) = static_cast<uint32_t>(GetEllipsisMask()) & (1 << i); - new_axis_mask_.at(i) = static_cast<uint32_t>(GetNewAxisMask()) & (1 << i); - shrink_axis_mask_.at(i) = static_cast<uint32_t>(GetShrinkAxisMask()) & (1 << i); - } - - ApplyNewAxisMask(); - ApplyBeginMask(); - ApplyEndMask(); - ApplyEllipsisMask(); - - if (!inferflag) { - return RET_OK; - } - - auto output_size = inputs.at(1)->shape().at(0); - std::vector<int> output_shape; - MS_ASSERT(inputs.at(1)->MutableData() != nullptr); - for (int i = 0; i < output_size; i++) { - output_shape.push_back(static_cast<int *>(inputs.at(1)->MutableData())[i]); - } - outputs.front()->set_shape(output_shape); - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/strided_slice_grad.h b/mindspore/lite/src/ops/strided_slice_grad.h deleted file mode 100644 index f0951ccf20..0000000000 --- a/mindspore/lite/src/ops/strided_slice_grad.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_OPS_STRIDED_SLICE_GRAD_H_ -#define MINDSPORE_LITE_SRC_OPS_STRIDED_SLICE_GRAD_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/strided_slice.h" - -namespace mindspore { -namespace lite { -class StridedSliceGrad : public StridedSlice { - public: - StridedSliceGrad() = default; - ~StridedSliceGrad() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(StridedSliceGrad, StridedSlice); - explicit StridedSliceGrad(schema::PrimitiveT *primitive) : StridedSlice(primitive) {} - void SetBeginMask(int begin_mask); - void SetEndMask(int end_mask); - void SetEllipsisMask(int ellipsis_mask); - void SetNewAxisMask(int new_axis_mask); - void SetShrinkAxisMask(int shrink_axis_mask); - void SetBegin(const std::vector<int> &begin); - void SetEnd(const std::vector<int> &end); - void SetStride(const std::vector<int> &stride); - void SetIsScale(const std::vector<int> &is_scale); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - // bool CheckInputs(std::vector<lite::Tensor *> inputs_); - int GetBeginMask() const; - int GetEndMask() const; - int GetEllipsisMask() const; - int GetNewAxisMask() const; - int GetShrinkAxisMask() const; - std::vector<int> GetBegin() const; - std::vector<int> GetEnd() const; - std::vector<int> GetStride() const; - std::vector<int> GetIsScale() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_STRIDED_SLICE_GRAD_H_ diff --git a/mindspore/lite/src/ops/sub.cc b/mindspore/lite/src/ops/sub.cc deleted file mode 100644 index 52d4d75418..0000000000 --- a/mindspore/lite/src/ops/sub.cc +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/sub.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Sub::GetActivationType() const { return this->primitive_->value.AsSub()->activationType; } - -void Sub::SetActivationType(int activation_type) { - this->primitive_->value.AsSub()->activationType = (schema::ActivationType)activation_type; -} - -int Sub::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Sub; - } - if (this->primitive_->value.type != schema::PrimitiveType_Sub) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SubT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - attr->activationType = schema::ActivationType_NO_ACTIVATION; - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -int Sub::GetActivationType() const { return this->primitive_->value_as_Sub()->activationType(); } -int Sub::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Sub(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Sub return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSub(*fbb, attr->activationType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Sub, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SubCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Sub>(primitive); } -Registry SubRegistry(schema::PrimitiveType_Sub, SubCreator); - -#endif - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/sub.h b/mindspore/lite/src/ops/sub.h deleted file mode 100644 index d431851ee3..0000000000 --- a/mindspore/lite/src/ops/sub.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
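One detail shared by StridedSlice and StridedSliceGrad above: each integer bitmask (beginMask, endMask, and so on) is expanded into a per-dimension vector of flags, and the cast has to keep the full integer width before the AND. Casting the mask to bool first would collapse it to 0 or 1 and silently drop every bit above the lowest, which is why the conversion uses static_cast<uint32_t>. A standalone sketch (MaskToFlags is illustrative, not part of the patch):

  #include <cstddef>
  #include <cstdint>
  #include <vector>

  std::vector<bool> MaskToFlags(uint32_t mask, size_t ndim) {
    std::vector<bool> flags(ndim);
    for (size_t i = 0; i < ndim; ++i) {
      flags[i] = (mask & (1u << i)) != 0;  // test bit i of the full-width mask
    }
    return flags;
  }
  // MaskToFlags(0b101u, 3) -> {true, false, true}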
- */ - -#ifndef MINDSPORE_LITE_SRC_OPS_SUB_H_ -#define MINDSPORE_LITE_SRC_OPS_SUB_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/arithmetic.h" - -namespace mindspore { -namespace lite { -class Sub : public Arithmetic { - public: - Sub() = default; - ~Sub() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Sub, Arithmetic); - explicit Sub(schema::PrimitiveT *primitive) : Arithmetic(primitive) {} - void SetActivationType(int activation_type); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetActivationType() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_SRC_OPS_SUB_H_ diff --git a/mindspore/lite/src/ops/switch.cc b/mindspore/lite/src/ops/switch.cc deleted file mode 100644 index 0e08a17d08..0000000000 --- a/mindspore/lite/src/ops/switch.cc +++ /dev/null @@ -1,142 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/switch.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif -#include "src/tensorlist.h" - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Switch::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Switch; - } - if (this->primitive_->value.type != schema::PrimitiveType_Switch) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::SwitchT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int Switch::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Switch(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Switch return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateSwitch(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Switch, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *SwitchCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Switch>(primitive); } -Registry SwitchRegistry(schema::PrimitiveType_Switch, SwitchCreator); -#endif - -int Switch::InferShape(std::vector<Tensor *> 
inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(2 * (inputs_.size() - 1) == outputs_.size()); - for (size_t i = 0; i < outputs_.size() / 2; i++) { - auto *input = inputs_[i + 1]; - auto *output_true = outputs_[i]; - auto *output_false = outputs_[i + outputs_.size() / 2]; - if (input == nullptr) { - MS_LOG(ERROR) << "input tensor is nullptr"; - return RET_ERROR; - } - if (output_true == nullptr || output_false == nullptr) { - MS_LOG(ERROR) << "output tensor is nullptr"; - return RET_ERROR; - } - output_true->set_data_type(input->data_type()); - output_false->set_data_type(input->data_type()); - output_true->set_format(input->format()); - output_false->set_format(input->format()); - auto data_type = input->data_type(); - if (data_type != kObjectTypeTensorType) { - continue; - } else { - auto input_tensorlist = reinterpret_cast<TensorList *>(input); - auto output_true_tensorlist = reinterpret_cast<TensorList *>(output_true); - auto output_false_tensorlist = reinterpret_cast<TensorList *>(output_false); - output_true_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_false_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_true_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_false_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_true_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - output_false_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - } - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - for (size_t i = 0; i < outputs_.size() / 2; i++) { - auto *input = inputs_[i + 1]; - auto *output_true = outputs_[i]; - auto *output_false = outputs_[i + outputs_.size() / 2]; - if (input == nullptr) { - MS_LOG(ERROR) << "input tensor is nullptr"; - return RET_ERROR; - } - if (output_true == nullptr || output_false == nullptr) { - MS_LOG(ERROR) << "output tensor is nullptr"; - return RET_ERROR; - } - output_true->set_shape(input->shape()); - output_false->set_shape(input->shape()); - auto data_type = input->data_type(); - if (data_type != kObjectTypeTensorType) { - continue; - } else { - auto input_tensorlist = reinterpret_cast<TensorList *>(input); - auto output_true_tensorlist = reinterpret_cast<TensorList *>(output_true); - auto output_false_tensorlist = reinterpret_cast<TensorList *>(output_false); - output_true_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_false_tensorlist->set_element_shape(input_tensorlist->element_shape()); - output_true_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_false_tensorlist->set_max_elements_num(input_tensorlist->max_elements_num()); - output_true_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - output_false_tensorlist->set_tensors_data_type(input_tensorlist->tensors_data_type()); - } - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/switch.h b/mindspore/lite/src/ops/switch.h deleted file mode 100644 index 80e6d4fed7..0000000000 --- a/mindspore/lite/src/ops/switch.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
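`Switch::InferShape` above assumes the outputs come in two equal halves: for data inputs d_0..d_{n-1} (input 0, the condition tensor, is skipped), output i is the "true" branch copy of d_i and output i + n is the "false" branch copy, each inheriting dtype, format, and shape. A small self-contained sketch of just that index arithmetic, using plain shape vectors instead of lite::Tensor:

    #include <cassert>
    #include <vector>

    using Shape = std::vector<int>;

    // Mirrors the pairing in Switch::InferShape: inputs = {cond, d_0..d_{n-1}},
    // outputs = {true_0..true_{n-1}, false_0..false_{n-1}}.
    std::vector<Shape> SwitchOutputShapes(const std::vector<Shape> &inputs) {
      assert(!inputs.empty());
      const size_t n = inputs.size() - 1;  // number of data inputs
      std::vector<Shape> outputs(2 * n);
      for (size_t i = 0; i < n; ++i) {
        outputs[i] = inputs[i + 1];      // true-branch output
        outputs[i + n] = inputs[i + 1];  // false-branch output
      }
      return outputs;
    }

    int main() {
      auto outs = SwitchOutputShapes({{1}, {2, 3}, {4}});
      assert(outs.size() == 4 && outs[0] == Shape({2, 3}) && outs[2] == Shape({2, 3}));
    }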
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_SWITCH_H_ -#define LITE_MINDSPORE_LITE_C_OPS_SWITCH_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Switch : public PrimitiveC { - public: - Switch() = default; - ~Switch() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Switch, PrimitiveC); - explicit Switch(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_SWITCH_H_ diff --git a/mindspore/lite/src/ops/tensorlist_fromtensor.cc b/mindspore/lite/src/ops/tensorlist_fromtensor.cc deleted file mode 100644 index 441250de03..0000000000 --- a/mindspore/lite/src/ops/tensorlist_fromtensor.cc +++ /dev/null @@ -1,147 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include <vector> -#include "src/ops/tensorlist_fromtensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int TensorListFromTensor::GetElementDType() const { - return this->primitive_->value.AsTensorListFromTensor()->elementDType; -} - -int TensorListFromTensor::GetShapeType() const { return this->primitive_->value.AsTensorListFromTensor()->shapeType; } - -void TensorListFromTensor::SetElementDType(int type) { - this->primitive_->value.AsTensorListFromTensor()->elementDType = type; -} - -void TensorListFromTensor::SetShapeType(int type) { - this->primitive_->value.AsTensorListFromTensor()->shapeType = type; -} - -int TensorListFromTensor::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TensorListFromTensor; - } - if (this->primitive_->value.type != schema::PrimitiveType_TensorListFromTensor) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TensorListFromTensorT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new TensorListFromTensorT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("elementDType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListFromTensorT's attr elementDType is not set"; - return RET_ERROR; - } else { - attr->elementDType = CastToInt(prim.GetAttr("elementDType")).front(); - } - if (prim.GetAttr("shapeType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListFromTensorT's attr shapeType is not set"; - return RET_ERROR; - } else { - attr->shapeType = CastToInt(prim.GetAttr("shapeType")).front(); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -int TensorListFromTensor::GetElementDType() const { - return this->primitive_->value_as_TensorListFromTensor()->elementDType(); -} - -int TensorListFromTensor::GetShapeType() const { - return this->primitive_->value_as_TensorListFromTensor()->shapeType(); -} - -int TensorListFromTensor::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_TensorListFromTensor(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TensorListFromTensor return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTensorListFromTensor(*fbb, attr->elementDType(), attr->shapeType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TensorListFromTensor, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TensorListFromTensorCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TensorListFromTensor>(primitive); -} -Registry TensorListFromTensorRegistry(schema::PrimitiveType_TensorListFromTensor, TensorListFromTensorCreator); -#endif - -int TensorListFromTensor::InferShape(std::vector<lite::Tensor *> inputs_, 
std::vector<lite::Tensor *> outputs_) { - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input0 = inputs_[0]; - MS_ASSERT(input0 != nullptr); - std::vector<int> input0_shape = input0->shape(); - if (input0_shape.size() < 1) { - MS_LOG(ERROR) << "input0_shape.size():" << input0_shape.size() << " must be greater than 0!"; - return RET_ERROR; - } - int dim0 = input0_shape[0]; - if (dim0 < 0) { - MS_LOG(ERROR) << "inputs_[0] dim0:" << dim0 << " must greater than or equal to 0"; - return RET_ERROR; - } - auto input1 = inputs_[1]; - MS_ASSERT(input1 != nullptr); - if (input1->data_c() == nullptr) { - MS_LOG(ERROR) << "input1->data_c() is nullptr"; - return RET_NULL_PTR; - } - auto ele_shape_ptr = reinterpret_cast<int *>(input1->data_c()); - auto output = reinterpret_cast<TensorList *>(outputs_[0]); - MS_ASSERT(output != nullptr); - std::vector<std::vector<int> > tensor_shape(dim0, std::vector<int>(input0_shape.begin() + 1, input0_shape.end())); - output->set_element_shape(std::vector<int>(ele_shape_ptr, ele_shape_ptr + input1->ElementsNum())); - output->set_shape(std::vector<int>(1, dim0)); - output->set_data_type(kObjectTypeTensorType); - output->MallocTensorListData(input0->data_type(), tensor_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tensorlist_fromtensor.h b/mindspore/lite/src/ops/tensorlist_fromtensor.h deleted file mode 100644 index 6c7de6209c..0000000000 --- a/mindspore/lite/src/ops/tensorlist_fromtensor.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
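`TensorListFromTensor::InferShape` above splits the first input along dim 0: the resulting list holds dim0 elements, each of shape input0_shape[1:], while the list's element_shape is read from the second input tensor. A standalone sketch of that shape computation, with plain vectors standing in for Tensor/TensorList:

    #include <cassert>
    #include <vector>

    using Shape = std::vector<int>;

    struct TensorListShapes {
      Shape element_shape;               // taken from the second input
      int num_elements;                  // dim 0 of the first input
      std::vector<Shape> tensor_shapes;  // one per element: input0_shape[1:]
    };

    TensorListShapes FromTensorShapes(const Shape &input0_shape, const Shape &element_shape) {
      assert(!input0_shape.empty() && input0_shape[0] >= 0);
      int dim0 = input0_shape[0];
      Shape rest(input0_shape.begin() + 1, input0_shape.end());
      return {element_shape, dim0, std::vector<Shape>(dim0, rest)};
    }

    int main() {
      auto r = FromTensorShapes({4, 2, 3}, {2, 3});
      assert(r.num_elements == 4 && r.tensor_shapes[0] == Shape({2, 3}));
    }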
- */ -#include <vector> -#include "src/ops/primitive_c.h" -#include "src/tensorlist.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_ -namespace mindspore { -namespace lite { -class TensorListFromTensor : public PrimitiveC { - public: - TensorListFromTensor() = default; - ~TensorListFromTensor() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TensorListFromTensor, PrimitiveC); - void SetElementDType(int type); - void SetShapeType(int type); - explicit TensorListFromTensor(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int GetElementDType() const; - int GetShapeType() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTFROMTENSOR_H_ diff --git a/mindspore/lite/src/ops/tensorlist_getitem.cc b/mindspore/lite/src/ops/tensorlist_getitem.cc deleted file mode 100644 index 2a499f775f..0000000000 --- a/mindspore/lite/src/ops/tensorlist_getitem.cc +++ /dev/null @@ -1,192 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include <vector> -#include "src/ops/tensorlist_getitem.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -TypeId TensorListGetItem::GetElementDType() const { - return (TypeId)(this->primitive_->value.AsTensorListGetItem()->elementDType); -} - -void TensorListGetItem::SetElementDType(int type) { - this->primitive_->value.AsTensorListGetItem()->elementDType = type; -} - -int TensorListGetItem::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TensorListGetItem; - } - if (this->primitive_->value.type != schema::PrimitiveType_TensorListGetItem) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TensorListGetItemT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new TensorListGetItemT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("elementDType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListGetItem's attr elementDType is not set"; - return RET_ERROR; - } else { - attr->elementDType = CastToInt(prim.GetAttr("elementDType")).front(); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -TypeId TensorListGetItem::GetElementDType() const { - return (TypeId)(this->primitive_->value_as_TensorListGetItem()->elementDType()); -} - -int TensorListGetItem::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_TensorListGetItem(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TensorListGetItem return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTensorListGetItem(*fbb, attr->elementDType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TensorListGetItem, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TensorListGetItemCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TensorListGetItem>(primitive); -} -Registry TensorListGetItemRegistry(schema::PrimitiveType_TensorListGetItem, TensorListGetItemCreator); -#endif -bool TensorListGetItem::IsFullyDefined(const std::vector<int> &shape) const { - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i] < 0) { - return false; - } - } - return true; -} - -int TensorListGetItem::MergeShape(const std::vector<int> &tmp) { - if (element_shape_.size() != tmp.size()) { - MS_LOG(ERROR) << "element_shape_.size():" << element_shape_.size() << " must be equal to tmp.size():" << tmp.size(); - return RET_ERROR; - } - for (size_t j = 0; j < tmp.size(); ++j) { - if (element_shape_[j] >= 0 && tmp[j] >= 0 && element_shape_[j] != tmp[j]) { - MS_LOG(ERROR) << "element_shape_[" << j << "]:" << element_shape_[j] << " must be equal to tmp[" << j - << "]:" << tmp[j]; - return RET_ERROR; - } - element_shape_[j] = element_shape_[j] >= 0 ? 
element_shape_[j] : tmp[j]; - } - return RET_OK; -} - -int TensorListGetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (!infer_flag()) { - return RET_INFER_INVALID; - } - MS_ASSERT(inputs_.size() >= 3); - MS_ASSERT(inputs_.at(0) != nullptr); - MS_ASSERT(inputs_.at(1) != nullptr); - MS_ASSERT(inputs_.at(2) != nullptr); - auto input0 = reinterpret_cast<TensorList *>(inputs_.at(0)); - if (input0->root_tensor() != nullptr) { - input0 = reinterpret_cast<TensorList *>(input0->root_tensor()); - } - auto get_index = inputs_.at(1); - MS_ASSERT(get_index != nullptr); - if (get_index->ElementsNum() != 1) { - MS_LOG(ERROR) << "get_index->ElementsNum():" << get_index->ElementsNum() << " must be equal to 1!"; - return RET_ERROR; - } - if (get_index->data_c() == nullptr) { - MS_LOG(DEBUG) << "get_index->data_c() is nullptr"; - return RET_INFER_INVALID; - } - index_ = reinterpret_cast<int *>(get_index->data_c())[0]; - if (index_ < 0 || index_ > (input0->ElementsNum() - 1)) { - MS_LOG(ERROR) << "index_:" << index_ << "must in [0, " << input0->ElementsNum() - 1 << "]"; - return RET_ERROR; - } - auto tensor_index = input0->GetTensor(index_); - if (tensor_index == nullptr) { - return RET_INFER_INVALID; - } - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (tensor_index->data_type() != kTypeUnknown) { - output->set_data_type(tensor_index->data_type()); - output->set_shape(tensor_index->shape()); - } else { - auto input2 = inputs_[2]; - if (input2->data_c() == nullptr) { - MS_LOG(ERROR) << "input2->data_c() is nullptr"; - return RET_NULL_PTR; - } - auto ele_shape_data = reinterpret_cast<int *>(input2->data_c()); - for (int i = 0; i < input2->ElementsNum(); ++i) { - element_shape_.push_back(ele_shape_data[i]); - } - auto status = MergeShape(input0->element_shape()); - if (status != RET_OK) { - return RET_ERROR; - } - if (!IsFullyDefined(element_shape_)) { - for (int i = 0; i < input0->ElementsNum(); ++i) { - auto input = input0->GetTensor(i); - MS_ASSERT(input != nullptr); - if (input->data_type() != kTypeUnknown) { - status = MergeShape(input->shape()); - if (status != RET_OK) { - return RET_ERROR; - } - } - } - } - if (!IsFullyDefined(element_shape_)) { - MS_LOG(ERROR) << "element_shape_ is not fullyDefined!"; - return RET_ERROR; - } - output->set_data_type(input0->tensors_data_type()); - output->set_shape(element_shape_); - } - output->set_format(input0->GetTensor(index_)->format()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tensorlist_getitem.h b/mindspore/lite/src/ops/tensorlist_getitem.h deleted file mode 100644 index 93f8eea307..0000000000 --- a/mindspore/lite/src/ops/tensorlist_getitem.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
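`TensorListGetItem::MergeShape` above implements a simple partial-shape unification: ranks must match, dimensions must agree wherever both sides are known (>= 0), and an unknown (-1) dimension adopts the other side's value. The same rule, as a standalone sketch:

    #include <cassert>
    #include <optional>
    #include <vector>

    // Unify two partially known shapes (-1 meaning "unknown"), as in
    // TensorListGetItem::MergeShape. Returns nullopt on rank or dim mismatch.
    std::optional<std::vector<int>> MergeShape(std::vector<int> acc, const std::vector<int> &other) {
      if (acc.size() != other.size()) return std::nullopt;
      for (size_t i = 0; i < acc.size(); ++i) {
        if (acc[i] >= 0 && other[i] >= 0 && acc[i] != other[i]) return std::nullopt;
        if (acc[i] < 0) acc[i] = other[i];  // adopt the known dimension
      }
      return acc;
    }

    int main() {
      auto m = MergeShape({-1, 3}, {2, -1});
      assert(m && (*m)[0] == 2 && (*m)[1] == 3);
      assert(!MergeShape({2, 3}, {4, 3}));  // conflicting known dims
    }

InferShape then applies this merge first against the list's element_shape and, if dims remain unknown, against each stored tensor's shape until the result is fully defined.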
- */ -#include <vector> -#include "src/ops/primitive_c.h" -#include "src/tensorlist.h" -#include "ir/dtype/type_id.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTGETITEM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTGETITEM_H_ -namespace mindspore { -namespace lite { -class TensorListGetItem : public PrimitiveC { - public: - TensorListGetItem() = default; - ~TensorListGetItem() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TensorListGetItem, PrimitiveC); - void SetElementDType(int type); - explicit TensorListGetItem(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - TypeId GetElementDType() const; - int MergeShape(const std::vector<int> &tmp); - bool IsFullyDefined(const std::vector<int> &shape) const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - - private: - int index_ = -1; - std::vector<int> element_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTGETITEM_H_ diff --git a/mindspore/lite/src/ops/tensorlist_reserve.cc b/mindspore/lite/src/ops/tensorlist_reserve.cc deleted file mode 100644 index 4cbe1b87bb..0000000000 --- a/mindspore/lite/src/ops/tensorlist_reserve.cc +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
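The index handling in `TensorListGetItem::InferShape` above follows a pattern repeated across these ops: the index tensor must hold exactly one int, its data pointer must be non-null, and the value must address an existing element. A condensed sketch of those checks over a plain int vector standing in for the index tensor's data:

    #include <cstdio>
    #include <optional>
    #include <vector>

    // Mirrors the checks in TensorListGetItem::InferShape: exactly one element
    // (ElementsNum() == 1) and a value inside [0, num_elements - 1].
    std::optional<int> ReadElementIndex(const std::vector<int> &index_data, int num_elements) {
      if (index_data.size() != 1) return std::nullopt;
      int index = index_data[0];
      if (index < 0 || index > num_elements - 1) return std::nullopt;  // bounds check
      return index;
    }

    int main() {
      printf("%d\n", ReadElementIndex({2}, 4).value());  // prints 2
    }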
- */ -#include <vector> -#include "src/ops/tensorlist_reserve.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -TypeId TensorListReserve::GetElementDType() const { - return (TypeId)(this->primitive_->value.AsTensorListReserve()->elementDType); -} - -void TensorListReserve::SetElementDType(int type) { - this->primitive_->value.AsTensorListReserve()->elementDType = type; -} - -int TensorListReserve::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TensorListReserve; - } - if (this->primitive_->value.type != schema::PrimitiveType_TensorListReserve) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TensorListReserveT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new TensorListReserveT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("elementDType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListReserve's attr elementDType is not set"; - return RET_ERROR; - } else { - attr->elementDType = CastToInt(prim.GetAttr("elementDType")).front(); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else -TypeId TensorListReserve::GetElementDType() const { - return (TypeId)(this->primitive_->value_as_TensorListReserve()->elementDType()); -} - -int TensorListReserve::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(primitive != nullptr); - MS_ASSERT(fbb != nullptr); - auto attr = primitive->value_as_TensorListReserve(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TensorListReserve return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTensorListReserve(*fbb, attr->elementDType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TensorListReserve, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TensorListReserveCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TensorListReserve>(primitive); -} -Registry TensorListReserveRegistry(schema::PrimitiveType_TensorListReserve, TensorListReserveCreator); -#endif - -int TensorListReserve::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - // input0: element_shape_tensor - // input1: num_elements - auto input0 = inputs_.front(); - MS_ASSERT(input0 != nullptr); - auto ele_shape_type = input0->data_type(); - if (ele_shape_type != kNumberTypeInt && ele_shape_type != kNumberTypeInt32) { - MS_LOG(ERROR) << "ele_shape_tensor.data_type():" << ele_shape_type << " is not int"; - return RET_ERROR; - } - if (input0->data_c() == nullptr) { - MS_LOG(ERROR) << "input0->data_c() is nullptr"; - return RET_INFER_INVALID; - } - auto ele_shape_ptr = reinterpret_cast<int *>(input0->data_c()); - - auto input1 = inputs_[1]; - MS_ASSERT(input1 != nullptr); - auto num_ele_type = input1->data_type(); - if (num_ele_type != kNumberTypeInt && ele_shape_type 
!= kNumberTypeInt32) { - MS_LOG(ERROR) << "num_ele_tensor.data_type():" << num_ele_type << " is not int"; - return RET_ERROR; - } - if (input1->ElementsNum() != 1) { - MS_LOG(ERROR) << "input1->ElementsNum() must be equal to 1"; - return RET_ERROR; - } - if (input1->data_c() == nullptr) { - MS_LOG(ERROR) << "input1->data_c() is nullptr"; - return RET_INFER_INVALID; - } - int num_elements = reinterpret_cast<int *>(input1->data_c())[0]; - auto output = reinterpret_cast<TensorList *>(outputs_[0]); - MS_ASSERT(output != nullptr); - output->set_data_type(kObjectTypeTensorType); - std::vector<std::vector<int> > tmp_shape(num_elements, std::vector<int>()); - output->set_element_shape(std::vector<int>(ele_shape_ptr, ele_shape_ptr + input0->ElementsNum())); - output->set_shape(std::vector<int>(1, num_elements)); - output->MallocTensorListData(kTypeUnknown, tmp_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tensorlist_reserve.h b/mindspore/lite/src/ops/tensorlist_reserve.h deleted file mode 100644 index 126b9aa8da..0000000000 --- a/mindspore/lite/src/ops/tensorlist_reserve.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include <vector> -#include "src/ops/primitive_c.h" -#include "src/tensorlist.h" -#include "ir/dtype/type_id.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTRESERVE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTRESERVE_H_ -namespace mindspore { -namespace lite { -class TensorListReserve : public PrimitiveC { - public: - TensorListReserve() = default; - ~TensorListReserve() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TensorListReserve, PrimitiveC); - void SetElementDType(int type); - explicit TensorListReserve(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - TypeId GetElementDType() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTRESERVE_H_ diff --git a/mindspore/lite/src/ops/tensorlist_setitem.cc b/mindspore/lite/src/ops/tensorlist_setitem.cc deleted file mode 100644 index 7891d7253c..0000000000 --- a/mindspore/lite/src/ops/tensorlist_setitem.cc +++ /dev/null @@ -1,168 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
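`TensorListReserve::InferShape` above reserves num_elements empty (shape-unknown) slots, taking the element shape from input 0 and the count from input 1. Note that its dtype check, `num_ele_type != kNumberTypeInt && ele_shape_type != kNumberTypeInt32`, re-tests `ele_shape_type` where `num_ele_type` is evidently intended, so the int32 case of the num_elements tensor is never actually checked. A sketch with the check as presumably intended, using stand-in type ids (the real values live in ir/dtype/type_id.h):

    #include <cassert>
    #include <vector>

    constexpr int kNumberTypeInt = 1;    // stand-in ids for illustration only,
    constexpr int kNumberTypeInt32 = 2;  // not the real TypeId values

    // Dtype check as presumably intended for both input tensors.
    bool IsIntTensor(int data_type) {
      return data_type == kNumberTypeInt || data_type == kNumberTypeInt32;
    }

    // Shape side of TensorListReserve: num_elements slots, each shape unknown.
    std::vector<std::vector<int>> ReserveShapes(int num_elements) {
      return std::vector<std::vector<int>>(num_elements, std::vector<int>{});
    }

    int main() {
      assert(IsIntTensor(kNumberTypeInt32) && !IsIntTensor(99));
      assert(ReserveShapes(3).size() == 3);
    }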
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include <vector> -#include "src/ops/tensorlist_setitem.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -TypeId TensorListSetItem::GetElementDType() const { - return (TypeId)(this->primitive_->value.AsTensorListSetItem()->elementDType); -} - -void TensorListSetItem::SetElementDType(int type) { - this->primitive_->value.AsTensorListSetItem()->elementDType = type; -} - -int TensorListSetItem::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TensorListSetItem; - } - if (this->primitive_->value.type != schema::PrimitiveType_TensorListSetItem) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TensorListSetItemT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new TensorListSetItemT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("elementDType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListSetItem's attr elementDType is not set"; - return RET_ERROR; - } else { - attr->elementDType = CastToInt(prim.GetAttr("elementDType")).front(); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -TypeId TensorListSetItem::GetElementDType() const { - return (TypeId)(this->primitive_->value_as_TensorListSetItem()->elementDType()); -} - -int TensorListSetItem::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_TensorListSetItem(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TensorListSetItem return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTensorListSetItem(*fbb, attr->elementDType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TensorListSetItem, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TensorListSetItemCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TensorListSetItem>(primitive); -} -Registry TensorListSetItemRegistry(schema::PrimitiveType_TensorListSetItem, TensorListSetItemCreator); -#endif - -int TensorListSetItem::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - auto input0 = reinterpret_cast<TensorList *>(inputs_[0]); - MS_ASSERT(input0 != nullptr); - auto get_index = inputs_[1]; - MS_ASSERT(get_index != nullptr); - auto value_tensor = inputs_[2]; - MS_ASSERT(value_tensor != nullptr); - auto output0 = reinterpret_cast<TensorList 
*>(outputs_[0]); - MS_ASSERT(output0 != nullptr); - - output0->set_data_type(input0->data_type()); - output0->set_format(input0->format()); - - if (!infer_flag()) { - return RET_INFER_INVALID; - } - if (get_index->data_c() == nullptr || value_tensor->data_c() == nullptr) { - return RET_INFER_INVALID; - } - - if (get_index->data_type() != kNumberTypeInt && get_index->data_type() != kNumberTypeInt32) { - MS_LOG(ERROR) << "inputs_[1]->data_type():" << get_index->data_type() << " is not int"; - return RET_ERROR; - } - if (get_index->ElementsNum() != 1) { - MS_LOG(ERROR) << "inputs_[1].ElementsNum():" << get_index->ElementsNum() << " must be equal to 1!"; - return RET_ERROR; - } - if (get_index->data_c() == nullptr) { - MS_LOG(ERROR) << "get_index->data_c() is nullptr"; - return RET_NULL_PTR; - } - int index = reinterpret_cast<int *>(get_index->data_c())[0]; - if (index < 0 || (index >= static_cast<int>(input0->tensors().size()) && index != 0)) { - MS_LOG(ERROR) << "index_:" << index << "must in [0, " << input0->tensors().size() << "]"; - return RET_ERROR; - } - - output0->set_max_elements_num(input0->max_elements_num()); - - if (input0->tensors().empty() && input0->element_shape().empty() && index == 0) { - input0->set_element_shape(value_tensor->shape()); - output0->set_element_shape(value_tensor->shape()); - } else { - output0->set_element_shape(input0->element_shape()); - } - std::vector<std::vector<int> > out_shape; - if (index == 0 && input0->tensors().size() == 0) { // uninitialized tensorlist - out_shape.push_back(value_tensor->shape()); - output0->set_shape(std::vector<int>{1}); - } else { - output0->set_shape(input0->shape()); - for (int i = 0; i < input0->ElementsNum(); ++i) { - auto src_ptr = input0->GetTensor(i); - if (src_ptr == nullptr) { - MS_LOG(ERROR) << "input0->tensors_[" << i << "] is nullptr!"; - return RET_ERROR; - } - if (src_ptr->data_type() != kTypeUnknown) { - out_shape.push_back(src_ptr->shape()); - } else { - out_shape.push_back(std::vector<int>()); - } - } - } - if (input0->tensors_data_type() == kTypeUnknown) { - input0->set_tensors_data_type(value_tensor->data_type()); - } - out_shape[index] = value_tensor->shape(); - output0->MallocTensorListData(input0->tensors_data_type(), out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tensorlist_setitem.h b/mindspore/lite/src/ops/tensorlist_setitem.h deleted file mode 100644 index 7df2e06e75..0000000000 --- a/mindspore/lite/src/ops/tensorlist_setitem.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
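The shape side of `TensorListSetItem::InferShape` above reduces to: keep every stored element shape, overwrite the written slot with the value tensor's shape, and treat an uninitialized list written at index 0 as becoming a one-element list. Sketch:

    #include <cassert>
    #include <vector>

    using Shape = std::vector<int>;

    // Per-element output shapes for TensorListSetItem: existing shapes are
    // kept, the written slot takes the value tensor's shape; an uninitialized
    // list (no tensors, index 0) becomes a one-element list.
    std::vector<Shape> SetItemShapes(const std::vector<Shape> &elements, int index,
                                     const Shape &value_shape) {
      if (elements.empty() && index == 0) return {value_shape};
      std::vector<Shape> out = elements;
      out.at(index) = value_shape;
      return out;
    }

    int main() {
      auto out = SetItemShapes({{2, 3}, {}}, 1, {4});
      assert(out[1] == Shape({4}) && out[0] == Shape({2, 3}));
      assert(SetItemShapes({}, 0, {5}).size() == 1);
    }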
- */ -#include <vector> -#include "src/ops/primitive_c.h" -#include "src/tensorlist.h" -#include "ir/dtype/type_id.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSETITEM_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSETITEM_H_ -namespace mindspore { -namespace lite { -class TensorListSetItem : public PrimitiveC { - public: - TensorListSetItem() = default; - ~TensorListSetItem() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TensorListSetItem, PrimitiveC); - void SetElementDType(int type); - explicit TensorListSetItem(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - TypeId GetElementDType() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSETITEM_H_ diff --git a/mindspore/lite/src/ops/tensorlist_stack.cc b/mindspore/lite/src/ops/tensorlist_stack.cc deleted file mode 100644 index 9011db8f32..0000000000 --- a/mindspore/lite/src/ops/tensorlist_stack.cc +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include <vector> -#include "src/ops/tensorlist_stack.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -TypeId TensorListStack::GetElementDType() const { - return (TypeId)(this->primitive_->value.AsTensorListStack()->elementDType); -} - -int TensorListStack::GetNumElements() const { return this->primitive_->value.AsTensorListStack()->numElements; } - -void TensorListStack::SetElementDType(int type) { this->primitive_->value.AsTensorListStack()->elementDType = type; } - -void TensorListStack::SetNumElements(int num_elements) { - this->primitive_->value.AsTensorListStack()->numElements = num_elements; -} - -int TensorListStack::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TensorListStack; - } - if (this->primitive_->value.type != schema::PrimitiveType_TensorListStack) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TensorListStackT(); - if (attr == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - MS_LOG(ERROR) << "new TensorListStackT value failed"; - return RET_ERROR; - } - if (prim.GetAttr("elementDType") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListStack's attr elementDType is not set"; - return RET_ERROR; - } else { - attr->elementDType = CastToInt(prim.GetAttr("elementDType")).front(); - } - if (prim.GetAttr("numElements") == nullptr) { - delete this->primitive_; - this->primitive_ = nullptr; - delete attr; - MS_LOG(ERROR) << "TensorListStack's attr numElements is not set"; - return RET_ERROR; - } else { - attr->numElements = CastToInt(prim.GetAttr("numElements")).front(); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} -#else -TypeId TensorListStack::GetElementDType() const { - return (TypeId)(this->primitive_->value_as_TensorListStack()->elementDType()); -} - -int TensorListStack::GetNumElements() const { return this->primitive_->value_as_TensorListStack()->numElements(); } - -int TensorListStack::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_TensorListStack(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TensorListStack return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTensorListStack(*fbb, attr->numElements(), attr->elementDType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TensorListStack, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TensorListStackCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TensorListStack>(primitive); -} -Registry TensorListStackRegistry(schema::PrimitiveType_TensorListStack, TensorListStackCreator); -#endif - -bool TensorListStack::IsFullyDefined(const std::vector<int> &shape) const { - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i] < 0) { - return false; - } - } - return true; 
-} - -int TensorListStack::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input0 = reinterpret_cast<TensorList *>(inputs_.front()); - MS_ASSERT(input0 != nullptr); - if (input0->ElementsNum() == 0) { - MS_LOG(ERROR) << "Try to stack a empty tensorlist!"; - return RET_ERROR; - } - auto ele_shape = inputs_[1]; // element shape - MS_ASSERT(ele_shape != nullptr); - if (ele_shape->data_c() == nullptr) { - MS_LOG(ERROR) << "ele_shape->data_c() is nullptr"; - return RET_NULL_PTR; - } - auto ele_shape_ptr = reinterpret_cast<int *>(ele_shape->data_c()); - output_shape_.clear(); - for (int i = 0; i < ele_shape->ElementsNum(); ++i) { - output_shape_.push_back(ele_shape_ptr[i]); - } - - auto status = MergeShape(input0->element_shape()); - if (status == RET_ERROR) { - MS_LOG(ERROR) << "Merge element_shape is error!"; - return RET_ERROR; - } - if (!IsFullyDefined(output_shape_)) { - MS_LOG(ERROR) << "output_shape_ Is Not FullyDefined!"; - return RET_ERROR; - } - if (!IsFullyDefined(input0->element_shape())) { - for (int i = 0; i < input0->ElementsNum(); ++i) { - auto tensor_ele = input0->GetTensor(i); - MS_ASSERT(tensor_ele != nullptr); - if (tensor_ele->data_type() != kTypeUnknown) { - status = MergeShape(tensor_ele->shape()); - if (status == RET_ERROR) { - MS_LOG(ERROR) << "Merge input0->tensors_[" << i << "] is error!"; - return RET_ERROR; - } - } - } - } - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input0->tensors_data_type()); - output_shape_.insert(output_shape_.begin(), input0->ElementsNum()); - output->set_shape(output_shape_); - output->set_format(input0->format()); - return RET_OK; -} - -int TensorListStack::MergeShape(const std::vector<int> &shape) { - size_t dim0 = shape.size(); - size_t dim1 = output_shape_.size(); - if (dim1 >= unKnownRank_ || output_shape_[0] == -1) { - output_shape_ = shape; - return RET_OK; - } - if (dim1 != dim0) { - MS_LOG(ERROR) << "shape.size():" << dim1 << " must be equal output_shape_.size():" << dim0; - return RET_ERROR; - } - for (size_t i = 0; i < dim0; ++i) { - int dim0_size = shape[i]; - int dim1_size = output_shape_[i]; - if (dim0_size >= 0 && dim1_size >= 0 && dim0_size != dim1_size) { - MS_LOG(ERROR) << "shape[" << i << "]:" << dim0_size << " is incompatible with output_shape_[" << i - << "]:" << dim1_size; - return RET_ERROR; - } - output_shape_[i] = dim1_size >= 0 ? dim1_size : dim0_size; - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tensorlist_stack.h b/mindspore/lite/src/ops/tensorlist_stack.h deleted file mode 100644 index b83db1d2c7..0000000000 --- a/mindspore/lite/src/ops/tensorlist_stack.h +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
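`TensorListStack::InferShape` above merges the requested element shape with the list's element_shape (and, while dims remain unknown, with each stored tensor's shape), then prepends the element count, so a list of N tensors of merged shape S stacks to [N] + S. Its `MergeShape` differs from the get-item variant only in the reset case (rank >= unKnownRank_ or a leading -1); incidentally, its mismatch log prints the two sizes with swapped labels, dim1 being output_shape_.size() and dim0 being shape.size(). The final shape assembly as a sketch:

    #include <cassert>
    #include <vector>

    using Shape = std::vector<int>;

    // Output shape of TensorListStack once the element shape is fully
    // defined: [num_elements] + element_shape, e.g. 4 x {2, 3} -> {4, 2, 3}.
    Shape StackOutputShape(int num_elements, const Shape &element_shape) {
      Shape out = element_shape;
      out.insert(out.begin(), num_elements);
      return out;
    }

    int main() {
      assert(StackOutputShape(4, {2, 3}) == Shape({4, 2, 3}));
    }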
- */ -#include <vector> -#include <functional> -#include "src/ops/primitive_c.h" -#include "src/tensorlist.h" -#include "ir/dtype/type_id.h" - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSTACK_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSTACK_H_ -namespace mindspore { -namespace lite { -class TensorListStack : public PrimitiveC { - public: - // tensor:input, element_dtype, num_elements(default=-1:reprent any tensor dim0), element_shape - TensorListStack() = default; - ~TensorListStack() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TensorListStack, PrimitiveC); - void SetElementDType(int type); - void SetNumElements(int num_elements); - explicit TensorListStack(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - TypeId GetElementDType() const; - int GetNumElements() const; - bool IsFullyDefined(const std::vector<int> &shape) const; - int MergeShape(const std::vector<int> &shape); - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - - private: - size_t unKnownRank_ = 255; - std::vector<int> output_shape_; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TENSORLISTSTACK_H_ diff --git a/mindspore/lite/src/ops/tile.cc b/mindspore/lite/src/ops/tile.cc deleted file mode 100644 index 2b6a784a64..0000000000 --- a/mindspore/lite/src/ops/tile.cc +++ /dev/null @@ -1,201 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/tile.h" -#include <limits> -#include <algorithm> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Tile::GetMultiples() const { return this->primitive_->value.AsTile()->multiples; } - -void Tile::SetMultiples(const std::vector<int> &multiples) { this->primitive_->value.AsTile()->multiples = multiples; } - -std::vector<int> Tile::GetDims() const { return this->primitive_->value.AsTile()->dims; } - -void Tile::SetDims(const std::vector<int> &dims) { this->primitive_->value.AsTile()->dims = dims; } - -int Tile::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Tile; - } - if (this->primitive_->value.type != schema::PrimitiveType_Tile) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TileT(); - - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - if (inputs.size() == kAnfPopulaterInputNumTwo) { - auto inputNode = inputs[kAnfPopulaterInputNumOne]; - MS_ASSERT(inputNode != nullptr); - if (inputNode->isa<ValueNode>()) { - auto valueNode = inputNode->cast<ValueNodePtr>(); - MS_ASSERT(valueNode != nullptr); - auto value = valueNode->value(); - MS_ASSERT(value != nullptr); - if (value->isa<ValueTuple>()) { - auto valTuplPtr = dyn_cast<ValueTuple>(value); - MS_ASSERT(valTuplPtr != nullptr); - for (size_t i = 0; i < valTuplPtr->size(); i++) { - auto elem = (*valTuplPtr)[i]; - MS_ASSERT(elem != nullptr); - attr->multiples.emplace_back(CastToInt(elem).front()); - } - } else { - int multiple = CastToInt(value).front(); - attr->multiples = {multiple}; - } - } - } - if (prim.GetAttr("dims") == nullptr) { - MS_LOG(INFO) << "Tile's attr dims is set to default. 
The operator in mindspore has no attribute" - "named dims and all the dimensions needs to be multiplied by default."; - for (size_t i = 0; i < attr->multiples.size(); i++) { - attr->dims.push_back(i); - } - } else { - attr->dims = CastToInt(prim.GetAttr("dims")); - } - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -std::vector<int> Tile::GetMultiples() const { - auto fb_vector = this->primitive_->value_as_Tile()->multiples(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -std::vector<int> Tile::GetDims() const { - auto fb_vector = this->primitive_->value_as_Tile()->dims(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Tile::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Tile(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Tile return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> multiples; - if (attr->multiples() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->multiples()->size()); i++) { - multiples.push_back(attr->multiples()->data()[i]); - } - } - std::vector<int32_t> dims; - if (attr->dims() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->dims()->size()); i++) { - dims.push_back(attr->dims()->data()[i]); - } - } - auto val_offset = schema::CreateTileDirect(*fbb, &multiples, &dims); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Tile, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TileCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Tile>(primitive); } -Registry TileRegistry(schema::PrimitiveType_Tile, TileCreator); -#endif - -int Tile::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - std::vector<int> out_shape; - std::vector<int> multiples; - if (inputs_.size() == 2) { - if (inputs_[1]->data_c() == nullptr) { - MS_LOG(INFO) << "Do infer shape in runtime."; - return RET_INFER_INVALID; - } - int data_num = inputs_[1]->ElementsNum(); - if (data_num > static_cast<int>(input->shape().size())) { - MS_LOG(ERROR) << "multiples data num cannot be larger than input shape size."; - return RET_INPUT_TENSOR_ERROR; - } - multiples.resize(data_num); - memcpy(multiples.data(), inputs_[1]->data_c(), inputs_[1]->Size()); - } else { - multiples = GetMultiples(); - } - if (train_flag()) { - const size_t in_dims = input->shape().size(); - const size_t delta_dims = in_dims - multiples.size(); - - size_t i = 0; - for (; i < delta_dims; ++i) { - int tmp = input->shape().at(i); - out_shape.push_back(tmp); - } - for (; i < in_dims; ++i) { - int tmp = input->shape().at(i) * (multiples[i - delta_dims]); - out_shape.push_back(tmp); - } - } else { - std::vector<int> dims = GetDims(); - if (inputs_.size() == 2 && dims.empty()) { - for (int dim = 0; dim < inputs_[1]->ElementsNum(); ++dim) { - dims.push_back(dim); - } - } - const size_t in_dims = input->shape().size(); - - MS_ASSERT(multiples.size() == dims.size()); - for (size_t i = 0; i < in_dims; ++i) { - out_shape.push_back(input->shape().at(i)); - } - for (size_t i = 0; i < 
dims.size(); ++i) { - if (input->shape().at(dims.at(i)) != 0 && - multiples.at(i) > std::numeric_limits<int>::max() / input->shape().at(dims.at(i))) { - MS_LOG(ERROR) << "The value of multiples[" << i << "] is too big"; - return RET_ERROR; - } - out_shape.at(dims.at(i)) = input->shape().at(dims.at(i)) * (multiples.at(i)); - } - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tile.h b/mindspore/lite/src/ops/tile.h deleted file mode 100644 index 70e266d8a1..0000000000 --- a/mindspore/lite/src/ops/tile.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TILE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TILE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Tile : public PrimitiveC { - public: - Tile() = default; - ~Tile() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Tile, PrimitiveC); - explicit Tile(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetMultiples(const std::vector<int> &multiples); - void SetDims(const std::vector<int> &dims); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetMultiples() const; - std::vector<int> GetDims() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TILE_H_ diff --git a/mindspore/lite/src/ops/topk.cc b/mindspore/lite/src/ops/topk.cc deleted file mode 100644 index 5207e7d64e..0000000000 --- a/mindspore/lite/src/ops/topk.cc +++ /dev/null @@ -1,128 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
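In the non-training branch of `Tile::InferShape` above, each dimension listed in dims is multiplied by the matching entry of multiples, guarded against int overflow; dimensions not listed pass through unchanged. A standalone version of that rule:

    #include <cassert>
    #include <limits>
    #include <optional>
    #include <vector>

    using Shape = std::vector<int>;

    // Mirrors Tile::InferShape's inference branch: out[d] = in[d] * multiple
    // for each (d, multiple) pair, rejecting products that would overflow int.
    std::optional<Shape> TileOutputShape(const Shape &in, const std::vector<int> &dims,
                                         const std::vector<int> &multiples) {
      assert(dims.size() == multiples.size());
      Shape out = in;
      for (size_t i = 0; i < dims.size(); ++i) {
        int d = dims[i];
        if (in.at(d) != 0 && multiples[i] > std::numeric_limits<int>::max() / in.at(d)) {
          return std::nullopt;  // product would overflow
        }
        out.at(d) = in.at(d) * multiples[i];
      }
      return out;
    }

    int main() {
      auto out = TileOutputShape({2, 3}, {0, 1}, {2, 4});
      assert(out && *out == Shape({4, 12}));
    }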
- */ - -#include "src/ops/topk.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int TopK::GetK() const { return this->primitive_->value.AsTopK()->k; } -bool TopK::GetSorted() const { return this->primitive_->value.AsTopK()->sorted; } - -void TopK::SetK(int k) { this->primitive_->value.AsTopK()->k = k; } -void TopK::SetSorted(bool sorted) { this->primitive_->value.AsTopK()->sorted = sorted; } -int TopK::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TopK; - } - if (this->primitive_->value.type != schema::PrimitiveType_TopK) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TopKT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - // the k value of mindspore models is one of inputs instead of an attribute. - attr->k = 0; - if (prim.GetAttr("sorted") != nullptr) { - attr->sorted = GetValue<bool>(prim.GetAttr("sorted")); - } - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else - -int TopK::GetK() const { return this->primitive_->value_as_TopK()->k(); } -bool TopK::GetSorted() const { return this->primitive_->value_as_TopK()->sorted(); } -int TopK::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_TopK(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_TopK return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateTopK(*fbb, attr->k(), attr->sorted()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TopK, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TopKCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<TopK>(primitive); } -Registry TopKRegistry(schema::PrimitiveType_TopK, TopKCreator); - -#endif - -int TopK::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if ((inputs_.size() != kSingleNum && inputs_.size() != kDoubleNum) || outputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - if (input->shape().size() == kQuadrupleNum && input->format() != schema::Format::Format_NHWC) { - MS_LOG(ERROR) << "topk only support NHWC now!"; - return RET_FORMAT_ERR; - } - auto output0 = outputs_.front(); - MS_ASSERT(output0 != nullptr); - auto output1 = outputs_.at(1); - MS_ASSERT(output1 != nullptr); - output0->set_data_type(input->data_type()); - output0->set_format(input->format()); - output1->set_data_type(kNumberTypeInt32); - output1->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto out_shape = input->shape(); - if (inputs_.size() == kSingleNum) { - 
out_shape.at(out_shape.size() - 1) = GetK(); - } else if (inputs_.size() == kDoubleNum) { - if (inputs_.at(1)->data_c() == nullptr) { - return RET_INFER_INVALID; - } else { - int *data = reinterpret_cast<int32_t *>(inputs_.at(1)->data_c()); - out_shape.at(out_shape.size() - 1) = *data; - } - } - if (inputs_.size() == kDoubleNum && inputs_.at(1)->data_c() != nullptr) { - out_shape.at(out_shape.size() - 1) = reinterpret_cast<int *>(inputs_.at(1)->data_c())[0]; - } - output0->set_shape(out_shape); - output1->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/topk.h b/mindspore/lite/src/ops/topk.h deleted file mode 100644 index 6364002c2e..0000000000 --- a/mindspore/lite/src/ops/topk.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TOP_K_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TOP_K_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class TopK : public PrimitiveC { - public: - TopK() = default; - ~TopK() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TopK, PrimitiveC); - explicit TopK(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetK(int k); - void SetSorted(bool sorted); - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetK() const; - bool GetSorted() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TOP_K_H_ diff --git a/mindspore/lite/src/ops/transpose.cc b/mindspore/lite/src/ops/transpose.cc deleted file mode 100644 index 8c322eb25a..0000000000 --- a/mindspore/lite/src/ops/transpose.cc +++ /dev/null @@ -1,164 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
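The deleted TopK::InferShape above amounts to one substitution: both outputs (the values and the int32 indices) take the input shape with the innermost axis replaced by k, whether k comes from the attribute or from the optional second input. A minimal sketch under those assumptions (TopKOutShape is an illustrative name):

#include <vector>

// Output shape = input shape with the last axis set to k; the values output
// and the indices output share this shape.
std::vector<int> TopKOutShape(std::vector<int> in_shape, int k) {
  in_shape.back() = k;
  return in_shape;  // e.g. {1, 5, 10} with k = 3 -> {1, 5, 3}
}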
- */ - -#include "src/ops/transpose.h" -#include <memory> -#include "include/errorcode.h" -#include "src/common/log_adapter.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Transpose::GetPerm() const { return this->primitive_->value.AsTranspose()->perm; } -void Transpose::SetPerm(const std::vector<int> &perm) { this->primitive_->value.AsTranspose()->perm = perm; } - -int Transpose::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_Transpose; - } - if (this->primitive_->value.type != schema::PrimitiveType_Transpose) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TransposeT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new TransposeT failed"; - return RET_ERROR; - } - MS_ASSERT(inputs.size() == kAnfPopulaterInputNumTwo); - auto inputNode = inputs[kAnfPopulaterInputNumOne]; - if (inputNode->isa<ValueNode>()) { - auto valNode = inputNode->cast<ValueNodePtr>(); - MS_ASSERT(valNode != nullptr); - auto val = valNode->value(); - MS_ASSERT(val != nullptr); - if (val->isa<ValueTuple>()) { - auto tuple = val->cast<ValueTuplePtr>(); - MS_ASSERT(tuple != nullptr); - for (size_t i = 0; i < tuple->size(); i++) { - auto elem = tuple->value().at(i); - MS_ASSERT(elem != nullptr); - attr->perm.emplace_back(CastToInt(elem).front()); - } - } - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - } - return RET_OK; -} - -#else - -std::vector<int> Transpose::GetPerm() const { - auto fb_vector = this->primitive_->value_as_Transpose()->perm(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} - -int Transpose::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Transpose(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Transpose return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> perm; - if (attr->perm() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->perm()->size()); i++) { - perm.push_back(attr->perm()->data()[i]); - } - } - - auto val_offset = schema::CreateTransposeDirect(*fbb, &perm, attr->conjugate()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Transpose, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *TransposeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Transpose>(primitive); -} -Registry TransposeRegistry(schema::PrimitiveType_Transpose, TransposeCreator); - -#endif - -int Transpose::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - auto input = inputs_.front(); - auto output = outputs_.front(); - MS_ASSERT(input != nullptr); - MS_ASSERT(output != nullptr); - - std::vector<int> perm = GetPerm(); - if (inputs_.size() == kDoubleNum) { - auto input_perm = inputs_.at(1); - MS_ASSERT(input_perm != nullptr); - if (input_perm->data_c() == nullptr) { - 
return RET_INFER_INVALID; - } - int *perm_data = reinterpret_cast<int *>(input_perm->data_c()); - perm = std::vector<int>{perm_data, perm_data + input_perm->ElementsNum()}; - } - std::vector<int> nchw2nhwc_perm = {0, 2, 3, 1}; - std::vector<int> nhwc2nchw_perm = {0, 3, 1, 2}; - std::vector<int> in_shape = input->shape(); - - output->set_data_type(input->data_type()); - if (input->format() == schema::Format::Format_NCHW && perm == nchw2nhwc_perm) { - output->set_format(schema::Format::Format_NHWC); - } else if (input->format() == schema::Format::Format_NHWC && perm == nhwc2nchw_perm) { - output->set_format(schema::Format::Format_NCHW); - } else { - output->set_format(input->format()); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - if (in_shape.size() != 4 && perm.size() == 4) { - output->set_shape(in_shape); - return RET_OK; - } - std::vector<int> out_shape; - out_shape.resize(perm.size()); - for (size_t i = 0; i < perm.size(); ++i) { - out_shape.at(i) = in_shape.at(perm.at(i)); - } - if (perm.empty()) { - auto shape_size = in_shape.size(); - out_shape.resize(shape_size); - for (size_t i = 0; i < shape_size; ++i) { - out_shape[shape_size - i - 1] = in_shape[i]; - } - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/transpose.h b/mindspore/lite/src/ops/transpose.h deleted file mode 100644 index adb5be37a8..0000000000 --- a/mindspore/lite/src/ops/transpose.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
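The deleted Transpose::InferShape permutes the input shape by perm, with an empty perm meaning "reverse all axes". A minimal standalone sketch of that mapping (TransposeOutShape is an illustrative helper, not part of the codebase):

#include <vector>

// out[i] = in[perm[i]]; an empty perm falls back to reversing every axis,
// matching the fallback branch of the removed implementation.
std::vector<int> TransposeOutShape(const std::vector<int> &in_shape, const std::vector<int> &perm) {
  if (perm.empty()) {
    return std::vector<int>(in_shape.rbegin(), in_shape.rend());
  }
  std::vector<int> out(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    out[i] = in_shape.at(perm[i]);
  }
  return out;  // e.g. NCHW {1, 3, 8, 8} with perm {0, 2, 3, 1} -> NHWC {1, 8, 8, 3}
}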
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_TRANSPOSE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_TRANSPOSE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Transpose : public PrimitiveC { - public: - Transpose() = default; - ~Transpose() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Transpose, PrimitiveC); - explicit Transpose(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetPerm(const std::vector<int> &perm); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetPerm() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_TRANSPOSE_H_ diff --git a/mindspore/lite/src/ops/tuple_get_item.cc b/mindspore/lite/src/ops/tuple_get_item.cc deleted file mode 100644 index 2e4c0925a2..0000000000 --- a/mindspore/lite/src/ops/tuple_get_item.cc +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/tuple_get_item.h" -#include <vector> -#include <memory> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int TupleGetItem::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_TupleGetItem; - } - if (this->primitive_->value.type != schema::PrimitiveType_TupleGetItem) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::TupleGetItemT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int TupleGetItem::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto val_offset = schema::CreateTupleGetItem(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_TupleGetItem, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *TupleGetItemCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<TupleGetItem>(primitive); -} -Registry TupleGetItemRegistry(schema::PrimitiveType_TupleGetItem, TupleGetItemCreator); -#endif -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/tuple_get_item.h b/mindspore/lite/src/ops/tuple_get_item.h deleted file mode 100644 index eb4f8472fd..0000000000 --- a/mindspore/lite/src/ops/tuple_get_item.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_SRC_OPS_TUPLE_GET_ITEM_H_ -#define LITE_MINDSPORE_LITE_SRC_OPS_TUPLE_GET_ITEM_H_ - -#include <vector> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class TupleGetItem : public PrimitiveC { - public: - TupleGetItem() = default; - ~TupleGetItem() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(TupleGetItem, PrimitiveC); - explicit TupleGetItem(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_SRC_OPS_TUPLE_GET_ITEM_H_ diff --git a/mindspore/lite/src/ops/uniform_real.cc b/mindspore/lite/src/ops/uniform_real.cc deleted file mode 100644 index 34fd0da45c..0000000000 --- a/mindspore/lite/src/ops/uniform_real.cc +++ /dev/null @@ -1,101 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/uniform_real.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int UniformReal::GetSeed() const { return this->primitive_->value.AsUniformReal()->seed; } - -int UniformReal::GetSeed2() const { return this->primitive_->value.AsUniformReal()->seed2; } - -int UniformReal::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_UniformReal; - } - if (this->primitive_->value.type != schema::PrimitiveType_UniformReal) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::UniformRealT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - - this->primitive_->value.value = attr; - if (this->primitive_->value.value == nullptr) { - MS_LOG(ERROR) << "primitive value is nullptr"; - return RET_ERROR; - } - } - return RET_OK; -} -#else -int UniformReal::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_UniformReal(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_UniformReal return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateUniformReal(*fbb, attr->seed(), attr->seed2()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_UniformReal, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int 
UniformReal::GetSeed() const { return this->primitive_->value_as_UniformReal()->seed(); } - -int UniformReal::GetSeed2() const { return this->primitive_->value_as_UniformReal()->seed2(); } - -PrimitiveC *UniformRealCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<UniformReal>(primitive); -} -Registry UniformRealRegistry(schema::PrimitiveType_UniformReal, UniformRealCreator); -#endif - -int UniformReal::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (!infer_flag()) { - return RET_INFER_INVALID; - } - auto input_data = static_cast<int32_t *>(inputs_[0]->data_c()); - if (input_data == nullptr) { - return RET_INFER_INVALID; - } - auto input_num = inputs_[0]->ElementsNum(); - std::vector<int> output_shape(input_num); - for (int i = 0; i < input_num; i++) { - output_shape[i] = input_data[i]; - } - outputs_[0]->set_shape(output_shape); - outputs_[0]->set_data_type(kNumberTypeFloat32); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/uniform_real.h b/mindspore/lite/src/ops/uniform_real.h deleted file mode 100644 index ed5a79669a..0000000000 --- a/mindspore/lite/src/ops/uniform_real.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_UNIFORM_REAL_H_ -#define LITE_MINDSPORE_LITE_C_OPS_UNIFORM_REAL_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class UniformReal : public PrimitiveC { - public: - UniformReal() = default; - ~UniformReal() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(UniformReal, PrimitiveC); - explicit UniformReal(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetSeed() const; - int GetSeed2() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_UNIFORM_REAL_H_ diff --git a/mindspore/lite/src/ops/unique.cc b/mindspore/lite/src/ops/unique.cc deleted file mode 100644 index 758f9f7158..0000000000 --- a/mindspore/lite/src/ops/unique.cc +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
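Note the pattern in the deleted UniformReal::InferShape: the output shape comes from the first input's contents, not its shape, so inference must bail out with RET_INFER_INVALID whenever that data is not yet available. A minimal sketch of the data-driven step (UniformRealOutShape is an illustrative name):

#include <cstdint>
#include <vector>

// The first input is a 1-D int32 tensor whose elements are the output dims;
// the operator fixes the output dtype to float32.
std::vector<int> UniformRealOutShape(const int32_t *shape_data, int element_num) {
  return std::vector<int>(shape_data, shape_data + element_num);
  // e.g. input data {2, 3, 4} -> output shape {2, 3, 4}
}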
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/unique.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifndef PRIMITIVE_WRITEABLE -int Unique::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Unique(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Unique return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateUnique(*fbb, attr->outType()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Unique, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *UniqueCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Unique>(primitive); } -Registry UniqueRegistry(schema::PrimitiveType_Unique, UniqueCreator); -#endif - -int Unique::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kDoubleNum) { - MS_LOG(ERROR) << "input size: " << inputs_.size() << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - auto &input = inputs_.at(0); - MS_ASSERT(input != nullptr); - auto &output0 = outputs_.at(0); - MS_ASSERT(output0 != nullptr); - auto &output1 = outputs_.at(1); - MS_ASSERT(output1 != nullptr); - output0->set_data_type(input->data_type()); - output1->set_data_type(kNumberTypeInt32); - output1->set_format(input->format()); - output0->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output0->set_shape(input->shape()); - output1->set_shape(input->shape()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/unique.h b/mindspore/lite/src/ops/unique.h deleted file mode 100644 index dfbb18b89d..0000000000 --- a/mindspore/lite/src/ops/unique.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
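Unique's true output length is only known once the kernel runs, which is why the deleted Unique::InferShape reuses the input shape for both outputs as an upper bound. A sketch of that conservative rule (UniqueOutShapes is an illustrative name):

#include <vector>

// Both the values output and the int32 indices output are given the input
// shape at infer time; the kernel shrinks the values output at run time.
void UniqueOutShapes(const std::vector<int> &in_shape, std::vector<int> *values_shape,
                     std::vector<int> *indices_shape) {
  *values_shape = in_shape;
  *indices_shape = in_shape;
}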
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_UNIQUE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_UNIQUE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Unique : public PrimitiveC { - public: - Unique() = default; - ~Unique() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Unique, PrimitiveC); - explicit Unique(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; - -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_UNIQUE_H_ diff --git a/mindspore/lite/src/ops/unsorted_segment_sum.cc b/mindspore/lite/src/ops/unsorted_segment_sum.cc deleted file mode 100644 index 5cab20288d..0000000000 --- a/mindspore/lite/src/ops/unsorted_segment_sum.cc +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include <memory> -#include "src/ops/unsorted_segment_sum.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE - -int UnsortedSegmentSum::GetNumSegments() const { return this->primitive_->value.AsUnsortedSegmentSum()->numSegments; } - -int UnsortedSegmentSum::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitive error"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_UnsortedSegmentSum; - } - if (this->primitive_->value.type != schema::PrimitiveType_UnsortedSegmentSum) { - MS_LOG(ERROR) << "UnSortedSegmentSum primitive value type : " - << schema::EnumNamePrimitiveType(primitive_->value.type) << "is not equal" - << schema::EnumNamePrimitiveType(schema::PrimitiveType_UnsortedSegmentSum); - delete this->primitive_; - this->primitive_ = nullptr; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - std::unique_ptr<schema::UnsortedSegmentSumT> attr = std::make_unique<schema::UnsortedSegmentSumT>(); - if (inputs.at(2)->isa<ValueNode>()) { - ValuePtr value = inputs.at(2)->cast<ValueNodePtr>()->value(); - attr->numSegments = CastToInt(value).front(); - this->primitive_->value.value = attr.release(); - } - } - return RET_OK; -} -#else -int UnsortedSegmentSum::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_UnsortedSegmentSum(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_UnsortedSegmentSum return nullptr"; - return RET_ERROR; - } - int num_segments = attr->numSegments(); 
- auto val_offset = schema::CreateUnsortedSegmentSum(*fbb, num_segments); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_UnsortedSegmentSum, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -int UnsortedSegmentSum::GetNumSegments() const { - int ret = this->primitive_->value_as_UnsortedSegmentSum()->numSegments(); - return ret; -} - -PrimitiveC *UnsortedSegmentSumCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<UnsortedSegmentSum>(primitive); -} -Registry UnsortedSegmentSumRegistry(schema::PrimitiveType_UnsortedSegmentSum, UnsortedSegmentSumCreator); -#endif -int UnsortedSegmentSum::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - // check inputs and outputs - if (inputs_.size() != 3) { - MS_LOG(ERROR) << "invalid inputs numbers"; - return RET_ERROR; - } - if (outputs_.size() != 1) { - MS_LOG(ERROR) << "invalid outputs numbers"; - return RET_ERROR; - } - Tensor *out = outputs_.front(); - Tensor *x = inputs_.front(); - Tensor *segment_id = inputs_.at(1); - std::vector<int> x_shape = x->shape(); - std::vector<int> segment_id_shape = segment_id->shape(); - int num_segments = GetNumSegments(); - std::vector<int> output_shape; - output_shape.push_back(num_segments); - for (int index = segment_id_shape.size(); index < static_cast<int>(x_shape.size()); index++) { - output_shape.push_back(x_shape.at(index)); - } - out->set_shape(output_shape); - out->set_format(x->format()); - out->set_data_type(x->data_type()); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/unsorted_segment_sum.h b/mindspore/lite/src/ops/unsorted_segment_sum.h deleted file mode 100644 index 3524c67649..0000000000 --- a/mindspore/lite/src/ops/unsorted_segment_sum.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
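The deleted UnsortedSegmentSum::InferShape collapses the axes covered by segment_ids into a single num_segments axis and keeps the trailing axes of x. A minimal sketch of that arithmetic (the helper name is illustrative):

#include <vector>

// Output shape = {num_segments} + x_shape[segment_id_rank:]; the leading
// segment_id_rank axes of x are summed away into segment buckets.
std::vector<int> UnsortedSegmentSumOutShape(const std::vector<int> &x_shape,
                                            size_t segment_id_rank, int num_segments) {
  std::vector<int> out{num_segments};
  for (size_t i = segment_id_rank; i < x_shape.size(); ++i) {
    out.push_back(x_shape[i]);
  }
  return out;  // e.g. x {4, 5}, segment_ids of rank 1, num_segments 3 -> {3, 5}
}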
- */ - -#include <vector> -#include <set> -#include <cmath> -#include "src/ops/primitive_c.h" -#ifndef LITE_SRC_OPS_UNSORTED_SEGMENT_SUM_H_ -#define LITE_SRC_OPS_UNSORTED_SEGMENT_SUM_H_ -namespace mindspore { -namespace lite { -class UnsortedSegmentSum : public PrimitiveC { - public: - UnsortedSegmentSum() = default; - ~UnsortedSegmentSum() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(UnsortedSegmentSum, PrimitiveC); - explicit UnsortedSegmentSum(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - int GetNumSegments() const; -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; - - int GetNumSegments() const; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore -#endif // LITE_SRC_OPS_UNSORTED_SEGMENT_SUM_H_ diff --git a/mindspore/lite/src/ops/unsqueeze.cc b/mindspore/lite/src/ops/unsqueeze.cc deleted file mode 100644 index dbeb8b470e..0000000000 --- a/mindspore/lite/src/ops/unsqueeze.cc +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/unsqueeze.h" -#include "include/errorcode.h" -#include "src/common/log_adapter.h" -#include "src/tensor.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<int> Unsqueeze::GetAxis() const { return this->primitive_->value.AsUnsqueeze()->axis; } - -void Unsqueeze::SetAxis(const std::vector<int> &axis) { this->primitive_->value.AsUnsqueeze()->axis = axis; } - -#else -bool predicate(int n) { return n != 1; } -std::vector<int> Unsqueeze::GetAxis() const { - auto fb_vector = this->primitive_->value_as_Unsqueeze()->axis(); - return std::vector<int>(fb_vector->begin(), fb_vector->end()); -} -int Unsqueeze::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Unsqueeze(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Unsqueeze return nullptr"; - return RET_ERROR; - } - std::vector<int32_t> axis; - if (attr->axis() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->axis()->size()); i++) { - axis.push_back(attr->axis()->data()[i]); - } - } - auto val_offset = schema::CreateUnsqueezeDirect(*fbb, &axis); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Unsqueeze, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *UnsqueezeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Unsqueeze>(primitive); -} -Registry UnsqueezeRegistry(schema::PrimitiveType_Unsqueeze, UnsqueezeCreator); - -#endif - -int Unsqueeze::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "input size is invalid"; - } - if (outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "output size is invalid"; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - - auto dims = GetAxis(); - auto in_shape = input->shape(); - auto in_rank = in_shape.size(); - auto dim_rank = GetAxis().size(); - std::vector<int> out_shape; - if (dim_rank == 0) { - for (auto d : in_shape) { - if (d != 1) { - out_shape.push_back(d); - } - } - } else { - auto sz = in_rank + dim_rank; - size_t in_itr = 0; - size_t ax_itr = 0; - for (size_t i = 0; i < sz; i++) { - if (ax_itr < dim_rank && dims.at(ax_itr) == static_cast<int>(i)) { - out_shape.emplace_back(1); - ax_itr++; - } else if (ax_itr < dim_rank && dims.at(ax_itr) + sz == i) { - out_shape.emplace_back(1); - ax_itr++; - } else { - out_shape.emplace_back(in_shape.at(in_itr)); - in_itr++; - } - } - } - output->set_shape(out_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/unsqueeze.h b/mindspore/lite/src/ops/unsqueeze.h deleted file mode 100644 index 927417a226..0000000000 --- a/mindspore/lite/src/ops/unsqueeze.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_UNSQUEEZE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_UNSQUEEZE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Unsqueeze : public PrimitiveC { - public: - Unsqueeze() = default; - ~Unsqueeze() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Unsqueeze, PrimitiveC); - explicit Unsqueeze(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(const std::vector<int> &axis); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<int> GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_UNSQUEEZE_H_ diff --git a/mindspore/lite/src/ops/unstack.cc b/mindspore/lite/src/ops/unstack.cc deleted file mode 100644 index 7913476f25..0000000000 --- a/mindspore/lite/src/ops/unstack.cc +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/unstack.h" -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -int Unstack::GetAxis() const { return this->primitive_->value.AsUnstack()->axis; } - -void Unstack::SetAxis(int axis) { this->primitive_->value.AsUnstack()->axis = axis; } - -#else - -int Unstack::GetAxis() const { return this->primitive_->value_as_Unstack()->axis(); } -int Unstack::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Unstack(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Unstack return nullptr"; - return RET_ERROR; - } - auto val_offset = schema::CreateUnstack(*fbb, attr->num(), attr->axis()); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Unstack, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *UnstackCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Unstack>(primitive); } -Registry UnstackRegistry(schema::PrimitiveType_Unstack, UnstackCreator); -#endif - -int Unstack::InferShape(std::vector<Tensor *> inputs, std::vector<Tensor *> outputs) { - auto input = inputs.at(0); - MS_ASSERT(input != nullptr); - auto input_shape = input->shape(); - - auto axis = GetAxis() < 0 ? GetAxis() + input_shape.size() : GetAxis(); - if (axis < 0 || axis >= input_shape.size()) { - MS_LOG(ERROR) << "Invalid axis " << GetAxis(); - return RET_PARAM_INVALID; - } - for (auto &out : outputs) { - MS_ASSERT(out != nullptr); - out->set_data_type(input->data_type()); - out->set_format(input->format()); - } - if (!infer_flag()) { - return RET_INFER_INVALID; - } - std::vector<int> output_shape; - for (size_t i = 0; i < input_shape.size(); ++i) { - if (i != axis) { - output_shape.push_back(input_shape.at(i)); - } - } - for (auto &out : outputs) { - MS_ASSERT(out != nullptr); - out->set_shape(output_shape); - } - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/unstack.h b/mindspore/lite/src/ops/unstack.h deleted file mode 100644 index 9dd73df784..0000000000 --- a/mindspore/lite/src/ops/unstack.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_UNSTACK_H_ -#define LITE_MINDSPORE_LITE_C_OPS_UNSTACK_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Unstack : public PrimitiveC { - public: - Unstack() = default; - ~Unstack() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Unstack, PrimitiveC); - explicit Unstack(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetAxis(int axis); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetAxis() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_UNSTACK_H_ diff --git a/mindspore/lite/src/ops/upsample.cc b/mindspore/lite/src/ops/upsample.cc deleted file mode 100644 index 913c968ef7..0000000000 --- a/mindspore/lite/src/ops/upsample.cc +++ /dev/null @@ -1,102 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "src/ops/upsample.h" -#include <string> - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::string Upsample::GetMode() const { return this->primitive_->value.AsUpsample()->mode; } -std::vector<float> Upsample::GetScales() const { return this->primitive_->value.AsUpsample()->scales; } - -void Upsample::SetMode(std::string mode) { this->primitive_->value.AsUpsample()->mode = mode; } -void Upsample::SetScales(const std::vector<float> &scales) { this->primitive_->value.AsUpsample()->scales = scales; } - -#else - -std::string Upsample::GetMode() const { return this->primitive_->value_as_Upsample()->mode()->str(); } -std::vector<float> Upsample::GetScales() const { - auto fb_vector = this->primitive_->value_as_Upsample()->scales(); - return std::vector<float>(fb_vector->begin(), fb_vector->end()); -} -int Upsample::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Upsample(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Upsample return nullptr"; - return RET_ERROR; - } - std::vector<float> scales; - if (attr->scales() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->scales()->size()); i++) { - scales.push_back(attr->scales()->data()[i]); - } - } - auto val_offset = schema::CreateUpsampleDirect(*fbb, attr->mode()->c_str(), &scales); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Upsample, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} -PrimitiveC *UpsampleCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<Upsample>(primitive); -} -Registry UpsampleRegistry(schema::PrimitiveType_Upsample, 
UpsampleCreator); - -#endif -int Upsample::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - auto input_tensor = inputs_.at(0); - MS_ASSERT(input_tensor); - auto input_shape = input_tensor->shape(); - if (input_shape.size() != 4) { - MS_LOG(ERROR) << "Upsample InferShape input tensor rank should be 4"; - return RET_INFER_ERR; - } - auto scale_tensor = inputs_.at(1); - MS_ASSERT(scale_tensor); - auto scale_shape = scale_tensor->shape(); - if (scale_shape.size() != 1 || scale_shape.at(0) != 4) { - MS_LOG(ERROR) << "Upsample scale tensor should be 1-D with 4 elements"; - return RET_INFER_ERR; - } - auto scale = reinterpret_cast<float *>(scale_tensor->data_c()); - if (scale == nullptr) { - MS_LOG(ERROR) << "Upsample scale data nullptr"; - return RET_INFER_INVALID; - } - - std::vector<int> out_shape = input_shape; // n, h, w, c; n, c not changed, h = floor(input_h * scale_h). - int new_height = static_cast<int>(floor(input_shape.at(1) * scale[1])); - MS_ASSERT(new_height > 0); - int new_width = static_cast<int>(floor(input_shape.at(2) * scale[2])); - MS_ASSERT(new_width > 0); - out_shape.at(1) = new_height; - out_shape.at(2) = new_width; - - auto out_tensor = outputs_.at(0); - MS_ASSERT(out_tensor); - out_tensor->set_shape(out_shape); - out_tensor->set_data_type(input_tensor->data_type()); - return RET_OK; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/upsample.h b/mindspore/lite/src/ops/upsample.h deleted file mode 100644 index dcd08863fc..0000000000 --- a/mindspore/lite/src/ops/upsample.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
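The deleted Upsample::InferShape scales only H and W of an NHWC input, flooring the products. A minimal sketch, assuming the 4-element {n, h, w, c} scale vector has already been read from the second input (UpsampleOutShape is an illustrative name):

#include <cmath>
#include <vector>

// N and C pass through; H and W become floor(dim * scale).
std::vector<int> UpsampleOutShape(const std::vector<int> &nhwc_shape, const float scales[4]) {
  std::vector<int> out = nhwc_shape;
  out[1] = static_cast<int>(std::floor(nhwc_shape[1] * scales[1]));  // H
  out[2] = static_cast<int>(std::floor(nhwc_shape[2] * scales[2]));  // W
  return out;  // e.g. {1, 16, 16, 3} with scales {1, 2, 2, 1} -> {1, 32, 32, 3}
}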
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_UPSAMPLE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_UPSAMPLE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <string> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Upsample : public PrimitiveC { - public: - Upsample() = default; - ~Upsample() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Upsample, PrimitiveC); - explicit Upsample(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetMode(std::string mode); - void SetScales(const std::vector<float> &scales); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; - -#endif - std::string GetMode() const; - std::vector<float> GetScales() const; - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_UPSAMPLE_H_ diff --git a/mindspore/lite/src/ops/where.cc b/mindspore/lite/src/ops/where.cc deleted file mode 100644 index 95029d851d..0000000000 --- a/mindspore/lite/src/ops/where.cc +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/where.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE -std::vector<bool> Where::GetCondition() const { return this->primitive_->value.AsWhere()->condition; } - -void Where::SetCondition(const std::vector<bool> &condition) { - this->primitive_->value.AsWhere()->condition = condition; -} - -#else - -std::vector<bool> Where::GetCondition() const { - auto fb_vector = this->primitive_->value_as_Where()->condition(); - return std::vector<bool>(fb_vector->begin(), fb_vector->end()); -} -int Where::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_Where(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_Where return nullptr"; - return RET_ERROR; - } - std::vector<uint8_t> condition; - if (attr->condition() != nullptr) { - for (int i = 0; i < static_cast<int>(attr->condition()->size()); i++) { - condition.push_back(attr->condition()->data()[i]); - } - } - auto val_offset = schema::CreateWhereDirect(*fbb, &condition); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_Where, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *WhereCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<Where>(primitive); } -Registry WhereRegistry(schema::PrimitiveType_Where, WhereCreator); - -#endif - -int Where::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - // Need to dynamically allocate at runtime. - if (inputs_.size() == kSingleNum) { - return RET_INFER_INVALID; - } - - if (inputs_.size() < kTripleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "where input or output number invalid, Input size:" << inputs_.size() - << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - - auto input0 = inputs_.at(0); - auto input1 = inputs_.at(1); - auto input2 = inputs_.at(2); - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - int num = input0->ElementsNum(); - int num1 = input1->ElementsNum(); - int num2 = input2->ElementsNum(); - int nummax = num > num1 ? num : (num1 > num2 ? 
num1 : num2); - auto shape_tmp = inputs_.at(0)->shape(); - auto shape_tmp1 = inputs_.at(1)->shape(); - auto shape_tmp2 = inputs_.at(2)->shape(); - int axisout = 0; - size_t temp = 0; - for (size_t j = 0; j < shape_tmp.size(); j++) { - if (shape_tmp.at(j) == shape_tmp1.at(j) && shape_tmp.at(j) != shape_tmp2.at(j)) { - axisout = j; - break; - } - if (shape_tmp.at(j) == shape_tmp2.at(j) && shape_tmp.at(j) != shape_tmp1.at(j)) { - axisout = j; - break; - } - if (shape_tmp1.at(j) == shape_tmp2.at(j) && shape_tmp.at(j) != shape_tmp1.at(j)) { - axisout = j; - break; - } - temp += 1; - if (temp == shape_tmp.size()) { - outputs_.at(0)->set_shape(shape_tmp); - output->set_data_type(input->data_type()); - return RET_OK; - } - } - auto output_shape = shape_tmp; - output_shape.at(axisout) = nummax; - outputs_.at(0)->set_shape(output_shape); - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/where.h b/mindspore/lite/src/ops/where.h deleted file mode 100644 index 5976ce9ccd..0000000000 --- a/mindspore/lite/src/ops/where.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class Where : public PrimitiveC { - public: - Where() = default; - ~Where() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(Where, PrimitiveC); - explicit Where(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - void SetCondition(const std::vector<bool> &condition); -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - std::vector<bool> GetCondition() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ diff --git a/mindspore/lite/src/ops/while.cc b/mindspore/lite/src/ops/while.cc deleted file mode 100644 index 60b1537942..0000000000 --- a/mindspore/lite/src/ops/while.cc +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
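The effective rule in the deleted Where::InferShape: the three input shapes must agree on all but (at most) one axis, and that axis is widened (the removed code used the largest total element count among the inputs as the new extent). A simplified sketch of that rule, assuming equal ranks; WhereOutShape is an illustrative name, and its single condition condenses the original's three pairwise comparisons:

#include <vector>

// Start from the condition's shape; the first axis where the inputs disagree
// takes the widened extent, and the remaining axes are assumed to match.
std::vector<int> WhereOutShape(const std::vector<int> &cond, const std::vector<int> &x,
                               const std::vector<int> &y, int widened_extent) {
  std::vector<int> out = cond;
  for (size_t i = 0; i < cond.size(); ++i) {
    if (cond[i] != x[i] || cond[i] != y[i] || x[i] != y[i]) {
      out[i] = widened_extent;
      break;
    }
  }
  return out;
}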
- */ - -#include "src/ops/while.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { -#ifdef PRIMITIVE_WRITEABLE - -void While::SetCondSubgraphIndex(const int cond_subgraph_index) { - this->primitive_->value.AsWhile()->condSubgraphIndex = cond_subgraph_index; -} -void While::SetBodySubgraphIndex(const int body_subgraph_index) { - this->primitive_->value.AsWhile()->bodySubgraphIndex = body_subgraph_index; -} - -int While::GetCondSubgraphIndex() const { return this->primitive_->value.AsWhile()->condSubgraphIndex; } -int While::GetBodySubgraphIndex() const { return this->primitive_->value.AsWhile()->bodySubgraphIndex; } - -int While::UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) { - if (this->primitive_ == nullptr) { - this->primitive_ = new (std::nothrow) schema::PrimitiveT; - if (this->primitive_ == nullptr) { - MS_LOG(ERROR) << "new primitiveT failed"; - return RET_ERROR; - } - this->primitive_->value.type = schema::PrimitiveType_While; - } - if (this->primitive_->value.type != schema::PrimitiveType_While) { - MS_LOG(ERROR) << "Primitive type is error :" << this->primitive_->value.type; - return RET_ERROR; - } - if (this->primitive_->value.value == nullptr) { - auto attr = new (std::nothrow) schema::WhileT(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new primitiveT value failed"; - return RET_ERROR; - } - attr->bodySubgraphIndex = GetValue<bool>(prim.GetAttr("body_subgraph_index")); - attr->condSubgraphIndex = GetValue<bool>(prim.GetAttr("cond_subgraph_index")); - this->primitive_->value.value = attr; - } - return RET_OK; -} - -#else - -int While::GetCondSubgraphIndex() const { return this->primitive_->value_as_While()->condSubgraphIndex(); } -int While::GetBodySubgraphIndex() const { return this->primitive_->value_as_While()->bodySubgraphIndex(); } - -int While::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - auto attr = primitive->value_as_While(); - if (attr == nullptr) { - MS_LOG(ERROR) << "value_as_While return nullptr"; - return RET_ERROR; - } - auto cond_subgraph_index = attr->condSubgraphIndex(); - auto body_subgraph_index = attr->bodySubgraphIndex(); - auto val_offset = schema::CreateWhile(*fbb, body_subgraph_index, cond_subgraph_index); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_While, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *WhileCreator(const schema::Primitive *primitive) { return PrimitiveC::NewPrimitiveC<While>(primitive); } -Registry WhileRegistry(schema::PrimitiveType_While, WhileCreator); - -#endif - -int While::InferShape(std::vector<Tensor *> inputs_, std::vector<Tensor *> outputs_) { - if (inputs_.size() != outputs_.size()) { - MS_LOG(ERROR) << "The number of inputs and outputs varies"; - return RET_ERROR; - } - for (size_t i = 0; i < inputs_.size(); i++) { - outputs_.at(i)->set_data_type(inputs_.at(i)->data_type()); - outputs_.at(i)->set_format(inputs_.at(i)->format()); - outputs_.at(i)->set_shape(inputs_.at(i)->shape()); - } - - return RET_OK; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/while.h b/mindspore/lite/src/ops/while.h deleted file mode 100644 index 113cb121e6..0000000000 --- a/mindspore/lite/src/ops/while.h +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright 2019-2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 
2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_WHILE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_WHILE_H_ - -#include <vector> -#include <set> -#include <cmath> -#include <memory> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class While : public PrimitiveC { - public: - While() = default; - ~While() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(While, PrimitiveC); - explicit While(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} - int UnPackAttr(const Primitive &prim, const std::vector<AnfNodePtr> &inputs) override; - void SetCondSubgraphIndex(const int cond_subgraph_index); - void SetBodySubgraphIndex(const int body_subgraph_index); - -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; - int GetCondSubgraphIndex() const; - int GetBodySubgraphIndex() const; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_WHERE_H_ diff --git a/mindspore/lite/src/ops/zeros_like.cc b/mindspore/lite/src/ops/zeros_like.cc deleted file mode 100644 index 9e1b656ffc..0000000000 --- a/mindspore/lite/src/ops/zeros_like.cc +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "src/ops/zeros_like.h" - -#ifndef PRIMITIVE_WRITEABLE -#include "src/ops/ops_register.h" -#endif - -namespace mindspore { -namespace lite { - -#ifdef PRIMITIVE_WRITEABLE -#else -int ZerosLike::UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) { - MS_ASSERT(nullptr != primitive); - MS_ASSERT(nullptr != fbb); - - auto val_offset = schema::CreateZerosLike(*fbb); - auto prim_offset = schema::CreatePrimitive(*fbb, schema::PrimitiveType_ZerosLike, val_offset.o); - fbb->Finish(prim_offset); - return RET_OK; -} - -PrimitiveC *ZerosLikeCreator(const schema::Primitive *primitive) { - return PrimitiveC::NewPrimitiveC<ZerosLike>(primitive); -} -Registry ZerosLikeRegistry(schema::PrimitiveType_ZerosLike, ZerosLikeCreator); - -#endif - -int ZerosLike::InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) { - MS_ASSERT(this->primitive_ != nullptr); - auto input = inputs_.front(); - MS_ASSERT(input != nullptr); - auto output = outputs_.front(); - MS_ASSERT(output != nullptr); - if (inputs_.size() != kSingleNum || outputs_.size() != kSingleNum) { - MS_LOG(ERROR) << "zeroslike input or output number invalid, Input size:" << inputs_.size() - << ", output size: " << outputs_.size(); - return RET_INPUT_TENSOR_ERROR; - } - output->set_data_type(input->data_type()); - output->set_format(input->format()); - if (!infer_flag()) { - return RET_INFER_INVALID; - } - output->set_shape(input->shape()); - return RET_OK; -} - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/src/ops/zeros_like.h b/mindspore/lite/src/ops/zeros_like.h deleted file mode 100644 index 199598cfd7..0000000000 --- a/mindspore/lite/src/ops/zeros_like.h +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef LITE_MINDSPORE_LITE_C_OPS_ZEROS_LIKE_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ZEROS_LIKE_H_ - -#include <vector> -#include <set> -#include <cmath> - -#include "src/ops/primitive_c.h" - -namespace mindspore { -namespace lite { -class ZerosLike : public PrimitiveC { - public: - ZerosLike() = default; - ~ZerosLike() = default; -#ifdef PRIMITIVE_WRITEABLE - MS_DECLARE_PARENT(ZerosLike, PrimitiveC); - explicit ZerosLike(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} -#else - int UnPackToFlatBuilder(const schema::Primitive *primitive, flatbuffers::FlatBufferBuilder *fbb) override; -#endif - int InferShape(std::vector<lite::Tensor *> inputs_, std::vector<lite::Tensor *> outputs_) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // LITE_MINDSPORE_LITE_C_OPS_ZEROS_LIKE_H_ diff --git a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc index e6982780d1..704adc3fab 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc +++ b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -119,7 +119,7 @@ int ConverterToNPUEltwiseMode(schema::EltwiseMode mode) { mode_num = 2; break; default: - MS_LOG(ERROR) << "Unsupport Eltwise mode."; + MS_LOG(ERROR) << "Unsupported Eltwise mode."; } return mode_num; } diff --git a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h index 19bfcac060..32b63f0928 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h +++ b/mindspore/lite/src/runtime/agent/npu/npu_converter_utils.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/agent/npu/npu_executor.cc b/mindspore/lite/src/runtime/agent/npu/npu_executor.cc index e0150316a7..b88f5df9f8 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_executor.cc +++ b/mindspore/lite/src/runtime/agent/npu/npu_executor.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/agent/npu/npu_executor.h b/mindspore/lite/src/runtime/agent/npu/npu_executor.h index b6d107f347..5ecdb94e8f 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_executor.h +++ b/mindspore/lite/src/runtime/agent/npu/npu_executor.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
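Aside on the deletions above: the per-op C++ InferShape methods removed with these IR classes (Where, While, ZerosLike) are superseded by C-style infer functions dispatched through the registry this patch introduces in src/runtime/infer_manager.{cc,h} below; ZerosLike, for example, is re-registered there against CommonInferShape, and While against WhileInferShape. Purely as an illustration of that new style (a sketch, not code from the patch), a ZerosLike-like infer function would look roughly as follows, assuming the nnacl TensorC fields and an OpParameter infer_flag_ matching the deleted code's infer_flag():

```cpp
// Illustrative sketch only -- not code from this patch. It mirrors what the
// deleted ZerosLike::InferShape did, rewritten against the C-style signature
// that the new registry expects (see the InferShape typedef in infer_manager.h
// below). The actual function registered for ZerosLike is CommonInferShape
// from nnacl/infer/common_infer.h.
#include "nnacl/errorcode.h"
#include "nnacl/op_base.h"
#include "nnacl/tensor_c.h"

int ZerosLikeInferShapeSketch(const TensorC *const *inputs, size_t inputs_size, TensorC **outputs,
                              size_t outputs_size, OpParameter *parameter) {
  if (inputs_size != 1 || outputs_size != 1) {
    return NNACL_ERR;
  }
  outputs[0]->data_type_ = inputs[0]->data_type_;
  outputs[0]->format_ = inputs[0]->format_;
  if (!parameter->infer_flag_) {
    // Same contract as the deleted C++ code's infer_flag() check: metadata is
    // set, but shapes are not yet known.
    return NNACL_INFER_INVALID;
  }
  outputs[0]->shape_size_ = inputs[0]->shape_size_;
  for (size_t i = 0; i < inputs[0]->shape_size_; ++i) {
    outputs[0]->shape_[i] = inputs[0]->shape_[i];  // ZerosLike output mirrors its input shape
  }
  return NNACL_OK;
}
```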
diff --git a/mindspore/lite/src/runtime/agent/npu/npu_manager.cc b/mindspore/lite/src/runtime/agent/npu/npu_manager.cc index 27bfa48b9a..7f0092ac3c 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_manager.cc +++ b/mindspore/lite/src/runtime/agent/npu/npu_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/agent/npu/npu_manager.h b/mindspore/lite/src/runtime/agent/npu/npu_manager.h index c06dc006af..776a6534ef 100644 --- a/mindspore/lite/src/runtime/agent/npu/npu_manager.h +++ b/mindspore/lite/src/runtime/agent/npu/npu_manager.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,9 +28,8 @@ namespace mindspore::lite { static std::set<mindspore::schema::PrimitiveType> npu_trans_nodes = { - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_DeDepthwiseConv2D, - schema::PrimitiveType_Resize, schema::PrimitiveType_Pooling}; + schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_Conv2dTransposeFusion, schema::PrimitiveType_Resize, + schema::PrimitiveType_MaxPoolFusion, schema::PrimitiveType_AvgPoolFusion}; struct SubGraphModel { public: SubGraphModel(int index, std::string model_name, domi::ModelBufferData *model_buffer_data) diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_base_pass.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_base_pass.h index 5087cad99d..2ba4975f36 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_base_pass.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_base_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc index aba0a415a3..42777037ad 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.cc @@ -267,7 +267,7 @@ int NPUFusionPass::Run() { i -= kernel->in_kernels().size(); ConcatFusion(kernel); continue; - case schema::PrimitiveType_Add: + case schema::PrimitiveType_AddFusion: case schema::PrimitiveType_Activation: case schema::PrimitiveType_Eltwise: i -= kernel->in_kernels().size(); diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.h index f895b66dac..2196263972 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,7 +18,6 @@ #define MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_FUSION_PASS_H_ #include <vector> #include "src/lite_kernel.h" -#include "src/ops/primitive_c.h" #include "src/runtime/agent/npu/optimizer/npu_base_pass.h" namespace mindspore::lite { class NPUFusionPass : public NPUBasePass { diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc index 83785055c8..3277380fc1 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.cc @@ -21,9 +21,9 @@ namespace mindspore::lite { using kernel::KERNEL_ARCH::kNPU; enum InsertState { InsertNone, PreInsert, PostInsert, BothInsert }; -std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = {schema::PrimitiveType_Concat, schema::PrimitiveType_Add, - schema::PrimitiveType_Eltwise, - schema::PrimitiveType_Activation}; +std::set<mindspore::schema::PrimitiveType> npu_insert_nodes = { + schema::PrimitiveType_Concat, schema::PrimitiveType_AddFusion, schema::PrimitiveType_Eltwise, + schema::PrimitiveType_Activation}; // this pass goal is to minimize subgraphs generated // by inserting nchw2nhwc or nhwc2nchw before or after the operator (e.g. concat, add, etc..) together with // fusion pass. If transpose inserted are more than half of input output, we will insert remaining input @@ -138,14 +138,32 @@ int NPUInsertTransformPass::InsertNode(kernel::LiteKernel *kernel, kernel::LiteK auto *nh2nc_kernel = NPUPassUtils::CreateNhwc2NchwKernel({in_tensor}, nh2nc_tensors, context_, nh2nc_name); trans_kernels->push_back(nh2nc_kernel); - insert_primitive_.push_back(nh2nc_kernel->GetPrimitive()); auto *nc2nh_kernel = NPUPassUtils::CreateNchw2NhwcKernel(nh2nc_tensors, nc2nh_tensors, context_, nc2nh_name); trans_kernels->push_back(nc2nh_kernel); - insert_primitive_.push_back(nc2nh_kernel->GetPrimitive()); - NPUPassUtils::UpdateKernel(nh2nc_kernel, in_kernels, {nc2nh_kernel}, {in_tensor}, nh2nc_tensors); - NPUPassUtils::UpdateKernel(nc2nh_kernel, {nh2nc_kernel}, out_kernels, nh2nc_tensors, nc2nh_tensors); + auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nh2nc_data = nh2nc_perm_tensor->MutableData(); + if (nh2nc_data == nullptr) { + return RET_ERROR; + } + std::vector<int> nh2nc_perm_vector = {0, 3, 1, 2}; + memcpy(nh2nc_data, nh2nc_perm_vector.data(), 4 * sizeof(int)); + all_tensors_->push_back(nh2nc_perm_tensor); + + auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nc2nh_data = nc2nh_perm_tensor->MutableData(); + if (nc2nh_data == nullptr) { + return RET_ERROR; + } + + std::vector<int> nc2nh_perm_vector = {0, 2, 3, 1}; + memcpy(nc2nh_data, nc2nh_perm_vector.data(), 4 * sizeof(int)); + all_tensors_->push_back(nc2nh_perm_tensor); + + NPUPassUtils::UpdateKernel(nh2nc_kernel, in_kernels, {nc2nh_kernel}, {in_tensor, nh2nc_perm_tensor}, nh2nc_tensors); + NPUPassUtils::UpdateKernel(nc2nh_kernel, {nh2nc_kernel}, out_kernels, {nh2nc_tensors[0], nc2nh_perm_tensor}, + nc2nh_tensors); if (kernel != nullptr) { NPUPassUtils::UpdateNH2NCTransNodePreKernel(kernel, nh2nc_kernel, post_kernel); } diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.h index adc2d09027..32cdee6699 100644 --- 
a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_insert_transform_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ #define MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_INSERT_TRANSFORM_PASS_H_ #include <vector> #include "src/lite_kernel.h" -#include "src/ops/primitive_c.h" #include "src/runtime/agent/npu/optimizer/npu_base_pass.h" namespace mindspore::lite { @@ -32,12 +31,6 @@ class NPUInsertTransformPass : public NPUBasePass { name_ = "NPUInsertTransformPass"; } - ~NPUInsertTransformPass() override { - for (auto primitive : insert_primitive_) { - delete primitive; - } - insert_primitive_.clear(); - } int Run() override; private: @@ -58,7 +51,6 @@ class NPUInsertTransformPass : public NPUBasePass { const InnerContext *context_; std::vector<kernel::LiteKernel *> *all_kernels_; std::vector<Tensor *> *all_tensors_; - std::vector<const PrimitiveC *> insert_primitive_; }; } // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_INSERT_TRANSFORM_PASS_H_ diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.cc index ab555c599d..eecb62252c 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.h index 945c3a65b5..e370d144ce 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_manager.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
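One pattern repeats twice in the InsertNode hunk above and twice more in npu_transform_pass.cc below: with the transpose primitive gone, each new Transpose kernel receives its permutation as a const int32 input tensor. A factored-out helper would capture it; this is a hypothetical sketch (BuildPermTensor is not a name the patch introduces), reusing exactly the types and constants the inlined blocks use:

```cpp
// Hypothetical helper, not part of the patch; shown in the same namespace and
// context as the passes above.
#include <cstring>
#include <vector>
#include "src/tensor.h"  // lite::Tensor, already pulled in by the pass sources

namespace mindspore::lite {
Tensor *BuildPermTensor(const std::vector<int> &perm, std::vector<Tensor *> *all_tensors) {
  // Const 4-element int32 tensor holding a transpose permutation,
  // e.g. {0, 3, 1, 2} for Nhwc2Nchw or {0, 2, 3, 1} for Nchw2Nhwc.
  auto *perm_tensor = new (std::nothrow) Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR);
  if (perm_tensor == nullptr) {
    return nullptr;
  }
  auto *data = perm_tensor->MutableData();
  if (data == nullptr) {
    delete perm_tensor;  // the inlined blocks return RET_ERROR without freeing here
    return nullptr;
  }
  memcpy(data, perm.data(), 4 * sizeof(int));  // perm is expected to hold exactly 4 entries
  all_tensors->push_back(perm_tensor);
  return perm_tensor;
}
}  // namespace mindspore::lite
```

Each inlined block would then reduce to a call such as BuildPermTensor({0, 3, 1, 2}, all_tensors_); note the sketch also releases the tensor when MutableData fails, whereas the inlined blocks return RET_ERROR and leave it allocated.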
diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.cc index 139df3216d..000ff411bc 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.cc @@ -16,7 +16,6 @@ #include "src/runtime/agent/npu/optimizer/npu_pass_utils.h" #include "src/runtime/agent/npu/npu_manager.h" -#include "src/ops/transpose.h" #include "nnacl/transpose.h" #include "src/ops/populate/populate_register.h" #include "src/runtime/kernel/arm/fp32/transpose_fp32.h" @@ -24,50 +23,26 @@ namespace mindspore::lite { using kernel::KERNEL_ARCH::kCPU; using kernel::KERNEL_ARCH::kNPU; -PrimitiveC *NPUPassUtils::CreateTransposePrimitive() { - flatbuffers::FlatBufferBuilder fbb(1024); - auto val_offset = schema::CreateNchw2Nhwc(fbb); - auto prim_offset = schema::CreatePrimitive(fbb, schema::PrimitiveType_Transpose, val_offset.o); - fbb.Finish(prim_offset); - auto buf = fbb.GetBufferPointer(); - if (buf == nullptr) { - MS_LOG(ERROR) << "GetBufferPointer return nullptr"; - fbb.Clear(); - return nullptr; - } - auto primitive_buf = reinterpret_cast<char *>(malloc(fbb.GetSize())); - if (primitive_buf == nullptr) { - MS_LOG(ERROR) << "Malloc primitive buffer failed."; - fbb.Clear(); - return nullptr; - } - memcpy(primitive_buf, buf, fbb.GetSize()); - auto *primitive = PrimitiveC::NewPrimitiveC<Transpose>(flatbuffers::GetRoot<schema::Primitive>(primitive_buf)); - free(primitive_buf); - fbb.Clear(); - return primitive; -} kernel::LiteKernel *NPUPassUtils::CreateNchw2NhwcKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors, const InnerContext *ctx, const std::string &name) { kernel::KernelKey key{kCPU, kNumberTypeFloat32, schema::PrimitiveType_Transpose}; - auto nchw2nhwc_primitive = CreateTransposePrimitive(); auto *transpose_param = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter))); if (transpose_param == nullptr) { MS_LOG(ERROR) << "malloc TransposeParameter failed."; return nullptr; } memset(transpose_param, 0, sizeof(TransposeParameter)); - transpose_param->op_parameter_.type_ = nchw2nhwc_primitive->Type(); + transpose_param->op_parameter_.type_ = schema::PrimitiveType_Transpose; transpose_param->perm_[0] = 0; transpose_param->perm_[1] = 2; transpose_param->perm_[2] = 3; transpose_param->perm_[3] = 1; transpose_param->num_axes_ = 4; - auto kernel = new (std::nothrow) kernel::TransposeCPUKernel(reinterpret_cast<OpParameter *>(transpose_param), - in_tensors, out_tensors, ctx, nchw2nhwc_primitive); + auto kernel = new (std::nothrow) + kernel::TransposeCPUKernel(reinterpret_cast<OpParameter *>(transpose_param), in_tensors, out_tensors, ctx); if (kernel != nullptr) { kernel->set_desc(key); } else { @@ -83,22 +58,21 @@ kernel::LiteKernel *NPUPassUtils::CreateNhwc2NchwKernel(const std::vector<Tensor const std::vector<Tensor *> &out_tensors, const InnerContext *ctx, const std::string &name) { kernel::KernelKey key{kCPU, kNumberTypeFloat32, schema::PrimitiveType_Transpose}; - auto nhwc2nchw_primitive = CreateTransposePrimitive(); auto *transpose_param = reinterpret_cast<TransposeParameter *>(malloc(sizeof(TransposeParameter))); if (transpose_param == nullptr) { MS_LOG(ERROR) << "malloc TransposeParameter failed."; return nullptr; } memset(transpose_param, 0, sizeof(TransposeParameter)); - transpose_param->op_parameter_.type_ = nhwc2nchw_primitive->Type(); + transpose_param->op_parameter_.type_ = 
schema::PrimitiveType_Transpose; transpose_param->perm_[0] = 0; transpose_param->perm_[1] = 3; transpose_param->perm_[2] = 1; transpose_param->perm_[3] = 2; transpose_param->num_axes_ = 4; - auto kernel = new (std::nothrow) kernel::TransposeCPUKernel(reinterpret_cast<OpParameter *>(transpose_param), - in_tensors, out_tensors, ctx, nhwc2nchw_primitive); + auto kernel = new (std::nothrow) + kernel::TransposeCPUKernel(reinterpret_cast<OpParameter *>(transpose_param), in_tensors, out_tensors, ctx); if (kernel != nullptr) { kernel->set_desc(key); } else { diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.h index a92356b428..c1adf4d409 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_pass_utils.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ #define MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_PASS_UTILS_H_ #include <vector> #include <string> -#include "src/ops/primitive_c.h" #include "src/lite_kernel.h" namespace mindspore::lite { class NPUPassUtils { @@ -53,9 +52,6 @@ class NPUPassUtils { static bool IsNchw2Nhwc(const kernel::LiteKernel *kernel); static kernel::LiteKernel *KernelInputFromKernel(const kernel::LiteKernel *kernel, size_t in_tensor_index); - - private: - static PrimitiveC *CreateTransposePrimitive(); }; } // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_PASS_UTILS_H_ diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc index fbabd1e25c..d545104136 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.cc @@ -44,19 +44,27 @@ int NPUTransformPass::InsertPreNodes(kernel::LiteKernel *kernel, std::vector<ker std::vector<Tensor *> pre_trans_out_tensors = {tensor}; all_tensors_->push_back(pre_trans_out_tensors[0]); + auto nh2nc_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nh2nc_data = nh2nc_perm_tensor->MutableData(); + if (nh2nc_data == nullptr) { + return RET_ERROR; + } + std::vector<int> nh2nc_perm_vector = {0, 3, 1, 2}; + memcpy(nh2nc_data, nh2nc_perm_vector.data(), 4 * sizeof(int)); + all_tensors_->push_back(nh2nc_perm_tensor); + // Create pre transform kernel: Nhwc2Nchw - auto *trans_kernel = - NPUPassUtils::CreateNhwc2NchwKernel({kernel->in_tensors()[0]}, pre_trans_out_tensors, context_, name); + auto *trans_kernel = NPUPassUtils::CreateNhwc2NchwKernel({kernel->in_tensors()[0], nh2nc_perm_tensor}, + pre_trans_out_tensors, context_, name); trans_kernels->push_back(trans_kernel); - insert_primitive_.push_back(trans_kernel->GetPrimitive()); // Set in_kernels, out_kernels, in_tensors, out_tensors for transform kernel std::vector<kernel::LiteKernel *> pre_trans_in_kernels; if (!is_input_kernel) { pre_trans_in_kernels = {pre_kernel}; } - NPUPassUtils::UpdateKernel(trans_kernel, pre_trans_in_kernels, {kernel}, {kernel->in_tensors()[0]}, + NPUPassUtils::UpdateKernel(trans_kernel, pre_trans_in_kernels, {kernel}, trans_kernel->in_tensors(), pre_trans_out_tensors); if (pre_kernel != nullptr) { @@ 
-93,14 +101,23 @@ int NPUTransformPass::InsertPostNodes(kernel::LiteKernel *kernel, std::vector<ke auto name = kernel->name() + "_post_trans" + "_Nchw2Nhwc" + std::to_string(total++); tensor->set_tensor_name(name + "/input0"); + auto nc2nh_perm_tensor = new Tensor(kNumberTypeInt32, {4}, schema::Format_NHWC, Tensor::CONST_TENSOR); + auto nc2nh_data = nc2nh_perm_tensor->MutableData(); + if (nc2nh_data == nullptr) { + return RET_ERROR; + } + + std::vector<int> nc2nh_perm_vector = {0, 2, 3, 1}; + memcpy(nc2nh_data, nc2nh_perm_vector.data(), 4 * sizeof(int)); + all_tensors_->push_back(nc2nh_perm_tensor); + // Create post transform kernel: Nchw2Nhwc - auto *post_trans_kernel = - NPUPassUtils::CreateNchw2NhwcKernel(post_trans_in_tensors, kernel->out_tensors(), context_, name); + auto *post_trans_kernel = NPUPassUtils::CreateNchw2NhwcKernel({post_trans_in_tensors[0], nc2nh_perm_tensor}, + kernel->out_tensors(), context_, name); // Set in_kernels, out_kernels, in_tensors, out_tensors for transform kernel - NPUPassUtils::UpdateKernel(post_trans_kernel, {kernel}, post_insert_kernels, post_trans_in_tensors, + NPUPassUtils::UpdateKernel(post_trans_kernel, {kernel}, post_insert_kernels, post_trans_kernel->in_tensors(), kernel->out_tensors()); - insert_primitive_.push_back(post_trans_kernel->GetPrimitive()); trans_kernels->push_back(post_trans_kernel); if (!is_output_kernel) { diff --git a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.h b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.h index 7ab87e85ea..6b75c91cb2 100644 --- a/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.h +++ b/mindspore/lite/src/runtime/agent/npu/optimizer/npu_transform_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ #define MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_TRANSFORM_PASS_H_ #include <vector> #include "src/lite_kernel.h" -#include "src/ops/primitive_c.h" #include "src/runtime/agent/npu/optimizer/npu_base_pass.h" namespace mindspore::lite { @@ -34,13 +33,6 @@ class NPUTransformPass : public NPUBasePass { name_ = "NPUTransformPass"; } - ~NPUTransformPass() override { - for (auto primitive : insert_primitive_) { - delete primitive; - } - insert_primitive_.clear(); - } - private: int InsertPreNodes(kernel::LiteKernel *kernel, std::vector<kernel::LiteKernel *> *trans_kernels); @@ -51,7 +43,6 @@ class NPUTransformPass : public NPUBasePass { const InnerContext *context_; std::vector<kernel::LiteKernel *> *all_kernels_; std::vector<Tensor *> *all_tensors_; - std::vector<const PrimitiveC *> insert_primitive_; }; } // namespace mindspore::lite #endif // MINDSPORE_LITE_SRC_RUNTIME_AGENT_NPU_OPTIMIZER_NPU_TRANSFORM_PASS_H_ diff --git a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc index 542a772028..3c103c47bd 100644 --- a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc +++ b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
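For a quick sanity check of the two permutations these passes hard-code ({0, 3, 1, 2} for Nhwc2Nchw, {0, 2, 3, 1} for Nchw2Nhwc), the round trip can be verified in isolation; shapes here are arbitrary examples, not values from the patch:

```cpp
// Standalone check of the permutation constants used by the NPU passes above.
#include <cassert>
#include <vector>

std::vector<int> ApplyPerm(const std::vector<int> &shape, const std::vector<int> &perm) {
  std::vector<int> out(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    out[i] = shape[perm[i]];  // output dim i is taken from input dim perm[i]
  }
  return out;
}

int main() {
  const std::vector<int> nhwc = {1, 224, 224, 3};
  const auto nchw = ApplyPerm(nhwc, {0, 3, 1, 2});  // Nhwc2Nchw -> {1, 3, 224, 224}
  const auto back = ApplyPerm(nchw, {0, 2, 3, 1});  // Nchw2Nhwc -> {1, 224, 224, 3}
  assert(nchw == (std::vector<int>{1, 3, 224, 224}));
  assert(back == nhwc);  // the two permutations are inverses
  return 0;
}
```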
diff --git a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.h b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.h index 5aa5f5adac..c97e4a450c 100644 --- a/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.h +++ b/mindspore/lite/src/runtime/agent/npu/subgraph_npu_kernel.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/infer_manager.cc b/mindspore/lite/src/runtime/infer_manager.cc new file mode 100644 index 0000000000..3541515dce --- /dev/null +++ b/mindspore/lite/src/runtime/infer_manager.cc @@ -0,0 +1,432 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "src/runtime/infer_manager.h" +#include "src/common/tensor_util.h" +#include "schema/model_generated.h" +#include "nnacl/infer/common_infer.h" +#include "nnacl/infer/adam_infer.h" +#include "nnacl/infer/addn_infer.h" +#include "nnacl/infer/add_sub_grad_infer.h" +#include "nnacl/infer/apply_momentum_infer.h" +#include "nnacl/infer/argmin_max_infer.h" +#include "nnacl/infer/arithmetic_compare_infer.h" +#include "nnacl/infer/arithmetic_grad_infer.h" +#include "nnacl/infer/arithmetic_infer.h" +#include "nnacl/infer/assign_add_infer.h" +#include "nnacl/infer/assign_infer.h" +#include "nnacl/infer/audio_spectrogram_infer.h" +#include "nnacl/infer/batch_to_space_infer.h" +#include "nnacl/infer/bias_grad_infer.h" +#include "nnacl/infer/binary_cross_entropy_infer.h" +#include "nnacl/infer/bn_grad_infer.h" +#include "nnacl/infer/broadcast_to_infer.h" +#include "nnacl/infer/cast_infer.h" +#include "nnacl/infer/concat_infer.h" +#include "nnacl/infer/constant_of_shape_infer.h" +#include "nnacl/infer/conv2d_grad_filter_infer.h" +#include "nnacl/infer/conv2d_grad_input_infer.h" +#include "nnacl/infer/conv2d_infer.h" +#include "nnacl/infer/crop_infer.h" +#include "nnacl/infer/custom_extract_features_infer.h" +#include "nnacl/infer/custom_normalize_infer.h" +#include "nnacl/infer/custom_predict_infer.h" +#include "nnacl/infer/deconv2d_infer.h" +#include "nnacl/infer/dedepthwise_conv2d_infer.h" +#include "nnacl/infer/depth_to_space_infer.h" +#include "nnacl/infer/depthwise_conv2d_infer.h" +#include "nnacl/infer/detection_post_process_infer.h" +#include "nnacl/infer/dropout_grad_infer.h" +#include "nnacl/infer/embedding_lookup_infer.h" +#include "nnacl/infer/expand_dims_infer.h" +#include "nnacl/infer/fft_imag_infer.h" +#include "nnacl/infer/fft_real_infer.h" +#include "nnacl/infer/fill_infer.h" +#include "nnacl/infer/flatten_grad_infer.h" +#include "nnacl/infer/flatten_infer.h" +#include "nnacl/infer/full_connection_infer.h" +#include "nnacl/infer/fused_batchnorm_infer.h" +#include "nnacl/infer/gather_infer.h" +#include "nnacl/infer/gather_nd_infer.h" +#include "nnacl/infer/group_conv2d_grad_input_infer.h" 
+#include "nnacl/infer/hashtable_lookup_infer.h" +#include "nnacl/infer/layer_norm_infer.h" +#include "nnacl/infer/lsh_projection_infer.h" +#include "nnacl/infer/lstm_infer.h" +#include "nnacl/infer/matmul_infer.h" +#include "nnacl/infer/maximum_grad_infer.h" +#include "nnacl/infer/mean_infer.h" +#include "nnacl/infer/mfcc_infer.h" +#include "nnacl/infer/non_max_suppression_infer.h" +#include "nnacl/infer/one_hot_infer.h" +#include "nnacl/infer/pad_infer.h" +#include "nnacl/infer/pooling_grad_infer.h" +#include "nnacl/infer/pooling_infer.h" +#include "nnacl/infer/power_infer.h" +#include "nnacl/infer/quant_dtype_cast_infer.h" +#include "nnacl/infer/range_infer.h" +#include "nnacl/infer/rank_infer.h" +#include "nnacl/infer/reduce_infer.h" +#include "nnacl/infer/reshape_infer.h" +#include "nnacl/infer/resize_infer.h" +#include "nnacl/infer/rfft_infer.h" +#include "nnacl/infer/roi_pooling_infer.h" +#include "nnacl/infer/scatter_nd_infer.h" +#include "nnacl/infer/sgd_infer.h" +#include "nnacl/infer/shape_infer.h" +#include "nnacl/infer/skip_gram_infer.h" +#include "nnacl/infer/slice_infer.h" +#include "nnacl/infer/softmax_cross_entropy_infer.h" +#include "nnacl/infer/softmax_infer.h" +#include "nnacl/infer/space_to_batch_infer.h" +#include "nnacl/infer/space_to_batch_nd_infer.h" +#include "nnacl/infer/space_to_depth_infer.h" +#include "nnacl/infer/sparse_to_dense_infer.h" +#include "nnacl/infer/split_infer.h" +#include "nnacl/infer/squeeze_infer.h" +#include "nnacl/infer/stack_infer.h" +#include "nnacl/infer/strided_slice_infer.h" +#include "nnacl/infer/tile_infer.h" +#include "nnacl/infer/topk_infer.h" +#include "nnacl/infer/transpose_infer.h" +#include "nnacl/infer/unique_infer.h" +#include "nnacl/infer/unsorted_segment_sum_infer.h" +#include "nnacl/infer/unsqueeze_infer.h" +#include "nnacl/infer/unstack_infer.h" +#include "nnacl/infer/where_infer.h" +#include "nnacl/infer/while_infer.h" +#include "include/errorcode.h" +#include "nnacl/errorcode.h" + +#include "src/tensorlist.h" +#include "nnacl/infer/tensorlist_reserve_infer.h" +#include "nnacl/infer/tensorlist_getitem_infer.h" +#include "nnacl/infer/tensorlist_fromtensor_infer.h" +#include "nnacl/infer/tensorlist_setitem_infer.h" +#include "nnacl/infer/tensorlist_stack_infer.h" +#include "nnacl/infer/partial_infer.h" +#include "nnacl/infer/merge_infer.h" +#include "nnacl/infer/switch_infer.h" +#include "nnacl/infer/assert_op_infer.h" +#include "nnacl/infer/sparse_softmax_cross_entropy_infer.h" +#include "nnacl/infer/dropout_infer.h" +#include "nnacl/infer/prior_box_infer.h" + +#include "nnacl/infer/gru_infer.h" +#include "nnacl/infer/select_infer.h" +#include "nnacl/infer/size_infer.h" +#include "nnacl/infer/invert_permutation_infer.h" +#include "nnacl/infer/random_standard_normal_infer.h" +#include "nnacl/infer/crop_and_resize_infer.h" +#include "nnacl/infer/strided_slice_grad_infer.h" +#include "nnacl/infer/lin_space_infer.h" +#include "nnacl/infer/uniform_real_infer.h" + +namespace mindspore { +namespace lite { + +int KernelInferShape(const std::vector<lite::Tensor *> &inputs, std::vector<lite::Tensor *> *outputs, + OpParameter *parameter) { + std::vector<TensorC *> in_tensors; + std::vector<TensorC *> out_tensors; + int ret = 0; + + ret = GenerateInTensorC(parameter, inputs, outputs, &in_tensors); + if (ret != RET_OK) { + FreeAllTensorC(&in_tensors); + return RET_ERROR; + } + + ret = GenerateOutTensorC(parameter, inputs, outputs, &out_tensors); + if (ret != RET_OK) { + FreeAllTensorC(&in_tensors); + FreeAllTensorC(&out_tensors); + 
return RET_ERROR; + } + + auto infer_shape_func = InferManager::GetInstance()->GetInferShapeFunc(parameter->type_); + if (infer_shape_func == nullptr) { + MS_LOG(ERROR) << "Get infershape func failed! type:" << PrimitiveCurVersionTypeName(parameter->type_); + return RET_ERROR; + } + ret = infer_shape_func(static_cast<TensorC **>(in_tensors.data()), in_tensors.size(), out_tensors.data(), + out_tensors.size(), parameter); + + if (ret == RET_OK) { + for (size_t i = 0; i < out_tensors.size(); i++) { + if (reinterpret_cast<TensorListC *>(out_tensors.at(i))->data_type_ == TypeIdC::kObjectTypeTensorType) { + auto *tensor_list_c = reinterpret_cast<TensorListC *>(out_tensors.at(i)); + auto *tensor_list = reinterpret_cast<TensorList *>(outputs->at(i)); + tensor_list->set_shape({static_cast<int>(tensor_list_c->element_num_)}); + auto tensor_shape = std::vector<std::vector<int>>( + tensor_list_c->element_num_, + std::vector<int>(tensor_list_c->element_shape_, + tensor_list_c->element_shape_ + tensor_list_c->element_shape_size_)); + tensor_list->MallocTensorListData(static_cast<TypeId>(tensor_list_c->data_type_), tensor_shape); + TensorListC2TensorList(tensor_list_c, tensor_list); + } else { + TensorC2Tensor(out_tensors.at(i), outputs->at(i)); + } + } + } else { + TensorC2LiteTensor(out_tensors, outputs); + } + + FreeAllTensorC(&in_tensors); + FreeAllTensorC(&out_tensors); + if (ret == NNACL_INFER_INVALID) { + return RET_INFER_INVALID; + } else if (ret != NNACL_OK) { + return RET_INFER_ERR; + } + return RET_OK; +} + +REG_INFER_SHAPE(topk, mindspore::schema::PrimitiveType_TopKFusion, TopKInferShape) +// static RegistryInferShape g_TopkInferShape(mindspore::schema::PrimitiveType_TopKFusion, TopKInferShape); +static RegistryInferShape g_MaxPoolingInferShape(mindspore::schema::PrimitiveType_MaxPoolFusion, PoolingInferShape); +static RegistryInferShape g_AvgPoolingInferShape(mindspore::schema::PrimitiveType_AvgPoolFusion, PoolingInferShape); +static RegistryInferShape g_DetectionPostProcessInferShape(mindspore::schema::PrimitiveType_DetectionPostProcess, + DetectionPostProcessInferShape); +static RegistryInferShape g_SpaceToBatchNdInferShape(mindspore::schema::PrimitiveType_SpaceToBatchND, + SpaceToBatchNdInferShape); +static RegistryInferShape g_ScatterNdInferShape(mindspore::schema::PrimitiveType_ScatterNd, ScatterNdInferShape); +static RegistryInferShape g_FftRealInferShape(mindspore::schema::PrimitiveType_FftReal, FftRealInferShape); +static RegistryInferShape g_SpaceToBatchInferShape(mindspore::schema::PrimitiveType_SpaceToBatch, + SpaceToBatchInferShape); +static RegistryInferShape g_CustomPredictInferShape(mindspore::schema::PrimitiveType_CustomPredict, + CustomPredictInferShape); +static RegistryInferShape g_Conv2dInferShape(mindspore::schema::PrimitiveType_Conv2DFusion, Conv2dInferShape); +static RegistryInferShape g_Deconv2dInferShape(mindspore::schema::PrimitiveType_Conv2dTransposeFusion, + Deconv2dInferShape); +static RegistryInferShape g_SquaredDifferenceInferShape(mindspore::schema::PrimitiveType_SquaredDifference, + ArithmeticInferShape); +static RegistryInferShape g_AddInferShape(mindspore::schema::PrimitiveType_AddFusion, ArithmeticInferShape); +static RegistryInferShape g_AddSubInferShape(mindspore::schema::PrimitiveType_AddGrad, AddSubGradInferShape); +static RegistryInferShape g_SubInferShape(mindspore::schema::PrimitiveType_SubFusion, ArithmeticInferShape); +static RegistryInferShape g_SubGradInferShape(mindspore::schema::PrimitiveType_SubGrad, AddSubGradInferShape); +static 
RegistryInferShape g_DivInferShape(mindspore::schema::PrimitiveType_DivFusion, ArithmeticInferShape); +static RegistryInferShape g_DivGradInferShape(mindspore::schema::PrimitiveType_DivGrad, ArithmeticGradInferShape); +static RegistryInferShape g_MulInferShape(mindspore::schema::PrimitiveType_MulFusion, ArithmeticInferShape); +static RegistryInferShape g_MulGradInferShape(mindspore::schema::PrimitiveType_MulGrad, ArithmeticGradInferShape); +static RegistryInferShape g_FloorDivInferShape(mindspore::schema::PrimitiveType_FloorDiv, ArithmeticInferShape); +static RegistryInferShape g_RealDivInferShape(mindspore::schema::PrimitiveType_RealDiv, ArithmeticInferShape); +static RegistryInferShape g_LogicalOrInferShape(mindspore::schema::PrimitiveType_LogicalOr, ArithmeticInferShape); +static RegistryInferShape g_LogicalAndInferShape(mindspore::schema::PrimitiveType_LogicalAnd, ArithmeticInferShape); +static RegistryInferShape g_MinimumInferShape(mindspore::schema::PrimitiveType_Minimum, ArithmeticInferShape); +static RegistryInferShape g_MaximumInferShape(mindspore::schema::PrimitiveType_Maximum, ArithmeticInferShape); +static RegistryInferShape g_FloorModInferShape(mindspore::schema::PrimitiveType_FloorMod, ArithmeticInferShape); +static RegistryInferShape g_EltwiseInferShape(mindspore::schema::PrimitiveType_Eltwise, ArithmeticInferShape); + +static RegistryInferShape g_SpaceToDepthInferShape(mindspore::schema::PrimitiveType_SpaceToDepth, + SpaceToDepthInferShape); +static RegistryInferShape g_Conv2dGradFilterInferShape(mindspore::schema::PrimitiveType_Conv2DBackpropFilterFusion, + Conv2dGradFilterInferShape); +static RegistryInferShape g_PadInferShape(mindspore::schema::PrimitiveType_PadFusion, PadInferShape); +static RegistryInferShape g_ApplyMomentumInferShape(mindspore::schema::PrimitiveType_ApplyMomentum, + ApplyMomentumInferShape); +static RegistryInferShape g_GatherInferShape(mindspore::schema::PrimitiveType_Gather, GatherInferShape); +static RegistryInferShape g_SkipGramInferShape(mindspore::schema::PrimitiveType_SkipGram, SkipGramInferShape); +static RegistryInferShape g_StridedSliceInferShape(mindspore::schema::PrimitiveType_StridedSlice, + StridedSliceInferShape); +static RegistryInferShape g_StackInferShape(mindspore::schema::PrimitiveType_Stack, StackInferShape); + +static RegistryInferShape g_AssignInferShape(mindspore::schema::PrimitiveType_Assign, AssignInferShape); +static RegistryInferShape g_BnGradInferShape(mindspore::schema::PrimitiveType_BatchNormGrad, BnGradInferShape); +static RegistryInferShape g_SplitInferShape(mindspore::schema::PrimitiveType_Split, SplitInferShape); +static RegistryInferShape g_HashtableLookupInferShape(mindspore::schema::PrimitiveType_HashtableLookup, + HashtableLoopupInferShape); +static RegistryInferShape g_FillInferShape(mindspore::schema::PrimitiveType_Fill, FillInferShape); +static RegistryInferShape g_MatmulInferShape(mindspore::schema::PrimitiveType_MatMul, MatmulInferShape); +static RegistryInferShape g_BatchToSpaceInferShape(mindspore::schema::PrimitiveType_BatchToSpace, + BatchToSpaceInferShape); +static RegistryInferShape g_RankInferShape(mindspore::schema::PrimitiveType_Rank, RankInferShape); +static RegistryInferShape g_FlattenGradInferShape(mindspore::schema::PrimitiveType_FlattenGrad, FlattenGradInferShape); +static RegistryInferShape g_ConcatInferShape(mindspore::schema::PrimitiveType_Concat, ConcatInferShape); +static RegistryInferShape g_SliceInferShape(mindspore::schema::PrimitiveType_SliceFusion, SliceInferShape); +static 
RegistryInferShape g_ExpandDimsInferShape(mindspore::schema::PrimitiveType_ExpandDims, ExpandDimsInferShape); +static RegistryInferShape g_ResizeInferShape(mindspore::schema::PrimitiveType_Resize, ResizeInferShape); +static RegistryInferShape g_WhereInferShape(mindspore::schema::PrimitiveType_Where, WhereInferShape); +static RegistryInferShape g_ConstantOfShapeInferShape(mindspore::schema::PrimitiveType_ConstantOfShape, + ConstantOfShapeInferShape); +static RegistryInferShape g_DepthToSpaceInferShape(mindspore::schema::PrimitiveType_DepthToSpace, + DepthToSpaceInferShape); +static RegistryInferShape g_SqueezeInferShape(mindspore::schema::PrimitiveType_Squeeze, SqueezeInferShape); +static RegistryInferShape g_RfftInferShape(mindspore::schema::PrimitiveType_Rfft, RfftInferShape); +static RegistryInferShape g_CastInferShape(mindspore::schema::PrimitiveType_Cast, CastInferShape); +static RegistryInferShape g_SparseToDenseInferShape(mindspore::schema::PrimitiveType_SparseToDense, + SparseToDenseInferShape); +static RegistryInferShape g_Conv2dGradInputInferShape(mindspore::schema::PrimitiveType_Conv2DBackpropInputFusion, + Conv2dGradInputInferShape); +static RegistryInferShape g_QuantDtypeCastInferShape(mindspore::schema::PrimitiveType_QuantDTypeCast, + QuantDtypeCastInferShape); +static RegistryInferShape g_MfccInferShape(mindspore::schema::PrimitiveType_Mfcc, MfccInferShape); +static RegistryInferShape g_AssignAddInferShape(mindspore::schema::PrimitiveType_AssignAdd, AssignAddInferShape); +static RegistryInferShape g_LayerNormInferShape(mindspore::schema::PrimitiveType_LayerNormFusion, LayerNormInferShape); +static RegistryInferShape g_UnsortedSegmentSumInferShape(mindspore::schema::PrimitiveType_UnsortedSegmentSum, + UnsortedSegmentSumInferShape); +static RegistryInferShape g_AddnInferShape(mindspore::schema::PrimitiveType_AddN, AddnInferShape); +static RegistryInferShape g_BiasGradInferShape(mindspore::schema::PrimitiveType_BiasAddGrad, BiasGradInferShape); +static RegistryInferShape g_FullConnectionInferShape(mindspore::schema::PrimitiveType_FullConnection, + FullConnectionInferShape); +static RegistryInferShape g_CropInferShape(mindspore::schema::PrimitiveType_Crop, CropInferShape); +static RegistryInferShape g_DropoutGradInferShape(mindspore::schema::PrimitiveType_DropoutGrad, DropoutGradInferShape); +static RegistryInferShape g_AdamInferShape(mindspore::schema::PrimitiveType_Adam, AdamInferShape); +static RegistryInferShape g_FusedBatchnormInferShape(mindspore::schema::PrimitiveType_FusedBatchNorm, + FusedBatchNormInferShape); +static RegistryInferShape g_SoftmaxInferShape(mindspore::schema::PrimitiveType_Softmax, SoftMaxInferShape); +static RegistryInferShape g_RoiPoolingInferShape(mindspore::schema::PrimitiveType_ROIPooling, ROIPoolingInferShape); +static RegistryInferShape g_avgPoolGradInferShape(mindspore::schema::PrimitiveType_AvgPoolGrad, PoolingGradInferShape); +static RegistryInferShape g_maxPoolGradInferShape(mindspore::schema::PrimitiveType_MaxPoolGrad, PoolingGradInferShape); +static RegistryInferShape g_WhileInferShape(mindspore::schema::PrimitiveType_While, WhileInferShape); +static RegistryInferShape g_BinaryCrossEntropyInferShape(mindspore::schema::PrimitiveType_BinaryCrossEntropy, + BinaryCrossEntropyInferShape); +static RegistryInferShape g_TileInferShape(mindspore::schema::PrimitiveType_TileFusion, TileInferShape); +static RegistryInferShape g_EmbeddingLookupInferShape(mindspore::schema::PrimitiveType_EmbeddingLookupFusion, + EmbeddingLookupInferShape); +static 
RegistryInferShape g_UnsqueezeInferShape(mindspore::schema::PrimitiveType_Unsqueeze, UnsqueezeInferShape); +static RegistryInferShape g_TransposeInferShape(mindspore::schema::PrimitiveType_Transpose, TransposeInferShape); +static RegistryInferShape g_GatherNdInferShape(mindspore::schema::PrimitiveType_GatherNd, GatherNdInferShape); +static RegistryInferShape g_BroadcastToInferShape(mindspore::schema::PrimitiveType_BroadcastTo, BroadcastToInferShape); +static RegistryInferShape g_MaximumGradInferShape(mindspore::schema::PrimitiveType_MaximumGrad, MaximumGradInferShape); +static RegistryInferShape g_PowerInferShape(mindspore::schema::PrimitiveType_PowFusion, PowerInferShape); +static RegistryInferShape g_RangeInferShape(mindspore::schema::PrimitiveType_Range, RangeInferShape); +static RegistryInferShape g_SgdInferShape(mindspore::schema::PrimitiveType_SGD, SgdInferShape); +static RegistryInferShape g_ArgminInferShape(mindspore::schema::PrimitiveType_ArgMinFusion, ArgMinMaxInferShape); +static RegistryInferShape g_UnstackInferShape(mindspore::schema::PrimitiveType_Unstack, UnstackInferShape); +static RegistryInferShape g_AudioSpectrogramInferShape(mindspore::schema::PrimitiveType_AudioSpectrogram, + AudioSpectrogramInferShape); + +// note: no arithmetic_self +static RegistryInferShape g_BinaryCrossEntropyGradInferShape(mindspore::schema::PrimitiveType_BinaryCrossEntropyGrad, + CommonInferShape); +static RegistryInferShape g_ReverseSequenceInferShape(mindspore::schema::PrimitiveType_ReverseSequence, + CommonInferShape); +static RegistryInferShape g_ZerosLikeInferShape(mindspore::schema::PrimitiveType_ZerosLike, CommonInferShape); + +static RegistryInferShape g_AbsInferShape(mindspore::schema::PrimitiveType_Abs, CommonInferShape); +static RegistryInferShape g_ActivationGradInferShape(mindspore::schema::PrimitiveType_ActivationGrad, CommonInferShape); +static RegistryInferShape g_ActivationInferShape(mindspore::schema::PrimitiveType_Activation, CommonInferShape); +static RegistryInferShape g_BatchNormInferShape(mindspore::schema::PrimitiveType_BatchNorm, CommonInferShape); +static RegistryInferShape g_BiasAddInferShape(mindspore::schema::PrimitiveType_BiasAdd, CommonInferShape); +static RegistryInferShape g_CeilInferShape(mindspore::schema::PrimitiveType_Ceil, CommonInferShape); +static RegistryInferShape g_ClipInferShape(mindspore::schema::PrimitiveType_Clip, CommonInferShape); +static RegistryInferShape g_CosInferShape(mindspore::schema::PrimitiveType_Cos, CommonInferShape); +static RegistryInferShape g_SinInferShape(mindspore::schema::PrimitiveType_Sin, CommonInferShape); +static RegistryInferShape g_DependInferShape(mindspore::schema::PrimitiveType_Depend, CommonInferShape); +// note : no Primitive_Dequant +static RegistryInferShape g_EluInferShape(mindspore::schema::PrimitiveType_Elu, CommonInferShape); +static RegistryInferShape g_ExpInferShape(mindspore::schema::PrimitiveType_ExpFusion, CommonInferShape); +static RegistryInferShape g_FakeQuantWithMinMaxVarsInferShape(mindspore::schema::PrimitiveType_FakeQuantWithMinMaxVars, + CommonInferShape); +static RegistryInferShape g_FloorInferShape(mindspore::schema::PrimitiveType_Floor, CommonInferShape); +static RegistryInferShape g_InstanceNormInferShape(mindspore::schema::PrimitiveType_InstanceNorm, CommonInferShape); +static RegistryInferShape g_L2NormInferShape(mindspore::schema::PrimitiveType_L2NormalizeFusion, CommonInferShape); +static RegistryInferShape g_LeakyReluInferShape(mindspore::schema::PrimitiveType_LeakyRelu, CommonInferShape); + 
+static RegistryInferShape g_LogGradInferShape(mindspore::schema::PrimitiveType_LogGrad, CommonInferShape); +static RegistryInferShape g_LogicalNotInferShape(mindspore::schema::PrimitiveType_LogicalNot, CommonInferShape); +static RegistryInferShape g_LrnInferShape(mindspore::schema::PrimitiveType_LRN, CommonInferShape); +static RegistryInferShape g_NegInferShape(mindspore::schema::PrimitiveType_Neg, CommonInferShape); +static RegistryInferShape g_NegGradInferShape(mindspore::schema::PrimitiveType_NegGrad, CommonInferShape); +static RegistryInferShape g_PowerGradInferShape(mindspore::schema::PrimitiveType_PowerGrad, CommonInferShape); +static RegistryInferShape g_PReLUInferShape(mindspore::schema::PrimitiveType_PReLUFusion, CommonInferShape); +static RegistryInferShape g_ReverseInferShape(mindspore::schema::PrimitiveType_ReverseV2, CommonInferShape); +static RegistryInferShape g_RoundInferShape(mindspore::schema::PrimitiveType_Round, CommonInferShape); +static RegistryInferShape g_RsqrtInferShape(mindspore::schema::PrimitiveType_Rsqrt, CommonInferShape); +static RegistryInferShape g_ScaleInferShape(mindspore::schema::PrimitiveType_ScaleFusion, CommonInferShape); +static RegistryInferShape g_SqrtInferShape(mindspore::schema::PrimitiveType_Sqrt, CommonInferShape); +static RegistryInferShape g_SquareInferShape(mindspore::schema::PrimitiveType_Square, CommonInferShape); + +static RegistryInferShape g_LshProjectionInferShape(mindspore::schema::PrimitiveType_LshProjection, + LshProjectionInferShape); +static RegistryInferShape g_SoftmaxCrossEntropyInferShape( + mindspore::schema::PrimitiveType_SoftmaxCrossEntropyWithLogits, SoftmaxCrossEntropyInferShape); +static RegistryInferShape g_LogInferShape(mindspore::schema::PrimitiveType_Log, CommonInferShape); +static RegistryInferShape g_LessInferShape(mindspore::schema::PrimitiveType_Less, ArithmeticCompareInferShape); +static RegistryInferShape g_EqualInferShape(mindspore::schema::PrimitiveType_Equal, ArithmeticCompareInferShape); +static RegistryInferShape g_LessEqualInferShape(mindspore::schema::PrimitiveType_LessEqual, + ArithmeticCompareInferShape); +static RegistryInferShape g_GreaterInferShape(mindspore::schema::PrimitiveType_Greater, ArithmeticCompareInferShape); +static RegistryInferShape g_GreaterEqualInferShape(mindspore::schema::PrimitiveType_GreaterEqual, + ArithmeticCompareInferShape); +static RegistryInferShape g_NotEqualInferShape(mindspore::schema::PrimitiveType_NotEqual, ArithmeticCompareInferShape); +static RegistryInferShape g_ShapeInferShape(mindspore::schema::PrimitiveType_Shape, ShapeInferShape); +static RegistryInferShape g_ReshapeInferShape(mindspore::schema::PrimitiveType_Reshape, ReshapeInferShape); +static RegistryInferShape g_OneHotInferShape(mindspore::schema::PrimitiveType_OneHot, OneHotInferShape); +static RegistryInferShape g_FftImagInferShape(mindspore::schema::PrimitiveType_FftImag, FftImagInferShape); +static RegistryInferShape g_LstmInferShape(mindspore::schema::PrimitiveType_LSTM, LstmInferShape); +static RegistryInferShape g_ReduceInferShape(mindspore::schema::PrimitiveType_ReduceFusion, ReduceInferShape); +static RegistryInferShape g_FlattenInferShape(mindspore::schema::PrimitiveType_Flatten, FlattenInferShape); +static RegistryInferShape g_CustomNormalizeInferShape(mindspore::schema::PrimitiveType_CustomNormalize, + CustomNormalizeInferShape); +static RegistryInferShape g_NonMaxSuppressionInferShape(mindspore::schema::PrimitiveType_NonMaxSuppression, + NonMaxSuppressionInferShape); +static RegistryInferShape 
g_CustomExtractFeaturesInferShape(mindspore::schema::PrimitiveType_CustomExtractFeatures, + CustomExtractFeaturesInferShape); +static RegistryInferShape g_ArgmaxInferShape(mindspore::schema::PrimitiveType_ArgMaxFusion, ArgMinMaxInferShape); +static RegistryInferShape g_UniqueInferShape(mindspore::schema::PrimitiveType_Unique, UniqueInferShape); + +static RegistryInferShape g_TensorListFromTensorInferShape(mindspore::schema::PrimitiveType_TensorListFromTensor, + TensorListFromTensorInferShape); +static RegistryInferShape g_TensorListGetItemInferShape(mindspore::schema::PrimitiveType_TensorListGetItem, + TensorListGetItemInferShape); +static RegistryInferShape g_TensorListReserveInferShape(mindspore::schema::PrimitiveType_TensorListReserve, + TensorListReserveInferShape); +static RegistryInferShape g_TensorListSetItemInferShape(mindspore::schema::PrimitiveType_TensorListSetItem, + TensorListSetItemInferShape); +static RegistryInferShape g_TensorListStackInferShape(mindspore::schema::PrimitiveType_TensorListStack, + TensorListStackInferShape); +static RegistryInferShape g_PartialInferShape(mindspore::schema::PrimitiveType_PartialFusion, PartialInferShape); +static RegistryInferShape g_MergeInferShape(mindspore::schema::PrimitiveType_Merge, MergeInferShape); +static RegistryInferShape g_SwitchInferShape(mindspore::schema::PrimitiveType_Switch, SwitchInferShape); +static RegistryInferShape g_AssertOpInferShape(mindspore::schema::PrimitiveType_Assert, AssertOpInferShape); +static RegistryInferShape g_SparseSoftmaxCrossEntropyInferShape( + mindspore::schema::PrimitiveType_SparseSoftmaxCrossEntropy, SparseSoftmaxCrossEntropyInferShape); +static RegistryInferShape g_DropoutInferShape(mindspore::schema::PrimitiveType_Dropout, DropoutInferShape); +static RegistryInferShape g_PriorBoxInferShape(mindspore::schema::PrimitiveType_PriorBox, PriorBoxInferShape); +static RegistryInferShape g_MinimumGradInferShape(mindspore::schema::PrimitiveType_MinimumGrad, MaximumGradInferShape); +static RegistryInferShape g_AdderInferShape(mindspore::schema::PrimitiveType_AdderFusion, Conv2dInferShape); +static RegistryInferShape g_ReciprocalInferShape(mindspore::schema::PrimitiveType_Reciprocal, CommonInferShape); +static RegistryInferShape g_SmoothL1LossInferShape(mindspore::schema::PrimitiveType_SmoothL1Loss, CommonInferShape); +static RegistryInferShape g_SmoothL1LossGradInferShape(mindspore::schema::PrimitiveType_SmoothL1LossGrad, + CommonInferShape); +static RegistryInferShape g_SigmoidCrossEntropyWithLogitsInferShape( + mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogits, CommonInferShape); +static RegistryInferShape g_SigmoidCrossEntropyWithLogitsGradInferShape( + mindspore::schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, CommonInferShape); +static RegistryInferShape g_ModInferShape(mindspore::schema::PrimitiveType_Mod, ArithmeticInferShape); +static RegistryInferShape g_ControlDependInferShape(mindspore::schema::PrimitiveType_ControlDepend, CommonInferShape); + +static RegistryInferShape g_SelectInferShape(mindspore::schema::PrimitiveType_Select, SelectInferShape); +static RegistryInferShape g_IfInferShape(mindspore::schema::PrimitiveType_If, CommonInferShape); +static RegistryInferShape g_GruInferShape(mindspore::schema::PrimitiveType_GRU, GruInferShape); +static RegistryInferShape g_InvertPermutationInferShape(mindspore::schema::PrimitiveType_InvertPermutation, + InvertPermutationInferShape); +static RegistryInferShape g_SizeInferShape(mindspore::schema::PrimitiveType_Size, SizeInferShape); 
+static RegistryInferShape g_RandomStandardNormalInferShape(mindspore::schema::PrimitiveType_RandomStandardNormal, + RandomStandardNormalInferShape); +static RegistryInferShape g_CropAndResizeInferShape(mindspore::schema::PrimitiveType_CropAndResize, + CropAndResizeInferShape); +static RegistryInferShape g_ErfInferShape(mindspore::schema::PrimitiveType_Erf, CommonInferShape); +static RegistryInferShape g_StridedSliceGradInferShape(mindspore::schema::PrimitiveType_StridedSliceGrad, + StridedSliceGradInferShape); +static RegistryInferShape g_IsFiniteInferShape(mindspore::schema::PrimitiveType_IsFinite, CommonInferShape); +static RegistryInferShape g_LinSpaceInferShape(mindspore::schema::PrimitiveType_LinSpace, LinSpaceInferShape); +static RegistryInferShape g_UniformRealInferShape(mindspore::schema::PrimitiveType_UniformReal, UniformRealInferShape); +static RegistryInferShape g_AbsGradInferShape(mindspore::schema::PrimitiveType_AbsGrad, CommonInferShape); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/src/runtime/infer_manager.h b/mindspore/lite/src/runtime/infer_manager.h new file mode 100644 index 0000000000..e8a34bff2e --- /dev/null +++ b/mindspore/lite/src/runtime/infer_manager.h @@ -0,0 +1,65 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_INFER_MANAGER_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_INFER_MANAGER_H_ + +#include <map> +#include <vector> +#include "src/common/prim_util.h" +#include "src/common/common.h" +#include "nnacl/tensor_c.h" + +namespace mindspore::lite { +typedef int (*InferShape)(const TensorC *const *inputs, size_t input_size, TensorC **outputs, size_t output_size, + OpParameter *parameter); +int KernelInferShape(const std::vector<lite::Tensor *> &tensors_in, std::vector<lite::Tensor *> *outputs, + OpParameter *parameter); +class InferManager { + public: + static InferManager *GetInstance() { + static InferManager instance; + return &instance; + } + virtual ~InferManager() = default; + + void InsertInferShapeFunc(int prim_type, InferShape func) { infer_shape_funcs_[prim_type] = func; } + + InferShape GetInferShapeFunc(int prim_type) { + auto iter = infer_shape_funcs_.find(prim_type); + if (iter == infer_shape_funcs_.end()) { + return nullptr; + } + return iter->second; + } + + private: + InferManager() = default; + + std::map<int, InferShape> infer_shape_funcs_; +}; + +class RegistryInferShape { + public: + RegistryInferShape(int prim_type, InferShape func) { + InferManager::GetInstance()->InsertInferShapeFunc(prim_type, func); + } +}; + +#define REG_INFER_SHAPE(op, prim_type, func) static RegistryInferShape g_##op##InferShape(prim_type, func); +} // namespace mindspore::lite + +#endif // MINDSPORE_LITE_SRC_RUNTIME_INFER_MANAGER_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.cc index 04ad4ea6e0..e39fb1a9ba 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.cc @@ -22,8 +22,8 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_ArgMax; -using mindspore::schema::PrimitiveType_ArgMin; +using mindspore::schema::PrimitiveType_ArgMaxFusion; +using mindspore::schema::PrimitiveType_ArgMinFusion; namespace mindspore::kernel { int ArgMinMaxCPUKernel::Init() { @@ -89,8 +89,8 @@ int ArgMinMaxCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMax, LiteKernelCreator<ArgMinMaxCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMin, LiteKernelCreator<ArgMinMaxCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ArgMax, LiteKernelCreator<ArgMinMaxCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ArgMin, LiteKernelCreator<ArgMinMaxCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMaxFusion, LiteKernelCreator<ArgMinMaxCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ArgMinFusion, LiteKernelCreator<ArgMinMaxCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ArgMaxFusion, LiteKernelCreator<ArgMinMaxCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ArgMinFusion, LiteKernelCreator<ArgMinMaxCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.h b/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.h index 65dde8edf4..4265555737 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/argminmax_base.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class ArgMinMaxCPUKernel : public LiteKernel { public: 
ArgMinMaxCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { arg_param_ = reinterpret_cast<ArgMinMaxParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/assert.h b/mindspore/lite/src/runtime/kernel/arm/base/assert.h index 6195a8390d..bdf617c473 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/assert.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/assert.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class AssertCPUKernel : public LiteKernel { public: AssertCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~AssertCPUKernel() override {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/carry_data.cc b/mindspore/lite/src/runtime/kernel/arm/base/carry_data.cc index 4d5b4f89a3..0121cabe11 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/carry_data.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/carry_data.cc @@ -59,7 +59,7 @@ int CarryDataKernel::MoveTensorData(lite::Tensor *dst_tensor, lite::Tensor *src_ MS_LOG(ERROR) << "input tensor data_type: " << src_tensor->data_type() << " vs " << "output tensor data_type: " << dst_tensor->data_type() << "input tensor format: " << src_tensor->format() << " vs " - << "output tensor format: " << dst_tensor->format() << "input tensor shape: " << src_tensor->shape() + << "output tensor format: " << dst_tensor->format() << " input tensor shape: " << src_tensor->shape() << " vs " << "output tensor shape: " << dst_tensor->shape(); return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/carry_data.h b/mindspore/lite/src/runtime/kernel/arm/base/carry_data.h index d122d2562d..f7054d1b5d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/carry_data.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/carry_data.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class CarryDataKernel : public LiteKernel { public: CarryDataKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~CarryDataKernel() override = default; protected: diff --git a/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.h b/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.h index e03c9f762f..fd2fab96e8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/constant_of_shape.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ConstantOfShapeCPUKernel : public LiteKernel { public: ConstantOfShapeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> 
&outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<ConstantOfShapeParameter *>(parameter); } ~ConstantOfShapeCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc index 716ae15d51..7c9014ce0b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.cc @@ -19,7 +19,6 @@ #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "include/errorcode.h" -#include "src/ops/conv2d.h" using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; @@ -78,11 +77,6 @@ void ConvolutionBaseCPUKernel::FreeQuantParam() { } int ConvolutionBaseCPUKernel::Init() { - auto conv2d_lite_primitive = (lite::Conv2D *)primitive_; - conv_param_->pad_u_ = conv2d_lite_primitive->PadUp(); - conv_param_->pad_d_ = conv2d_lite_primitive->PadDown(); - conv_param_->pad_l_ = conv2d_lite_primitive->PadLeft(); - conv_param_->pad_r_ = conv2d_lite_primitive->PadRight(); auto input = this->in_tensors_.front(); auto output = this->out_tensors_.front(); conv_param_->input_batch_ = input->Batch(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h index 7e287fd224..d3e5c49527 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/convolution_base.h @@ -35,9 +35,8 @@ namespace mindspore::kernel { class ConvolutionBaseCPUKernel : public LiteKernel { public: ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) { op_parameter_->thread_num_ = ctx->thread_num_; conv_param_ = reinterpret_cast<ConvParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h index e3d3e70d13..88f170ad74 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/crop_base.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class CropBaseCPUKernel : public LiteKernel { public: CropBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { crop_para_ = reinterpret_cast<CropParameter *>(op_parameter_); crop_para_->thread_count_ = op_parameter_->thread_num_; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h index 
3ff99bfedc..c1cfd2d5c9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/depth_to_space_base.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class DepthToSpaceBaseCPUKernel : public LiteKernel { public: DepthToSpaceBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<DepthToSpaceParameter *>(op_parameter_); } virtual ~DepthToSpaceBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h index 5f9fce4d1f..3a2bbd8d79 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/detection_post_process_base.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class DetectionPostProcessBaseCPUKernel : public LiteKernel { public: DetectionPostProcessBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_num_(ctx->thread_num_) { params_ = reinterpret_cast<DetectionPostProcessParameter *>(parameter); } virtual ~DetectionPostProcessBaseCPUKernel(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/merge.h b/mindspore/lite/src/runtime/kernel/arm/base/merge.h index 096d14fa95..8c63a02371 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/merge.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/merge.h @@ -27,9 +27,8 @@ enum InputPart { UNKNOWN_INPUT_PART, LEFT_INPUT_PART, RIGHT_INPUT_PART }; class MergeCPUKernel : public CarryDataKernel { public: MergeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CarryDataKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : CarryDataKernel(parameter, inputs, outputs, ctx) {} bool IsReady(const std::vector<lite::Tensor *> &scope_tensors) override; ~MergeCPUKernel() override = default; int FreeInWorkTensor() const override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc index 006bd26bad..e1efe7547a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.cc @@ -14,12 +14,16 @@ * limitations under the License. 
*/ #include "src/runtime/kernel/arm/base/pooling_base.h" +#include "src/kernel_registry.h" #include "include/errorcode.h" -#include "src/ops/pooling.h" +#include "include/context.h" +using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::kernel { int PoolingBaseCPUKernel::SetQuantParam() { @@ -89,11 +93,6 @@ int PoolingBaseCPUKernel::ReSize() { auto out_tensor = this->out_tensors_.front(); MS_ASSERT(in_tensor != nullptr); MS_ASSERT(out_tensor != nullptr); - auto pooling_lite_primitive = (lite::Pooling *)primitive_; - pooling_param_->pad_u_ = pooling_lite_primitive->PadUp(); - pooling_param_->pad_d_ = pooling_lite_primitive->PadDown(); - pooling_param_->pad_l_ = pooling_lite_primitive->PadLeft(); - pooling_param_->pad_r_ = pooling_lite_primitive->PadRight(); pooling_param_->input_batch_ = in_tensor->Batch(); pooling_param_->input_channel_ = in_tensor->Channel(); pooling_param_->input_h_ = in_tensor->Height(); diff --git a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h index c14594d53a..97c9fd30b2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/pooling_base.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class PoolingBaseCPUKernel : public LiteKernel { public: PoolingBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) { pooling_param_ = reinterpret_cast<PoolingParameter *>(op_parameter_); } ~PoolingBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h index ea0852f5fe..be9f3d04b3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/prior_box.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class PriorBoxCPUKernel : public LiteKernel { public: PriorBoxCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) { prior_box_param_ = reinterpret_cast<PriorBoxParameter *>(op_parameter_); } ~PriorBoxCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h index 1560bcb63b..4d6ee3308c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/quant_dtype_cast.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class QuantDTypeCastCPUKernel : public LiteKernel { public: QuantDTypeCastCPUKernel(OpParameter 
*parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_num_(ctx->thread_num_) {} ~QuantDTypeCastCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/random_standard_normal.h b/mindspore/lite/src/runtime/kernel/arm/base/random_standard_normal.h index f7a54caa6c..12d69fce50 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/random_standard_normal.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/random_standard_normal.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class RandomStandardNormalCPUKernel : public LiteKernel { public: RandomStandardNormalCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<RandomStandardNormalParam *>(parameter); } ~RandomStandardNormalCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc index afc2c83a16..42429f4ad2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.cc @@ -26,7 +26,6 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Reduce; namespace mindspore::kernel { namespace { @@ -104,7 +103,7 @@ int ReduceBaseCPUKernel::Init() { MS_LOG(ERROR) << "input axes invalid."; return RET_ERROR; } - memcpy(axes_, axes_ptr->MutableData(), axes_ptr->Size()); + memcpy(axes_, axes_ptr->data_c(), axes_ptr->Size()); } else { num_axes_ = reduce_param->num_axes_; memcpy(axes_, reduce_param->axes_, sizeof(reduce_param->axes_)); @@ -170,35 +169,4 @@ int ReduceBaseCPUKernel::ReSize() { CalculateInnerOuterSize(); return RET_OK; } - -kernel::LiteKernel *CpuReduceFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_Reduce); - if (opParameter == nullptr) { - MS_LOG(ERROR) << "Reduce opParameter nullptr"; - return nullptr; - } - if (desc.type != schema::PrimitiveType_Reduce) { - MS_LOG(ERROR) << "Reduce op desc.type should be PrimitiveType_Reduce, got " << desc.type; - free(opParameter); - return nullptr; - } - auto *kernel = new (std::nothrow) ReduceCPUKernel(opParameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "Reduce new ReduceCPUKernel failed."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << 
schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h index 27fcbc8b33..5aa298f4a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reduce_base.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ReduceBaseCPUKernel : public LiteKernel { public: ReduceBaseCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx) {} virtual ~ReduceBaseCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h index 064e11dec1..2d573353b3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/reshape_base.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ReshapeBaseCPUKernel : public LiteKernel { public: ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ReshapeBaseCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc index 19efba6976..8d4de6d408 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.cc @@ -28,7 +28,7 @@ using mindspore::lite::RET_OK; namespace mindspore::kernel { namespace { -constexpr int kMaxInputNum = 2; +constexpr int kMaxInputNum = 4; constexpr int kOutputNum = 1; } // namespace @@ -44,7 +44,7 @@ int ResizeBaseCPUKernel::CheckParameters() { MS_LOG(ERROR) << "Resize method should be bilinear or nearest_neighbor, but got " << method_; return RET_INVALID_OP_ATTR; } - if (this->in_tensors_.size() == lite::kSingleNum) { + if (this->in_tensors_.size() == 1) { new_height_ = parameter->new_height_; if (new_height_ < 1) { MS_LOG(ERROR) << "Resize new_height should >= 1, but got " << new_height_; @@ -55,7 +55,7 @@ int ResizeBaseCPUKernel::CheckParameters() { MS_LOG(ERROR) << "Resize new_width should >= 1, but got " << new_width_; return RET_INVALID_OP_ATTR; } - } else if (this->in_tensors_.size() == lite::kDoubleNum) { + } else if (this->in_tensors_.size() == 2) { auto out_shape = this->in_tensors_.at(1)->data_c(); if (out_shape == nullptr) { MS_LOG(INFO) << "Out shape is not assigned"; @@ -78,7 +78,7 @@ int ResizeBaseCPUKernel::CheckParameters() { } int ResizeBaseCPUKernel::CheckInputsOuputs() { - if (in_tensors_.size() <= lite::kQuadrupleNum) { + if (in_tensors_.size() <= kMaxInputNum) { for (size_t i = 0; i < in_tensors_.size(); i++) { auto input = in_tensors_.at(i); if (input == nullptr) { diff --git 
a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h index 4ec58c94c7..7647b5ceb8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/resize_base.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ResizeBaseCPUKernel : public LiteKernel { public: ResizeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} virtual ~ResizeBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/select.h b/mindspore/lite/src/runtime/kernel/arm/base/select.h index 47b99acd5e..8d26da529d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/select.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/select.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SelectCPUKernel : public CarryDataKernel { public: SelectCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CarryDataKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : CarryDataKernel(parameter, inputs, outputs, ctx) {} ~SelectCPUKernel() override = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc index 475de022af..43c4bac9c5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.cc @@ -16,13 +16,12 @@ #include "src/runtime/kernel/arm/base/slice_base.h" #include "src/kernel_registry.h" #include "nnacl/base/slice_base.h" -#include "src/ops/slice.h" #include "src/tensor.h" using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Slice; +using mindspore::schema::PrimitiveType_SliceFusion; namespace mindspore::kernel { int SliceLaunch(void *cdata, int task_id) { @@ -35,19 +34,26 @@ int SliceLaunch(void *cdata, int task_id) { } int SliceCPUKernel::ReSize() { - auto primitive_slice = reinterpret_cast<const mindspore::lite::Slice *>(primitive_); - auto begin = primitive_slice->GetPostProcessBegin(); - auto size = primitive_slice->GetPostProcessSize(); + auto in_tensor = in_tensors_[0]; + auto begin_tensor = in_tensors_[1]; + auto size_tensor = in_tensors_[2]; - param_->param_length_ = in_tensors_.at(0)->shape().size(); + MS_ASSERT(in_tensor->shape().size() == begin_tensor->ElementsNum()); + MS_ASSERT(in_tensor->shape().size() == size_tensor->ElementsNum()); + MS_ASSERT(in_tensor->shape().size() <= DIMENSION_4D); + + auto begin = reinterpret_cast<int32_t *>(begin_tensor->data_c()); + auto size = reinterpret_cast<int32_t *>(size_tensor->data_c()); + + param_->param_length_ = in_tensor->shape().size(); if (param_->param_length_ > DIMENSION_4D) { MS_LOG(ERROR) << "input dimension num should <= " << DIMENSION_4D; return RET_ERROR; } for (int i = 0; i < param_->param_length_; ++i) { - param_->shape_[i] = in_tensors_.at(0)->DimensionSize(i); - 
param_->begin_[i] = begin.at(i); - param_->size_[i] = size.at(i) < 0 ? param_->shape_[i] - param_->begin_[i] : size.at(i); + param_->shape_[i] = in_tensor->DimensionSize(i); + param_->begin_[i] = begin[i]; + param_->size_[i] = size[i] < 0 ? param_->shape_[i] - param_->begin_[i] : size[i]; param_->end_[i] = param_->begin_[i] + param_->size_[i]; } if (param_->param_length_ < DIMENSION_4D) { @@ -89,7 +95,7 @@ int SliceCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Slice, LiteKernelCreator<SliceCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Slice, LiteKernelCreator<SliceCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Slice, LiteKernelCreator<SliceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SliceFusion, LiteKernelCreator<SliceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SliceFusion, LiteKernelCreator<SliceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SliceFusion, LiteKernelCreator<SliceCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h index 01e36fb73e..742efe71df 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/slice_base.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class SliceCPUKernel : public LiteKernel { public: SliceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<SliceParameter *>(op_parameter_); } ~SliceCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc index 956e77b2d2..1e32241839 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.cc @@ -26,7 +26,6 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_SoftMax; namespace mindspore::kernel { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h index be5a638825..adca161db1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/softmax_base.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SoftmaxBaseCPUKernel : public LiteKernel { public: SoftmaxBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) { softmax_param_ = reinterpret_cast<SoftmaxParameter *>(op_parameter_); } ~SoftmaxBaseCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h 
b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h index 4a27e74c49..60a44e17ca 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/split_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/split_base.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class SplitBaseCPUKernel : public LiteKernel { public: SplitBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param = reinterpret_cast<SplitParameter *>(op_parameter_); } ~SplitBaseCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/base/stack_base.h b/mindspore/lite/src/runtime/kernel/arm/base/stack_base.h index 4ea68271c2..9adefe4ede 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/stack_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/stack_base.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class StackBaseCPUKernel : public LiteKernel { public: StackBaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~StackBaseCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc index aeed56df4a..f5dfdfd239 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.cc @@ -13,14 +13,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - #include "src/runtime/kernel/arm/base/strided_slice.h" #include <vector> #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -#include "src/ops/populate/strided_slice_populate.h" using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; @@ -61,16 +59,6 @@ int StridedSliceCPUKernel::ReSize() { if (fast_run_) { InitFastRunParam(); } - if (op_parameter_ != nullptr) { - free(op_parameter_); - op_parameter_ = nullptr; - } - op_parameter_ = PopulateStridedSliceParameter(primitive_); - if (op_parameter_ == nullptr) { - MS_LOG(ERROR) << "Malloc parameter failed"; - return RET_ERROR; - } - param_ = reinterpret_cast<StridedSliceParameter *>(op_parameter_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h index 2a2b76db76..60030db889 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/strided_slice.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class StridedSliceCPUKernel : public LiteKernel { public: StridedSliceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<StridedSliceParameter *>(parameter); } ~StridedSliceCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/switch.cc b/mindspore/lite/src/runtime/kernel/arm/base/switch.cc index a5c89f26e3..180bfe7a53 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/switch.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/switch.cc @@ -30,7 +30,6 @@ int SwitchCPUKernel::PostProcess() { MS_ASSERT(bool_tensor != nullptr); MS_ASSERT(bool_tensor->data_type() == kNumberTypeBool); MS_ASSERT(bool_tensor->Size() == 1); - MS_ASSERT(bool_tensor->Size() == 1); auto active = static_cast<bool *>(bool_tensor->data_c()); if (active == nullptr) { MS_LOG(ERROR) << "data of bool tensor is nullptr"; @@ -68,7 +67,6 @@ int SwitchCPUKernel::Run() { MS_ASSERT(bool_tensor != nullptr); MS_ASSERT(bool_tensor->data_type() == kNumberTypeBool); MS_ASSERT(bool_tensor->Size() == 1); - MS_ASSERT(bool_tensor->Size() == 1); auto active = static_cast<bool *>(bool_tensor->data_c()); if (active == nullptr) { MS_LOG(ERROR) << "data of bool tensor is nullptr"; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/switch.h b/mindspore/lite/src/runtime/kernel/arm/base/switch.h index 66187bd416..f646a3fc20 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/switch.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/switch.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SwitchCPUKernel : public CarryDataKernel { public: SwitchCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CarryDataKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : CarryDataKernel(parameter, inputs, outputs, ctx) {} ~SwitchCPUKernel() override = default; int PostProcess() override; int Init() 
override; diff --git a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc index 8547ffd657..04f3aee09c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc +++ b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.cc @@ -21,7 +21,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Tile; +using mindspore::schema::PrimitiveType_TileFusion; namespace mindspore::kernel { namespace { @@ -147,7 +147,7 @@ int TileCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Tile, LiteKernelCreator<TileCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Tile, LiteKernelCreator<TileCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Tile, LiteKernelCreator<TileCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TileFusion, LiteKernelCreator<TileCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_TileFusion, LiteKernelCreator<TileCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_TileFusion, LiteKernelCreator<TileCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.h b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.h index 743a12a568..86dc677142 100644 --- a/mindspore/lite/src/runtime/kernel/arm/base/tile_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/base/tile_base.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class TileCPUKernel : public LiteKernel { public: TileCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~TileCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h index 6e40578ffe..bfd2c544d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/activation_fp16.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ActivationFp16CPUKernel : public LiteKernel { public: ActivationFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { type_ = (reinterpret_cast<ActivationParameter *>(param))->type_; alpha_ = (float16_t)((reinterpret_cast<ActivationParameter *>(param))->alpha_); min_val_ = (reinterpret_cast<ActivationParameter *>(param))->min_val_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.h index 98f300334a..47d8342429 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_compare_fp16.h @@ -37,9 +37,8 @@ typedef struct { class 
ArithmeticCompareFP16CPUKernel : public LiteKernel { public: ArithmeticCompareFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<ArithmeticParameter *>(parameter); } ~ArithmeticCompareFP16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc index 5ff560dc7b..e70d95b553 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.cc @@ -23,8 +23,8 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Add; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_AddFusion; +using mindspore::schema::PrimitiveType_DivFusion; using mindspore::schema::PrimitiveType_Eltwise; using mindspore::schema::PrimitiveType_Equal; using mindspore::schema::PrimitiveType_FloorDiv; @@ -37,10 +37,10 @@ using mindspore::schema::PrimitiveType_LogicalAnd; using mindspore::schema::PrimitiveType_LogicalOr; using mindspore::schema::PrimitiveType_Maximum; using mindspore::schema::PrimitiveType_Minimum; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; using mindspore::schema::PrimitiveType_SquaredDifference; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; namespace mindspore::kernel { int ArithmeticFP16CPUKernel::ReSize() { @@ -61,20 +61,20 @@ int ArithmeticFP16CPUKernel::CheckDataType() { return RET_OK; } -void ArithmeticFP16CPUKernel::InitRunFunction() { +void ArithmeticFP16CPUKernel::InitRunFunction(int primitive_type) { ARITHMETIC_FUNC_INFO_FP16 fun_table[] = { - {PrimitiveType_Mul, schema::ActivationType_RELU, ElementMulReluFp16, ElementOptMulReluFp16}, - {PrimitiveType_Mul, schema::ActivationType_RELU6, ElementMulRelu6Fp16, ElementOptMulRelu6Fp16}, - {PrimitiveType_Mul, schema::ActivationType_NO_ACTIVATION, ElementMulFp16, ElementOptMulFp16}, - {PrimitiveType_Add, schema::ActivationType_RELU, ElementAddReluFp16, ElementOptAddReluFp16}, - {PrimitiveType_Add, schema::ActivationType_RELU6, ElementAddRelu6Fp16, ElementOptAddRelu6Fp16}, - {PrimitiveType_Add, schema::ActivationType_NO_ACTIVATION, ElementAddFp16, ElementOptAddFp16}, - {PrimitiveType_Sub, schema::ActivationType_RELU, ElementSubReluFp16, ElementOptSubReluFp16}, - {PrimitiveType_Sub, schema::ActivationType_RELU6, ElementSubRelu6Fp16, ElementOptSubRelu6Fp16}, - {PrimitiveType_Sub, schema::ActivationType_NO_ACTIVATION, ElementSubFp16, ElementOptSubFp16}, - {PrimitiveType_Div, schema::ActivationType_RELU, ElementDivReluFp16, ElementOptDivReluFp16}, - {PrimitiveType_Div, schema::ActivationType_RELU6, ElementDivRelu6Fp16, ElementOptDivRelu6Fp16}, - {PrimitiveType_Div, schema::ActivationType_NO_ACTIVATION, ElementDivFp16, ElementOptDivFp16}, + {PrimitiveType_MulFusion, schema::ActivationType_RELU, ElementMulReluFp16, ElementOptMulReluFp16}, + {PrimitiveType_MulFusion, schema::ActivationType_RELU6, ElementMulRelu6Fp16, ElementOptMulRelu6Fp16}, + 
{PrimitiveType_MulFusion, schema::ActivationType_NO_ACTIVATION, ElementMulFp16, ElementOptMulFp16}, + {PrimitiveType_AddFusion, schema::ActivationType_RELU, ElementAddReluFp16, ElementOptAddReluFp16}, + {PrimitiveType_AddFusion, schema::ActivationType_RELU6, ElementAddRelu6Fp16, ElementOptAddRelu6Fp16}, + {PrimitiveType_AddFusion, schema::ActivationType_NO_ACTIVATION, ElementAddFp16, ElementOptAddFp16}, + {PrimitiveType_SubFusion, schema::ActivationType_RELU, ElementSubReluFp16, ElementOptSubReluFp16}, + {PrimitiveType_SubFusion, schema::ActivationType_RELU6, ElementSubRelu6Fp16, ElementOptSubRelu6Fp16}, + {PrimitiveType_SubFusion, schema::ActivationType_NO_ACTIVATION, ElementSubFp16, ElementOptSubFp16}, + {PrimitiveType_DivFusion, schema::ActivationType_RELU, ElementDivReluFp16, ElementOptDivReluFp16}, + {PrimitiveType_DivFusion, schema::ActivationType_RELU6, ElementDivRelu6Fp16, ElementOptDivRelu6Fp16}, + {PrimitiveType_DivFusion, schema::ActivationType_NO_ACTIVATION, ElementDivFp16, ElementOptDivFp16}, {PrimitiveType_FloorMod, schema::ActivationType_NO_ACTIVATION, ElementFloorModFp16, ElementOptFloorModFp16}, {PrimitiveType_FloorDiv, schema::ActivationType_NO_ACTIVATION, ElementFloorDivFp16, ElementOptFloorDivFp16}, {PrimitiveType_LogicalAnd, schema::ActivationType_NO_ACTIVATION, ElementLogicalAndFp16, ElementOptLogicalAndFp16}, @@ -86,8 +86,7 @@ void ArithmeticFP16CPUKernel::InitRunFunction() { size_t length = sizeof(fun_table) / sizeof(ARITHMETIC_FUNC_INFO_FP16); for (size_t i = 0; i < length; i++) { - if (fun_table[i].primitive_type_ == param_->op_parameter_.type_ && - fun_table[i].activation_type_ == param_->activation_type_) { + if (fun_table[i].primitive_type_ == primitive_type && fun_table[i].activation_type_ == param_->activation_type_) { arithmetic_opt_func_ = fun_table[i].opt_func_; arithmetic_func_ = fun_table[i].func_; return; @@ -173,10 +172,10 @@ void ArithmeticFP16CPUKernel::FreeFp16Buffer() { } } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Mul, LiteKernelCreator<ArithmeticFP16CPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Add, LiteKernelCreator<ArithmeticFP16CPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Sub, LiteKernelCreator<ArithmeticFP16CPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Div, LiteKernelCreator<ArithmeticFP16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MulFusion, LiteKernelCreator<ArithmeticFP16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_AddFusion, LiteKernelCreator<ArithmeticFP16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SubFusion, LiteKernelCreator<ArithmeticFP16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_DivFusion, LiteKernelCreator<ArithmeticFP16CPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorMod, LiteKernelCreator<ArithmeticFP16CPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_FloorDiv, LiteKernelCreator<ArithmeticFP16CPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogicalAnd, LiteKernelCreator<ArithmeticFP16CPUKernel>) diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h index bec8a0d620..5b25ffc7db 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_fp16.h @@ -35,15 +35,14 @@ typedef struct { class ArithmeticFP16CPUKernel : public ArithmeticCPUKernel { public: ArithmeticFP16CPUKernel(OpParameter 
*parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ArithmeticCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ArithmeticCPUKernel(parameter, inputs, outputs, ctx) {} ~ArithmeticFP16CPUKernel() = default; int ReSize() override; int Run() override; private: - void InitRunFunction() override; + void InitRunFunction(int primitive_type) override; int CheckDataType() override; int ConstTensorBroadCast() override; void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape, const int *in_strides, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.h index f30bfe0734..96e0ba04be 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/arithmetic_self_fp16.h @@ -24,9 +24,8 @@ typedef int (*ArithmeticSelfFp16Func)(float16_t *input, float16_t *output, int e class ArithmeticSelfFp16CPUKernel : public ArithmeticSelfCPUKernel { public: explicit ArithmeticSelfFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ArithmeticSelfCPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ArithmeticSelfCPUKernel(parameter, inputs, outputs, ctx) { fp16_func_ = GetArithmeticSelfFp16Fun(parameter->type_); } ~ArithmeticSelfFp16CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h index 253e7a01c3..08cb5a2f0b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/batchnorm_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class BatchnormFp16CPUKernel : public BatchnormCPUKernel { public: BatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : BatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : BatchnormCPUKernel(parameter, inputs, outputs, ctx) {} virtual ~BatchnormFp16CPUKernel() {} int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/bias_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/bias_fp16.h index 525600551b..6d00b75082 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/bias_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/bias_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class BiasCPUFp16Kernel : public LiteKernel { public: BiasCPUFp16Kernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { bias_param_ = reinterpret_cast<ArithmeticParameter *>(parameter); } 
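The fp16 arithmetic rework above makes the dispatch key explicit: InitRunFunction now receives the primitive type as an argument and scans fun_table for the entry whose primitive type and activation type both match, caching the element-wise function and its broadcast (opt) variant. A minimal sketch of that table-driven selection (ElementFn, FuncEntry, DemoAdd, DemoAddRelu, and the kDemo* constants are illustrative stand-ins, not names from this patch):

#include <cstddef>
#include <cstdio>

using ElementFn = int (*)(const float *in0, const float *in1, float *out, int size);

struct FuncEntry {
  int primitive_type;   // stand-in for e.g. PrimitiveType_AddFusion
  int activation_type;  // stand-in for e.g. ActivationType_RELU
  ElementFn func;       // the real table also carries an opt (broadcast) variant
};

int DemoAdd(const float *a, const float *b, float *out, int size) {
  for (int i = 0; i < size; ++i) out[i] = a[i] + b[i];
  return 0;
}
int DemoAddRelu(const float *a, const float *b, float *out, int size) {
  for (int i = 0; i < size; ++i) {
    float v = a[i] + b[i];
    out[i] = v > 0 ? v : 0;  // fused RELU: clamp inside the same loop
  }
  return 0;
}

constexpr int kDemoAddFusion = 1, kDemoNoAct = 0, kDemoReluAct = 1;

FuncEntry table[] = {
    {kDemoAddFusion, kDemoReluAct, DemoAddRelu},
    {kDemoAddFusion, kDemoNoAct, DemoAdd},
};

// Mirrors InitRunFunction(int primitive_type): linear scan keyed on both fields.
ElementFn Select(int primitive_type, int activation_type) {
  for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); ++i) {
    if (table[i].primitive_type == primitive_type && table[i].activation_type == activation_type) {
      return table[i].func;
    }
  }
  return nullptr;
}

int main() {
  const float a[2] = {1.0f, -3.0f}, b[2] = {2.0f, 1.0f};
  float out[2] = {0.0f, 0.0f};
  ElementFn fn = Select(kDemoAddFusion, kDemoReluAct);
  if (fn != nullptr) fn(a, b, out, 2);
  std::printf("%g %g\n", out[0], out[1]);  // prints 3 0
  return 0;
}

Keying on both fields bakes the fused activation into the selected function pointer, so RELU/RELU6 clamping happens inside one specialized loop rather than being branched on per element.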
~BiasCPUFp16Kernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h index 4db5d283b0..7264686e4c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/cast_fp16.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class CastFp16CPUKernel : public LiteKernel { public: CastFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~CastFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h index d05374468a..6d0ce99b3d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/concat_fp16.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class ConcatFp16CPUKernel : public LiteKernel { public: ConcatFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_); } ~ConcatFp16CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc index 3c2ad86598..b9152a4804 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.cc @@ -19,17 +19,12 @@ #include "nnacl/fp16/cast_fp16.h" #include "nnacl/fp16/pack_fp16.h" #include "src/runtime/kernel/arm/fp16/layout_transform_fp16.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { int Convolution1x1FP16CPUKernel::InitMatmulParam() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h index 32a6c58d8a..e08274273e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h @@ -29,10 +29,9 @@ namespace mindspore::kernel { class Convolution1x1FP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: Convolution1x1FP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, void *origin_weight, void *origin_bias, - TypeId origin_weight_data_type, TypeId origin_bias_data_type) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive), + const 
std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, void *origin_weight, + void *origin_bias, TypeId origin_weight_data_type, TypeId origin_bias_data_type) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx), origin_weight_(origin_weight), origin_bias_(origin_bias), origin_weight_data_type_(origin_weight_data_type), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc index 510bcf7b83..d785529560 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.cc @@ -17,8 +17,6 @@ #include "src/runtime/kernel/arm/fp16/convolution_base_fp16.h" #include "nnacl/fp16/cast_fp16.h" #include "src/runtime/kernel/arm/fp16/common_fp16.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h index c166e94a12..5a296e3bf0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_base_fp16.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ConvolutionBaseFP16CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionBaseFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionBaseFP16CPUKernel() override; int Init() override { return mindspore::lite::RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc index 2c12aaed5b..349c29a2cc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.cc @@ -21,6 +21,8 @@ #include "src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h" #include "src/runtime/kernel/arm/fp16/convolution_1x1_fp16.h" #include "src/runtime/kernel/arm/fp16/group_convolution_fp16.h" +#include "src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h" +#include "src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "include/errorcode.h" @@ -30,7 +32,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { @@ -86,9 +88,8 @@ int ConvolutionDelegateFP16CPUKernel::ReSize() { SetInputOutputShapeInfo(reinterpret_cast<ConvParameter *>(op_parameter_), in_tensors_.front(), out_tensors_.front(), context_); if (fp16_conv_kernel_ == nullptr) { - fp16_conv_kernel_ = - CpuConvFp16KernelSelect(in_tensors_, out_tensors_, op_parameter_, context_, primitive_, origin_weight_, - origin_bias_, origin_weight_data_type_, origin_bias_data_type_); + 
fp16_conv_kernel_ = CpuConvFp16KernelSelect(in_tensors_, out_tensors_, op_parameter_, context_, origin_weight_, + origin_bias_, origin_weight_data_type_, origin_bias_data_type_); if (fp16_conv_kernel_ == nullptr) { MS_LOG(ERROR) << "Selecting execute kernel failed for conv_kernel, got a nullptr."; return RET_ERROR; @@ -111,25 +112,22 @@ ConvParameter *CreateNewConvParameterFp16(ConvParameter *parameter) { kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const lite::InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive, - void *origin_weight, void *origin_bias, TypeId origin_weight_data_type, - TypeId origin_bias_data_type) { + const lite::InnerContext *ctx, void *origin_weight, void *origin_bias, + TypeId origin_weight_data_type, TypeId origin_bias_data_type) { auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); bool use_winograd = false; int out_unit; CheckIfUseWinogradFp16(&use_winograd, &out_unit, conv_param); kernel::LiteKernel *kernel = nullptr; if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) { - kernel = new (std::nothrow) - kernel::Convolution1x1FP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, origin_weight, origin_bias, - origin_weight_data_type, origin_bias_data_type); + kernel = new (std::nothrow) kernel::Convolution1x1FP16CPUKernel( + op_parameter, inputs, outputs, ctx, origin_weight, origin_bias, origin_weight_data_type, origin_bias_data_type); } else if (use_winograd) { kernel = new (std::nothrow) kernel::ConvolutionWinogradFP16CPUKernel( - op_parameter, inputs, outputs, ctx, primitive, out_unit, origin_weight, origin_bias, origin_bias_data_type); + op_parameter, inputs, outputs, ctx, out_unit, origin_weight, origin_bias, origin_bias_data_type); } else { - kernel = - new (std::nothrow) kernel::ConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, origin_weight, - origin_bias, origin_weight_data_type, origin_bias_data_type); + kernel = new (std::nothrow) kernel::ConvolutionFP16CPUKernel( + op_parameter, inputs, outputs, ctx, origin_weight, origin_bias, origin_weight_data_type, origin_bias_data_type); } // Once kernel is selected, init func will invoke InitWeightAndBias auto ret = kernel->Init(); @@ -212,15 +210,14 @@ static lite::Tensor *CreateOutputTensorFp16(const std::vector<int> &out_shape, kernel::LiteKernel *CreateDelegateConvFp16(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive) { - return new (std::nothrow) kernel::ConvolutionDelegateFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + const InnerContext *ctx) { + return new (std::nothrow) kernel::ConvolutionDelegateFP16CPUKernel(op_parameter, inputs, outputs, ctx); } kernel::LiteKernel *CpuGroupConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) { - bool infer_flag = (primitive != nullptr && primitive->infer_flag()); + const InnerContext *ctx) { + bool infer_flag = op_parameter->infer_flag_; auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); // update new shape info for each sub kernel int new_in_channel = inputs.at(kWeightIndex)->Channel(); @@ -298,26 +295,54 @@ kernel::LiteKernel *CpuGroupConvFp16KernelCreator(const 
std::vector<lite::Tensor } new_outputs.emplace_back(out_tensor); } - group_convs.emplace_back(CreateDelegateConvFp16( - new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx, primitive)); + group_convs.emplace_back( + CreateDelegateConvFp16(new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx)); } return new (std::nothrow) - GroupConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, primitive, group_convs, conv_param->group_); + GroupConvolutionFP16CPUKernel(op_parameter, inputs, outputs, ctx, group_convs, conv_param->group_); +} + +kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(opParameter != nullptr); + + auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); + kernel::LiteKernel *kernel; + if (conv_param->input_channel_ < 32) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWFp16CPUKernel(opParameter, inputs, outputs, ctx); + } else { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx); + } + if (kernel == nullptr) { + MS_LOG(ERROR) << "kernel is nullptr."; + free(opParameter); + return nullptr; + } + auto ret = kernel->Init(); + if (ret != RET_OK) { + MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); + delete kernel; + return nullptr; + } + return kernel; } kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const InnerContext *ctx, const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion); auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); kernel::LiteKernel *kernel = nullptr; if (conv_param->group_ == 1) { - kernel = CreateDelegateConvFp16(inputs, outputs, opParameter, ctx, primitive); + kernel = CreateDelegateConvFp16(inputs, outputs, opParameter, ctx); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = CpuConvDwFp16KernelCreator(inputs, outputs, opParameter, ctx, desc); } else { - kernel = CpuGroupConvFp16KernelCreator(inputs, outputs, opParameter, ctx, primitive); + kernel = CpuGroupConvFp16KernelCreator(inputs, outputs, opParameter, ctx); } if (kernel == nullptr) { @@ -335,5 +360,6 @@ kernel::LiteKernel *CpuConvFp16KernelCreator(const std::vector<lite::Tensor *> & } return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2D, CpuConvFp16KernelCreator) + +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2DFusion, CpuConvFp16KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.h index e7f9467c92..a4e66c3ed7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_delegate_fp16.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class 
ConvolutionDelegateFP16CPUKernel : public LiteKernel { public: ConvolutionDelegateFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDelegateFP16CPUKernel() override { FreeCopiedData(); if (fp16_conv_kernel_ != nullptr) { @@ -60,9 +59,8 @@ class ConvolutionDelegateFP16CPUKernel : public LiteKernel { kernel::LiteKernel *CpuConvFp16KernelSelect(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const lite::InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive, - void *origin_weight, void *origin_bias, TypeId origin_weight_data_type, - TypeId origin_bias_data_type); + const lite::InnerContext *ctx, void *origin_weight, void *origin_bias, + TypeId origin_weight_data_type, TypeId origin_bias_data_type); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_CONVOLUTION_DELEGATE_FP16_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc index 69113a6d0c..17e3ec4626 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.cc @@ -15,19 +15,13 @@ */ #include "src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h" -#include "src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h" #include "nnacl/fp16/pack_fp16.h" #include "nnacl/fp16/cast_fp16.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseFp16CPUKernel::~ConvolutionDepthwiseFp16CPUKernel() { @@ -127,35 +121,4 @@ int ConvolutionDepthwiseFp16CPUKernel::Run() { return ret; } -kernel::LiteKernel *CpuConvDwFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); - - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); - kernel::LiteKernel *kernel; - if (conv_param->input_channel_ < 32) { - kernel = - new (std::nothrow) kernel::ConvolutionDepthwiseSWFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); - } else { - kernel = new (std::nothrow) kernel::ConvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); - } - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - 
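The hunks above fold the depthwise creator into convolution_delegate_fp16.cc, so a single Conv2DFusion entry point now routes to every fp16 convolution variant. A minimal sketch of that routing, using only the creators shown in this patch (DispatchConvFp16 itself is a hypothetical name added here for illustration, not part of the change):

// Sketch only: condenses the group-based routing added in convolution_delegate_fp16.cc.
kernel::LiteKernel *DispatchConvFp16(const std::vector<lite::Tensor *> &inputs,
                                     const std::vector<lite::Tensor *> &outputs,
                                     OpParameter *param, const lite::InnerContext *ctx) {
  auto conv = reinterpret_cast<ConvParameter *>(param);
  if (conv->group_ == 1) {
    // Plain convolution: the delegate picks 1x1 / Winograd / generic later, in ReSize().
    return new (std::nothrow) kernel::ConvolutionDelegateFP16CPUKernel(param, inputs, outputs, ctx);
  }
  if (conv->group_ == conv->input_channel_ && conv->group_ == conv->output_channel_) {
    // Depthwise: narrow inputs (< 32 channels) use the sliding-window kernel.
    if (conv->input_channel_ < 32) {
      return new (std::nothrow) kernel::ConvolutionDepthwiseSWFp16CPUKernel(param, inputs, outputs, ctx);
    }
    return new (std::nothrow) kernel::ConvolutionDepthwiseFp16CPUKernel(param, inputs, outputs, ctx);
  }
  // Anything else is grouped convolution, split into per-group sub-kernels.
  return CpuGroupConvFp16KernelCreator(inputs, outputs, param, ctx);
}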
-REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_DepthwiseConv2D, CpuConvDwFp16KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h index a028707707..f59863ceb5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_fp16.h @@ -35,9 +35,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseFp16CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc index 4179749623..adb47b9d86 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.cc @@ -17,16 +17,11 @@ #include "src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h" #include "nnacl/fp16/pack_fp16.h" #include "nnacl/fp16/cast_fp16.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseSWFp16CPUKernel::~ConvolutionDepthwiseSWFp16CPUKernel() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h index 7f44731930..6bf78f8859 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_depthwise_slidewindow_fp16.h @@ -36,9 +36,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseSWFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionDepthwiseSWFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseSWFp16CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc index 6c7247ca03..67f4cb3d66 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.cc @@ -17,8 +17,6 @@ #include 
"src/runtime/kernel/arm/fp16/convolution_fp16.h" #include <vector> #include "include/errorcode.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" #include "nnacl/fp16/conv_fp16.h" #include "nnacl/fp16/matmul_fp16.h" @@ -26,12 +24,8 @@ #include "nnacl/fp16/pack_fp16.h" #include "nnacl/fp16/winograd_utils_fp16.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; -using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { int ConvolutionFP16CPUKernel::InitWeightBias() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h index 68d3d5e31a..bdc5f074d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_fp16.h @@ -26,10 +26,9 @@ namespace mindspore::kernel { class ConvolutionFP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, void *origin_weight, void *origin_bias, - TypeId origin_weight_data_type, TypeId origin_bias_data_type) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive), + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, void *origin_weight, + void *origin_bias, TypeId origin_weight_data_type, TypeId origin_bias_data_type) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx), origin_weight_(origin_weight), origin_bias_(origin_bias), origin_weight_data_type_(origin_weight_data_type), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h index 680be2cfed..0bb273a582 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/convolution_winograd_fp16.h @@ -30,10 +30,9 @@ namespace mindspore::kernel { class ConvolutionWinogradFP16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: ConvolutionWinogradFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, int out_unit, void *origin_weight, - void *origin_bias, TypeId origin_bias_data_type) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive), + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, int out_unit, + void *origin_weight, void *origin_bias, TypeId origin_bias_data_type) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx), output_unit_(out_unit), origin_weight_(origin_weight), origin_bias_(origin_bias), diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.h index e6fcc7fc77..4568c41fd3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/crop_fp16.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class CropFp16CPUKernel : public CropBaseCPUKernel { public: CropFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const 
std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : CropBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~CropFp16CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc index 9607a9c4e2..d35f867b8e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.cc @@ -16,16 +16,11 @@ #include "src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h" #include "nnacl/fp16/pack_fp16.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeDepthwiseConv2D; namespace mindspore::kernel { DeconvolutionDepthwiseFp16CPUKernel::~DeconvolutionDepthwiseFp16CPUKernel() { @@ -197,29 +192,4 @@ void DeconvolutionDepthwiseFp16CPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } - -kernel::LiteKernel *CpuDeconvDwFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); - - auto kernel = new (std::nothrow) DeconvolutionDepthwiseFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_DeDepthwiseConv2D, CpuDeconvDwFp16KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h index 71f81d5e98..e49618c6e1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h @@ -37,9 +37,8 @@ namespace mindspore::kernel { class DeconvolutionDepthwiseFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: DeconvolutionDepthwiseFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~DeconvolutionDepthwiseFp16CPUKernel() override; int Init() override; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc index c65d64f26b..5363050c6e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.cc @@ -16,6 +16,7 @@ #include "src/runtime/kernel/arm/fp16/deconvolution_fp16.h" #include "src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.h" +#include "src/runtime/kernel/arm/fp16/deconvolution_depthwise_fp16.h" #include "src/runtime/runtime_api.h" using mindspore::kernel::KERNEL_ARCH::kCPU; @@ -23,7 +24,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::kernel { DeConvolutionFp16CPUKernel::~DeConvolutionFp16CPUKernel() { @@ -210,34 +211,42 @@ int DeConvolutionFp16CPUKernel::Run() { } kernel::LiteKernel *CpuDeConvFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); - - kernel::LiteKernel *kernel; - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); - if ((conv_param->stride_h_ != 1 || conv_param->stride_w_ != 1) && - (conv_param->dilation_w_ == 1 && conv_param->dilation_h_ == 1)) { - kernel = new (std::nothrow) kernel::DeConvWinogradFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(op_parameter != nullptr); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2dTransposeFusion); + + kernel::LiteKernel *kernel = nullptr; + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); + + if (conv_param->group_ == 1) { + if ((conv_param->stride_h_ != 1 || conv_param->stride_w_ != 1) && + (conv_param->dilation_w_ == 1 && conv_param->dilation_h_ == 1)) { + kernel = new (std::nothrow) kernel::DeConvWinogradFp16CPUKernel(op_parameter, inputs, outputs, ctx); + } else { + kernel = new (std::nothrow) kernel::DeConvolutionFp16CPUKernel(op_parameter, inputs, outputs, ctx); + } + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = new (std::nothrow) DeconvolutionDepthwiseFp16CPUKernel(op_parameter, inputs, outputs, ctx); } else { - kernel = new (std::nothrow) kernel::DeConvolutionFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); + MS_LOG(ERROR) << "deconv does not support group deconvolution!"; + kernel = nullptr; } if (kernel == nullptr) { MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); + free(op_parameter); return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_)); delete kernel; return nullptr; } return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat16, 
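The rewritten creator above makes the transpose-convolution entry point group-aware as well. Condensed into one selector, under the same assumptions (SelectDeconvFp16 is a hypothetical name, not part of the patch):

// Sketch only: the decision tree implemented by CpuDeConvFp16KernelCreator.
kernel::LiteKernel *SelectDeconvFp16(OpParameter *param, const std::vector<lite::Tensor *> &in,
                                     const std::vector<lite::Tensor *> &out, const lite::InnerContext *ctx) {
  auto conv = reinterpret_cast<ConvParameter *>(param);
  if (conv->group_ == 1) {
    // Winograd path only when strided but not dilated; otherwise the generic kernel.
    bool winograd = (conv->stride_h_ != 1 || conv->stride_w_ != 1) &&
                    conv->dilation_w_ == 1 && conv->dilation_h_ == 1;
    if (winograd) {
      return new (std::nothrow) kernel::DeConvWinogradFp16CPUKernel(param, in, out, ctx);
    }
    return new (std::nothrow) kernel::DeConvolutionFp16CPUKernel(param, in, out, ctx);
  }
  if (conv->group_ == conv->input_channel_ && conv->group_ == conv->output_channel_) {
    return new (std::nothrow) kernel::DeconvolutionDepthwiseFp16CPUKernel(param, in, out, ctx);
  }
  return nullptr;  // grouped (non-depthwise) deconvolution is rejected by this patch
}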
PrimitiveType_DeConv2D, CpuDeConvFp16KernelCreator) + +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Conv2dTransposeFusion, CpuDeConvFp16KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h index 4911fdb320..54e9ddcdef 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_fp16.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class DeConvolutionFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: DeConvolutionFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~DeConvolutionFp16CPUKernel() override; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc index 7b39a6b8b0..c3ad9aa89a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.cc @@ -20,11 +20,8 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeConv2D; -using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { - DeConvWinogradFp16CPUKernel::~DeConvWinogradFp16CPUKernel() { FreeResizeBuf(); FreeDeconvParam(); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.h index 3eeaad2f77..1b4220415c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/deconvolution_winograd_fp16.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class DeConvWinogradFp16CPUKernel : public ConvolutionBaseFP16CPUKernel { public: DeConvWinogradFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~DeConvWinogradFp16CPUKernel() override; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h index 38bbce8ce5..6206d6b369 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fullconnection_fp16.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class FullconnectionFP16CPUKernel : public MatmulBaseFP16CPUKernel { public: explicit FullconnectionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC 
*primitive) - : MatmulBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : MatmulBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~FullconnectionFP16CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h index 67f3410546..dc77f74daf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/fused_batchnorm_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class FusedBatchnormFp16CPUKernel : public FusedBatchnormCPUKernel { public: FusedBatchnormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : FusedBatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : FusedBatchnormCPUKernel(parameter, inputs, outputs, ctx) {} virtual ~FusedBatchnormFp16CPUKernel() {} virtual int DoExecute(int task_id); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc index c0e379d367..735a0e881b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.cc @@ -20,6 +20,7 @@ #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" #include "nnacl/fp16/cast_fp16.h" +#include "src/runtime/infer_manager.h" using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; @@ -40,10 +41,10 @@ int GatherFp16CPUKernel::ReSize() { return RET_OK; } int GatherFp16CPUKernel::PreProcess() { if (!InferShapeDone()) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(true); - auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_); + op_parameter_->infer_flag_ = true; + auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_); if (ret != 0) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(false); + op_parameter_->infer_flag_ = false; MS_LOG(ERROR) << "InferShape fail!"; return ret; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.h index 507be6ecc5..5092277749 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gather_fp16.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class GatherFp16CPUKernel : public LiteKernel { public: GatherFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~GatherFp16CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc index e516641786..da773850ee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.cc @@ -18,12 +18,12 @@ #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "include/errorcode.h" +#include "src/runtime/infer_manager.h" using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { int GroupConvolutionFP16CPUKernel::Init() { @@ -73,13 +73,14 @@ void GroupConvolutionFP16CPUKernel::FreeSubKernel() { int GroupConvolutionFP16CPUKernel::PreProcess() { if (!InferShapeDone()) { - auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_); - if (ret != RET_OK) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(false); + op_parameter_->infer_flag_ = true; + + auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_); + if (ret != 0) { + op_parameter_->infer_flag_ = false; MS_LOG(ERROR) << "InferShape fail!"; return ret; } - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(true); // if infershape func is called in runtime stage, we should malloc memory and set shape info for outputs of sub // kernels here. @@ -145,7 +146,7 @@ int GroupConvolutionFP16CPUKernel::SeparateInput(int group_id) { return RET_ERROR; } if (!(in_data_type == kNumberTypeFloat32 || in_data_type == kNumberTypeFloat16)) { - MS_LOG(ERROR) << "Invaild data type."; + MS_LOG(ERROR) << "Invalid data type."; return RET_ERROR; } if (in_tensors_.front()->data_type() == kNumberTypeFloat16) { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h index dddbcc6b20..010bc249da 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/group_convolution_fp16.h @@ -29,9 +29,8 @@ class GroupConvolutionFP16CPUKernel : public ConvolutionBaseCPUKernel { public: GroupConvolutionFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, std::vector<kernel::LiteKernel *> group_convs, const int group_num) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx), group_convs_(std::move(group_convs)), group_num_(group_num) {} // opParameter(in channel, out channel) in this kernel has been split to groups, if // you want to get real params, multiply in channel / out channel with group num diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.cc index c391993d68..0045f98c3d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.cc @@ -26,7 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Gru; +using mindspore::schema::PrimitiveType_GRU; namespace mindspore::kernel { void GruFp16CPUKernel::FreeTmpBuffer() { @@ -264,5 +264,5 @@ int GruFp16CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Gru, LiteKernelCreator<GruFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, 
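Both PreProcess hunks above (GatherFp16 and GroupConvolutionFP16) replace PrimitiveC::InferShape with the flag now carried on OpParameter plus the free function lite::KernelInferShape. The shared pattern, condensed into a sketch (any kernel following this patch would look much the same):

// Sketch only: the runtime InferShape pattern this patch standardizes on.
int PreProcess() /* override */ {
  if (!InferShapeDone()) {
    op_parameter_->infer_flag_ = true;  // shapes are being resolved at run time
    auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_);
    if (ret != 0) {
      op_parameter_->infer_flag_ = false;  // leave unset so a later pass can retry
      MS_LOG(ERROR) << "InferShape fail!";
      return ret;
    }
    // once shapes are known, allocate output memory here
  }
  return lite::RET_OK;
}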
PrimitiveType_GRU, LiteKernelCreator<GruFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.h index 8529c80a5b..a11bada349 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/gru_fp16.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class GruFp16CPUKernel : public LiteKernel { public: GruFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { gru_param_ = reinterpret_cast<GruParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.h index b4b865b3ef..c7f5645761 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/instance_norm_fp16.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class InstanceNormFp16CPUKernel : public LiteKernel { public: InstanceNormFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<InstanceNormParameter *>(parameter); } ~InstanceNormFp16CPUKernel() override { FreeTmpBuffer(); }; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.cc index 669fb8db58..c25382a4a2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.cc @@ -26,7 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Lstm; +using mindspore::schema::PrimitiveType_LSTM; namespace mindspore::kernel { void LstmFp16CPUKernel::FreeTmpBuffer() { @@ -52,7 +52,9 @@ void LstmFp16CPUKernel::FreeTmpBuffer() { void LstmFp16CPUKernel::FreeRunBuffer() { context_->allocator->Free(gate_buffer_); - context_->allocator->Free(state_buffer_); + for (int i = 0; i < 2; i++) { + context_->allocator->Free(state_buffer_[i]); + } if (!is_vec_) { for (int i = 0; i < 2; i++) { context_->allocator->Free(matmul_buffer_[i]); @@ -223,11 +225,21 @@ int LstmFp16CPUKernel::MallocRunBuffer() { MS_LOG(ERROR) << "LstmFp16CPUKernel malloc gate_buffer error."; return RET_ERROR; } - if (!(lstm_param_->smooth_ >= -FLT_EPSILON && lstm_param_->smooth_ <= FLT_EPSILON)) { - int buffer_size = 2 * lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float16_t); - state_buffer_ = reinterpret_cast<float16_t *>(context_->allocator->Malloc(buffer_size)); - if (state_buffer_ == nullptr) { - MS_LOG(ERROR) << "LstmFp16CPUKernel malloc state_buffer error."; + state_buffer_[0] = nullptr; + state_buffer_[1] = nullptr; + if (!(lstm_param_->zoneout_cell_ >= -FLT_EPSILON && lstm_param_->zoneout_cell_ <= FLT_EPSILON)) { + 
int buffer_size = lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float16_t); + state_buffer_[0] = reinterpret_cast<float16_t *>(context_->allocator->Malloc(buffer_size)); + if (state_buffer_[0] == nullptr) { + MS_LOG(ERROR) << "LstmFp16CPUKernel malloc state_buffer for cell error."; + return RET_ERROR; + } + } + if (!(lstm_param_->zoneout_hidden_ >= -FLT_EPSILON && lstm_param_->zoneout_hidden_ <= FLT_EPSILON)) { + int buffer_size = lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float16_t); + state_buffer_[1] = reinterpret_cast<float16_t *>(context_->allocator->Malloc(buffer_size)); + if (state_buffer_[1] == nullptr) { + MS_LOG(ERROR) << "LstmFp16CPUKernel malloc state_buffer for hidden error."; return RET_ERROR; } } @@ -270,5 +282,5 @@ int LstmFp16CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Lstm, LiteKernelCreator<LstmFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LSTM, LiteKernelCreator<LstmFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.h index 0cc7e69ed0..afa6ceae50 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/lstm_fp16.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class LstmFp16CPUKernel : public LiteKernel { public: LstmFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { lstm_param_ = reinterpret_cast<LstmParameter *>(op_parameter_); } @@ -46,7 +45,7 @@ class LstmFp16CPUKernel : public LiteKernel { int MallocRunBuffer(); float16_t *gate_buffer_ = nullptr; - float16_t *state_buffer_ = nullptr; + float16_t *state_buffer_[2]; float16_t *weight_i_ptr_ = nullptr; float16_t *weight_h_ptr_ = nullptr; float16_t *bias_ptr_ = nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.h index 92eb91b784..f2538a31e1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_base_fp16.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class MatmulBaseFP16CPUKernel : public LiteKernel { public: explicit MatmulBaseFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { params_ = reinterpret_cast<MatMulParameter *>(op_parameter_); } ~MatmulBaseFP16CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h index 7a0f8980de..a8e7d5b8a7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/matmul_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class MatmulFP16CPUKernel : public MatmulBaseFP16CPUKernel { public: explicit 
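The LSTM change above replaces the single smooth_-gated state buffer with two independently gated zoneout buffers, index 0 for the cell state and index 1 for the hidden state, each allocated only when its factor is nonzero. The gate test, factored out for clarity (hypothetical helper, not in the patch):

#include <cfloat>  // FLT_EPSILON

// Sketch only: a zoneout buffer is needed iff the factor is not (approximately) zero,
// i.e. the negation of the epsilon-band test used in MallocRunBuffer above.
static inline bool NeedsZoneoutBuffer(float zoneout) {
  return zoneout < -FLT_EPSILON || zoneout > FLT_EPSILON;
}
// Usage mirrors MallocRunBuffer:
//   state_buffer_[0] is allocated when NeedsZoneoutBuffer(lstm_param_->zoneout_cell_)
//   state_buffer_[1] is allocated when NeedsZoneoutBuffer(lstm_param_->zoneout_hidden_)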
MatmulFP16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : MatmulBaseFP16CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : MatmulBaseFP16CPUKernel(parameter, inputs, outputs, ctx) {} ~MatmulFP16CPUKernel() override = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc index 76e6ee8686..3665dbaab8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.cc @@ -23,7 +23,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Pad; +using mindspore::schema::PrimitiveType_PadFusion; namespace mindspore::kernel { namespace { @@ -76,5 +76,5 @@ int PadFp16CPUKernel::Run() { return ret; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pad, LiteKernelCreator<PadFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_PadFusion, LiteKernelCreator<PadFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.h index 660fd0415b..2e0d0d69f3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pad_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class PadFp16CPUKernel : public PadCPUKernel { public: PadFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : PadCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : PadCPUKernel(parameter, inputs, outputs, ctx) {} ~PadFp16CPUKernel() {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc index db2a68666b..06dc0fb5d8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.cc @@ -26,7 +26,8 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::kernel { int PoolingFp16CPUKernel::Init() { @@ -97,5 +98,6 @@ int PoolingFp16CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Pooling, LiteKernelCreator<PoolingFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_AvgPoolFusion, LiteKernelCreator<PoolingFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_MaxPoolFusion, LiteKernelCreator<PoolingFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h index 9bab2bb7a1..52341d4df1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp16/pooling_fp16.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class PoolingFp16CPUKernel : public PoolingBaseCPUKernel { public: PoolingFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~PoolingFp16CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc index 1ecde5f254..acf0f00e89 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.cc @@ -176,13 +176,12 @@ int QuantDTypeCastFp16CPUKernel::Run() { kernel::LiteKernel *CpuQuantDTypeCastFp16KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { if (opParameter == nullptr) { MS_LOG(ERROR) << "Input opParameter is nullptr!"; return nullptr; } - auto *kernel = new (std::nothrow) QuantDTypeCastFp16CPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) QuantDTypeCastFp16CPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new QuantDTypeCastFp16CPUKernel fail!"; free(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h index bd54faa0a4..62257d0e75 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/quant_dtype_cast_fp16.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class QuantDTypeCastFp16CPUKernel : public LiteKernel { public: QuantDTypeCastFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_num_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_num_(ctx->thread_num_) {} ~QuantDTypeCastFp16CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc index 2f5a923111..2a5928b02d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.cc @@ -28,7 +28,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; using mindspore::schema::ReduceMode; using mindspore::schema::ReduceMode_ReduceMax; using mindspore::schema::ReduceMode_ReduceMean; @@ -142,5 +142,5 @@ int ReduceFp16CPUKernel::MallocTmpBuffer() { return RET_OK; } -REG_KERNEL(kCPU, 
kNumberTypeFloat16, PrimitiveType_Reduce, LiteKernelCreator<ReduceFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h index a7c3fbbc1a..d1a5d3a501 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/reduce_fp16.h @@ -31,9 +31,8 @@ class ReduceFp16CPUKernel : public ReduceBaseCPUKernel { public: ReduceFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ReduceBaseCPUKernel(param, inputs, outputs, ctx) {} ~ReduceFp16CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc index 9f86d14155..325a2a837e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.cc @@ -28,7 +28,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { @@ -159,5 +159,5 @@ void ScaleFp16CPUKernel::FreeTmpBuffer() { } } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Scale, LiteKernelCreator<ScaleFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_ScaleFusion, LiteKernelCreator<ScaleFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.h index a54b95c017..fc7bf76639 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/scale_fp16.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ScaleFp16CPUKernel : public ScaleCPUKernel { public: ScaleFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ScaleCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ScaleCPUKernel(parameter, inputs, outputs, ctx) {} ~ScaleFp16CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc index 98fd9cb1ee..c0c09ffea9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.cc @@ -28,7 +28,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::kernel { int SoftmaxFp16CPUKernel::Init() { @@ -99,5 +99,5 @@ int SoftmaxFp16CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_SoftMax, 
LiteKernelCreator<SoftmaxFp16CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_Softmax, LiteKernelCreator<SoftmaxFp16CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h index 230c2e38bd..8e5c90b78c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/softmax_fp16.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class SoftmaxFp16CPUKernel : public SoftmaxBaseCPUKernel { public: SoftmaxFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), sum_data_(nullptr) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx), sum_data_(nullptr) {} ~SoftmaxFp16CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h index e234f418e4..d0be557794 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/stack_fp16.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class StackFp16CPUKernel : public StackBaseCPUKernel { public: StackFp16CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : StackBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : StackBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~StackFp16CPUKernel() override = default; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h index 2695f29476..c56181ec54 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16/transpose_fp16.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class TransposeFp16CPUKernel : public TransposeCPUKernel { public: explicit TransposeFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : TransposeCPUKernel(param, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : TransposeCPUKernel(param, inputs, outputs, ctx) {} ~TransposeFp16CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc index 87f8fe4887..d5d9d117ec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.cc @@ -52,13 +52,11 @@ int ActivationGradCPUKernelFp16::DoActivation(int task_id) { auto error_code = RET_OK; - if (param_act_grad_->type_ == schema::ActivationGradType_RELU) { + if (param_act_grad_->type_ == schema::ActivationType_RELU) { error_code = Fp16ReluGrad(yt_addr + start, input_addr + start, count, 
output_addr + start); - } else if (param_act_grad_->type_ == schema::ActivationGradType_SIGMOID) { + } else if (param_act_grad_->type_ == schema::ActivationType_SIGMOID) { // Sigmoid gets the input tensors in reverse order! error_code = Fp16SigmoidGrad(input_addr + start, yt_addr + start, count, output_addr + start); - } else if (param_act_grad_->type_ == schema::ActivationGradType_LOG) { - error_code = Fp16LogGrad(yt_addr + start, input_addr + start, count, output_addr + start); } else { MS_LOG(ERROR) << "Activation type error"; return RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.h index f92faa31f4..1f490c79ea 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/activation_fp16_grad.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ActivationGradCPUKernelFp16 : public LiteKernel { public: explicit ActivationGradCPUKernelFp16(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { param_act_grad_ = reinterpret_cast<ActivationGradParameterFp16 *>(param); } ~ActivationGradCPUKernelFp16() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc new file mode 100644 index 0000000000..597250ee14 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.cc @@ -0,0 +1,85 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.h" +#include "schema/model_generated.h" +#include "src/kernel_registry.h" +#include "include/errorcode.h" +#include "src/runtime/runtime_api.h" + +using mindspore::kernel::KERNEL_ARCH::kCPU; +using mindspore::lite::KernelRegistrar; +using mindspore::lite::RET_ERROR; +using mindspore::lite::RET_OK; +using mindspore::schema::PrimitiveType_LogGrad; + +namespace mindspore::kernel { +int ArithmeticSelfGradFp16CPUKernel::Init() { + if (in_tensors_.size() != 2) { + MS_LOG(ERROR) << "ArithmeticSelfGrad should have 2 input tensors"; + return RET_ERROR; + } + return RET_OK; +} + +int ArithmeticSelfGradFp16CPUKernel::ReSize() { return RET_OK; } + +int ArithmeticSelfGradFp16CPUKernel::DoActivation(int task_id) { + auto yt_addr = reinterpret_cast<float16_t *>(in_tensors_.at(0)->MutableData()); + auto input_addr = reinterpret_cast<float16_t *>(in_tensors_.at(1)->MutableData()); + auto output_addr = reinterpret_cast<float16_t *>(out_tensors_.at(0)->MutableData()); + int length = in_tensors_.at(0)->ElementsNum(); + + int stride = UP_DIV(length, thread_count_); + int count = MSMIN(stride, length - stride * task_id); + int start = stride * task_id; + + auto error_code = RET_OK; + + if (param_act_grad_->type_ == schema::PrimitiveType_LogGrad) { + error_code = Fp16LogGrad(yt_addr + start, input_addr + start, count, output_addr + start); + } else { + MS_LOG(ERROR) << "Arithmetic self grad type error"; + return RET_ERROR; + } + if (error_code != RET_OK) { + return RET_ERROR; + } + return RET_OK; +} + +int ArithmeticSelfGradFp16Run(void *cdata, int task_id) { + MS_ASSERT(cdata != nullptr); + auto self_grad_kernel = reinterpret_cast<ArithmeticSelfGradFp16CPUKernel *>(cdata); + auto error_code = self_grad_kernel->DoActivation(task_id); + if (error_code != RET_OK) { + MS_LOG(ERROR) << "ArithmeticSelfGradFp16Run error task_id[" << task_id << "] error_code[" << error_code << "]"; + return RET_ERROR; + } + return RET_OK; +} + +int ArithmeticSelfGradFp16CPUKernel::Run() { + int error_code = ParallelLaunch(this->context_->thread_pool_, ArithmeticSelfGradFp16Run, this, thread_count_); + if (error_code != RET_OK) { + MS_LOG(ERROR) << "ArithmeticSelfGrad function error, error_code[" << error_code << "]"; + return RET_ERROR; + } + return RET_OK; +} + +REG_KERNEL(kCPU, kNumberTypeFloat16, PrimitiveType_LogGrad, LiteKernelCreator<ArithmeticSelfGradFp16CPUKernel>) +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.h new file mode 100644 index 0000000000..9dd7f575c7 --- /dev/null +++ b/mindspore/lite/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad.h @@ -0,0 +1,45 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GRAD_ARITHMETIC_SELF_GRAD_H_ +#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GRAD_ARITHMETIC_SELF_GRAD_H_ + +#include <vector> +#include "src/lite_kernel.h" +#include "nnacl/fp16_grad/arithmetic_self_grad.h" + +namespace mindspore::kernel { +class ArithmeticSelfGradFp16CPUKernel : public LiteKernel { + public: + explicit ArithmeticSelfGradFp16CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { + param_act_grad_ = reinterpret_cast<ArithmeticSelfGradParameterFp16 *>(param); + } + ~ArithmeticSelfGradFp16CPUKernel() override = default; + + int Init() override; + int ReSize() override; + int Run() override; + int DoActivation(int task_id); + + private: + ArithmeticSelfGradParameterFp16 *param_act_grad_; + int thread_count_; +}; +} // namespace mindspore::kernel + +#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP16_GRAD_ARITHMETIC_SELF_GRAD_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.h index 9ea331c41e..9662017f43 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/activation_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ActivationCPUKernel : public LiteKernel { public: ActivationCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { type_ = (reinterpret_cast<ActivationParameter *>(param))->type_; alpha_ = (reinterpret_cast<ActivationParameter *>(param))->alpha_; min_val_ = (reinterpret_cast<ActivationParameter *>(param))->min_val_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc index 01ced8bb4a..29a603238b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Adder; +using mindspore::schema::PrimitiveType_AdderFusion; using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { @@ -132,5 +132,5 @@ int AdderCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Adder, LiteKernelCreator<AdderCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AdderFusion, LiteKernelCreator<AdderCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.h index 356a8ca2b1..1c5dea8736 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/adder_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class AdderCPUKernel : public ConvolutionCPUKernel { public: AdderCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> 
&inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionCPUKernel(parameter, inputs, outputs, ctx, primitive, nullptr, nullptr) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionCPUKernel(parameter, inputs, outputs, ctx, nullptr, nullptr) {} ~AdderCPUKernel() override = default; int InitWeightBias() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.h index 6d80376045..53c42c7d6b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/addn_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class AddNCPUKernel : public LiteKernel { public: AddNCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~AddNCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h index fad5565612..9518bd09f2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_compare_fp32.h @@ -26,9 +26,8 @@ typedef int (*ArithmeticCompareIntFunc)(const int *input0, const int *input1, ui class ArithmeticCompareCPUKernel : public ArithmeticCPUKernel { public: explicit ArithmeticCompareCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ArithmeticCPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ArithmeticCPUKernel(parameter, inputs, outputs, ctx) { switch (parameter->type_) { case PrimitiveType_Equal: func_fp32_ = ElementEqualFp32; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc index fca1e66fe2..1423987b65 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.cc @@ -15,7 +15,6 @@ */ #include "src/runtime/kernel/arm/fp32/arithmetic_fp32.h" #include "src/kernel_registry.h" -#include "src/ops/arithmetic.h" using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; @@ -25,7 +24,24 @@ using mindspore::schema::PrimitiveType_Eltwise; namespace mindspore::kernel { int ArithmeticCPUKernel::Init() { - InitRunFunction(); + auto primitive_type = param_->op_parameter_.type_; + if (primitive_type == schema::PrimitiveType_Eltwise) { + switch (param_->eltwise_mode_) { + case schema::EltwiseMode_PROD: + primitive_type = schema::PrimitiveType_MulFusion; + break; + case schema::EltwiseMode_SUM: + primitive_type = schema::PrimitiveType_AddFusion; + break; + case schema::EltwiseMode_MAXIMUM: + primitive_type = schema::PrimitiveType_Maximum; + break; + default: + MS_LOG(ERROR) << "Eltwise mode not supported, mode:" << param_->eltwise_mode_; + return RET_ERROR; + } + } +
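// ----------------------------------------------------------------------------------------------
// [Editorial sketch, not part of the patch] The Init() hunk above folds PrimitiveType_Eltwise
// into the fused arithmetic primitives before the fun_table lookup that follows, so the table
// needs no Eltwise rows. Below is a minimal standalone model of that remapping; the enums are
// hypothetical stand-ins for the generated schema constants, not the real ones.
namespace eltwise_sketch {
enum PrimitiveType { kEltwise, kMulFusion, kAddFusion, kMaximum };
enum EltwiseMode { kProd, kSum, kMax };
// Returns the fused primitive whose run functions implement the given eltwise mode; an unknown
// mode keeps kEltwise so the caller can reject it, mirroring the RET_ERROR branch above.
inline PrimitiveType RemapEltwise(EltwiseMode mode) {
  switch (mode) {
    case kProd:
      return kMulFusion;
    case kSum:
      return kAddFusion;
    case kMax:
      return kMaximum;
    default:
      return kEltwise;
  }
}
}  // namespace eltwise_sketch
// ----------------------------------------------------------------------------------------------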
InitRunFunction(primitive_type); if (!InferShapeDone()) { return RET_OK; } @@ -37,15 +53,7 @@ int ArithmeticCPUKernel::ReSize() { MS_LOG(ERROR) << "ArithmeticCPUKernel resize failed."; return RET_ERROR; } - auto prim = reinterpret_cast<const lite::Arithmetic *>(primitive_); - param_->broadcasting_ = prim->Broadcasting(); - param_->ndim_ = prim->NDims(); - param_->in_elements_num0_ = in_tensors_[0]->ElementsNum(); - param_->in_elements_num1_ = in_tensors_[1]->ElementsNum(); - param_->out_elements_num_ = out_tensors_[0]->ElementsNum(); - memcpy(param_->in_shape0_, prim->InShape0().data(), prim->InShape0().size() * sizeof(int)); - memcpy(param_->in_shape1_, prim->InShape1().data(), prim->InShape1().size() * sizeof(int)); - memcpy(param_->out_shape_, prim->OutputShape().data(), prim->OutputShape().size() * sizeof(int)); + CalcMultiplesAndStrides(param_); if (param_->broadcasting_) { outside_ = 1; @@ -137,25 +145,31 @@ void ArithmeticCPUKernel::FreeConstTileBuff() { return; } -void ArithmeticCPUKernel::InitRunFunction() { +void ArithmeticCPUKernel::InitRunFunction(int primitive_type) { ARITHMETIC_FUNC_INFO_FP32 fun_table[] = { - {PrimitiveType_Mul, schema::ActivationType_RELU, ElementMulRelu, ElementMulReluInt, nullptr, ElementOptMulRelu, - ElementOptMulReluInt}, - {PrimitiveType_Mul, schema::ActivationType_RELU6, ElementMulRelu6, ElementMulRelu6Int, nullptr, ElementOptMulRelu6, - ElementOptMulRelu6Int}, - {PrimitiveType_Mul, schema::ActivationType_NO_ACTIVATION, ElementMul, ElementMulInt, nullptr, ElementOptMul, + {PrimitiveType_MulFusion, schema::ActivationType_RELU, ElementMulRelu, ElementMulReluInt, nullptr, + ElementOptMulRelu, ElementOptMulReluInt}, + {PrimitiveType_MulFusion, schema::ActivationType_RELU6, ElementMulRelu6, ElementMulRelu6Int, nullptr, + ElementOptMulRelu6, ElementOptMulRelu6Int}, + {PrimitiveType_MulFusion, schema::ActivationType_NO_ACTIVATION, ElementMul, ElementMulInt, nullptr, ElementOptMul, ElementOptMulInt}, - {PrimitiveType_Add, schema::ActivationType_RELU, ElementAddRelu, nullptr, nullptr, ElementOptAddRelu, nullptr}, - {PrimitiveType_Add, schema::ActivationType_RELU6, ElementAddRelu6, nullptr, nullptr, ElementOptAddRelu6, nullptr}, - {PrimitiveType_Add, schema::ActivationType_NO_ACTIVATION, ElementAdd, ElementAddInt, nullptr, ElementOptAdd, + {PrimitiveType_AddFusion, schema::ActivationType_RELU, ElementAddRelu, nullptr, nullptr, ElementOptAddRelu, + nullptr}, + {PrimitiveType_AddFusion, schema::ActivationType_RELU6, ElementAddRelu6, nullptr, nullptr, ElementOptAddRelu6, + nullptr}, + {PrimitiveType_AddFusion, schema::ActivationType_NO_ACTIVATION, ElementAdd, ElementAddInt, nullptr, ElementOptAdd, ElementOptAddInt}, - {PrimitiveType_Sub, schema::ActivationType_RELU, ElementSubRelu, nullptr, nullptr, ElementOptSubRelu, nullptr}, - {PrimitiveType_Sub, schema::ActivationType_RELU6, ElementSubRelu6, nullptr, nullptr, ElementOptSubRelu6, nullptr}, - {PrimitiveType_Sub, schema::ActivationType_NO_ACTIVATION, ElementSub, ElementSubInt, nullptr, ElementOptSub, + {PrimitiveType_SubFusion, schema::ActivationType_RELU, ElementSubRelu, nullptr, nullptr, ElementOptSubRelu, + nullptr}, + {PrimitiveType_SubFusion, schema::ActivationType_RELU6, ElementSubRelu6, nullptr, nullptr, ElementOptSubRelu6, + nullptr}, + {PrimitiveType_SubFusion, schema::ActivationType_NO_ACTIVATION, ElementSub, ElementSubInt, nullptr, ElementOptSub, ElementOptSubInt}, - {PrimitiveType_Div, schema::ActivationType_RELU, ElementDivRelu, nullptr, nullptr, ElementOptDivRelu, nullptr}, - {PrimitiveType_Div, 
schema::ActivationType_RELU6, ElementDivRelu6, nullptr, nullptr, ElementOptDivRelu6, nullptr}, - {PrimitiveType_Div, schema::ActivationType_NO_ACTIVATION, ElementDiv, nullptr, nullptr, ElementOptDiv, + {PrimitiveType_DivFusion, schema::ActivationType_RELU, ElementDivRelu, nullptr, nullptr, ElementOptDivRelu, + nullptr}, + {PrimitiveType_DivFusion, schema::ActivationType_RELU6, ElementDivRelu6, nullptr, nullptr, ElementOptDivRelu6, + nullptr}, + {PrimitiveType_DivFusion, schema::ActivationType_NO_ACTIVATION, ElementDiv, nullptr, nullptr, ElementOptDiv, ElementOptDivInt}, {PrimitiveType_RealDiv, schema::ActivationType_RELU, ElementDivRelu, nullptr, nullptr, ElementOptDivRelu, nullptr}, {PrimitiveType_RealDiv, schema::ActivationType_RELU6, ElementDivRelu6, nullptr, nullptr, ElementOptDivRelu6, @@ -181,8 +195,7 @@ void ArithmeticCPUKernel::InitRunFunction() { size_t length = sizeof(fun_table) / sizeof(ARITHMETIC_FUNC_INFO_FP32); for (size_t i = 0; i < length; i++) { - if (fun_table[i].primitive_type_ == param_->op_parameter_.type_ && - fun_table[i].activation_type_ == param_->activation_type_) { + if (fun_table[i].primitive_type_ == primitive_type && fun_table[i].activation_type_ == param_->activation_type_) { arithmetic_run_ = fun_table[i].func_; arithmetic_run_int_ = fun_table[i].int_func_; arithmetic_run_bool_ = fun_table[i].bool_func_; @@ -358,13 +371,13 @@ int ArithmeticCPUKernel::Run() { return ParallelLaunch(this->context_->thread_pool_, ArithmeticsRun, this, context_->thread_num_); } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Mul, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Mul, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Add, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Add, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Sub, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Sub, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Div, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_MulFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_MulFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AddFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_AddFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SubFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_SubFusion, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DivFusion, LiteKernelCreator<ArithmeticCPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_RealDiv, LiteKernelCreator<ArithmeticCPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Mod, LiteKernelCreator<ArithmeticCPUKernel>) REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Mod, LiteKernelCreator<ArithmeticCPUKernel>) @@ -382,5 +395,5 @@ REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorDiv, LiteKernelCreator<Ari REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_FloorMod, LiteKernelCreator<ArithmeticCPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SquaredDifference, LiteKernelCreator<ArithmeticCPUKernel>) REG_KERNEL(kCPU, kNumberTypeFloat32, 
PrimitiveType_Eltwise, LiteKernelCreator<ArithmeticCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Div, LiteKernelCreator<ArithmeticCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_DivFusion, LiteKernelCreator<ArithmeticCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h index 5be91cbe40..a84de59192 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_fp32.h @@ -20,8 +20,8 @@ #include "src/lite_kernel.h" #include "nnacl/fp32/arithmetic_fp32.h" -using mindspore::schema::PrimitiveType_Add; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_AddFusion; +using mindspore::schema::PrimitiveType_DivFusion; using mindspore::schema::PrimitiveType_Equal; using mindspore::schema::PrimitiveType_FloorDiv; using mindspore::schema::PrimitiveType_FloorMod; @@ -34,11 +34,19 @@ using mindspore::schema::PrimitiveType_LogicalOr; using mindspore::schema::PrimitiveType_Maximum; using mindspore::schema::PrimitiveType_Minimum; using mindspore::schema::PrimitiveType_Mod; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; using mindspore::schema::PrimitiveType_RealDiv; using mindspore::schema::PrimitiveType_SquaredDifference; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; + +#define CHECK_NULL_RETURN(ptr, errcode) \ + do { \ + if (ptr == nullptr) { \ + MS_LOG(ERROR) << "ptr must not be null."; \ + return errcode; \ + } \ + } while (0); @@ -70,9 +78,8 @@ class ArithmeticCPUKernel : public LiteKernel { public: ArithmeticCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<ArithmeticParameter *>(parameter); } ~ArithmeticCPUKernel() { FreeConstTileBuff(); } @@ -84,7 +91,7 @@ class ArithmeticCPUKernel : public LiteKernel { virtual int BroadcastRun(void *input0, void *input1, void *output, int dim, int out_count, int out_thread_stride); protected: - virtual void InitRunFunction(); + virtual void InitRunFunction(int primitive_type); virtual int CheckDataType(); virtual int ConstTensorBroadCast(); virtual void TileConstTensor(const void *in_data, void *out_data, size_t ndim, const int *in_shape, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.h index cfe0ed013f..8fe14cdd6a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/arithmetic_self_fp32.h @@ -40,9 +40,8 @@ typedef int (*ArithmeticSelfBoolFunc)(const bool *input, bool *output, const int class ArithmeticSelfCPUKernel : public LiteKernel { public: explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - :
LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { func_ = GetArithmeticSelfFun(parameter->type_); func_bool_ = GetArithmeticSelfBoolFun(parameter->type_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc index d9c823e57c..fda740c2e5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.cc @@ -16,7 +16,6 @@ #include "src/runtime/kernel/arm/fp32/batch_to_space_fp32.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" -#include "src/ops/batch_to_space.h" using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.h index 46994b5278..42ccb68589 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batch_to_space_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class BatchToSpaceCPUKernel : public LiteKernel { public: BatchToSpaceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~BatchToSpaceCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.h index 3768d9d96c..319f1a4aca 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/batchnorm_fp32.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class BatchnormCPUKernel : public LiteKernel { public: BatchnormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} virtual ~BatchnormCPUKernel() { FreeMeanAndVariance(); } int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.h index f9958e2410..82205f1a33 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/bias_fp32.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class BiasCPUKernel : public LiteKernel { public: BiasCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { bias_param_ = reinterpret_cast<ArithmeticParameter *>(parameter); } ~BiasCPUKernel() override = default; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to_fp32.h index 349b18ddb0..9415079d53 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/broadcast_to_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class BroadcastToCPUKernel : public LiteKernel { public: BroadcastToCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~BroadcastToCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.h index c438d1ace8..cd976f044b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/cast_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class CastCPUKernel : public LiteKernel { public: CastCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~CastCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.h index 8a03d57059..b97b9354a6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/concat_fp32.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class ConcatCPUKernel : public LiteKernel { public: ConcatCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h index 85826b7a0f..43a5602ca2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h @@ -34,8 +34,8 @@ class Convolution1x1CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, float *origin_weight, float *origin_bias) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), + float *origin_weight, float *origin_bias) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx), origin_weight_(origin_weight), origin_bias_(origin_bias) {} ~Convolution1x1CPUKernel(); diff --git 
a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc index d914834c80..7815722d26 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.cc @@ -18,6 +18,9 @@ #include "src/runtime/kernel/arm/fp32/convolution_1x1_fp32.h" #include "src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h" #include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h" +#include "src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h" +#include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h" +#include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "include/errorcode.h" @@ -28,7 +31,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { @@ -119,8 +122,8 @@ int ConvolutionDelegateCPUKernel::ReSize() { context_); if (conv_kernel_ == nullptr) { // need to select actual execute kernel here - conv_kernel_ = CpuConvFp32KernelSelect(in_tensors_, out_tensors_, op_parameter_, context_, primitive_, - origin_weight_, origin_bias_); + conv_kernel_ = + CpuConvFp32KernelSelect(in_tensors_, out_tensors_, op_parameter_, context_, origin_weight_, origin_bias_); if (conv_kernel_ == nullptr) { MS_LOG(ERROR) << "Selecting execute kernel failed for conv_kernel, got a nullptr."; return RET_ERROR; @@ -226,8 +229,7 @@ lite::Tensor *CreateOutputTensor(const std::vector<int> &out_shape, const std::v kernel::LiteKernel *CpuConvFp32KernelSelect(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive, - float *origin_weight, float *origin_bias) { + const InnerContext *ctx, float *origin_weight, float *origin_bias) { auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); bool use_winograd = false; int out_unit; @@ -235,13 +237,13 @@ kernel::LiteKernel *CpuConvFp32KernelSelect(const std::vector<lite::Tensor *> &i kernel::LiteKernel *kernel = nullptr; if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) { kernel = new (std::nothrow) - kernel::Convolution1x1CPUKernel(op_parameter, inputs, outputs, ctx, primitive, origin_weight, origin_bias); + kernel::Convolution1x1CPUKernel(op_parameter, inputs, outputs, ctx, origin_weight, origin_bias); } else if (use_winograd) { - kernel = new (std::nothrow) kernel::ConvolutionWinogradCPUKernel(op_parameter, inputs, outputs, ctx, primitive, - out_unit, origin_weight, origin_bias); - } else { kernel = new (std::nothrow) - kernel::ConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, primitive, origin_weight, origin_bias); + kernel::ConvolutionWinogradCPUKernel(op_parameter, inputs, outputs, ctx, out_unit, origin_weight, origin_bias); + } else { + kernel = + new (std::nothrow) kernel::ConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, origin_weight, origin_bias); } if (kernel != nullptr) { auto ret = kernel->Init(); @@ -256,15 +258,14 @@ kernel::LiteKernel *CpuConvFp32KernelSelect(const std::vector<lite::Tensor *> &i static kernel::LiteKernel 
*CreateDelegateConv(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive) { - return new (std::nothrow) kernel::ConvolutionDelegateCPUKernel(op_parameter, inputs, outputs, ctx, primitive); + const InnerContext *ctx) { + return new (std::nothrow) kernel::ConvolutionDelegateCPUKernel(op_parameter, inputs, outputs, ctx); } kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) { - bool infer_flag = primitive != nullptr && primitive->infer_flag(); + const InnerContext *ctx) { + bool infer_flag = op_parameter->infer_flag_; auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); int new_in_channel = inputs.at(kWeightIndex)->Channel(); int new_out_channel; @@ -341,26 +342,48 @@ kernel::LiteKernel *CpuGroupConvFp32KernelCreator(const std::vector<lite::Tensor new_outputs.emplace_back(out_tensor); } group_convs.emplace_back( - CreateDelegateConv(new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx, primitive)); + CreateDelegateConv(new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx)); } return new (std::nothrow) - GroupConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, primitive, group_convs, conv_param->group_); + GroupConvolutionCPUKernel(op_parameter, inputs, outputs, ctx, group_convs, conv_param->group_); +} + +kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const InnerContext *ctx, const kernel::KernelKey &desc) { + auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); + kernel::LiteKernel *kernel = nullptr; + if (opParameter != nullptr && opParameter->infer_flag_) { +#if defined(ENABLE_ARM64) || defined(ENABLE_AVX) + if (CheckConvDwUseIndirectBuffer(conv_param)) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseIndirectCPUKernel(opParameter, inputs, outputs, ctx); + } +#endif + if (kernel == nullptr && conv_param->input_channel_ < 32) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWCPUKernel(opParameter, inputs, outputs, ctx); + } + } + if (kernel == nullptr) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx); + } + return kernel; } kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const InnerContext *ctx, const kernel::KernelKey &desc) { MS_ASSERT(op_parameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion); MS_ASSERT(desc.data_type == kNumberTypeFloat32); auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); kernel::LiteKernel *kernel = nullptr; if (conv_param->group_ == 1) { - kernel = CreateDelegateConv(inputs, outputs, op_parameter, ctx, primitive); + kernel = CreateDelegateConv(inputs, outputs, op_parameter, ctx); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = CpuConvDwFp32KernelCreator(inputs, outputs, 
op_parameter, ctx, desc); } else { - kernel = CpuGroupConvFp32KernelCreator(inputs, outputs, op_parameter, ctx, primitive); + kernel = CpuGroupConvFp32KernelCreator(inputs, outputs, op_parameter, ctx); } if (kernel == nullptr) { @@ -379,5 +402,5 @@ kernel::LiteKernel *CpuConvFp32KernelCreator(const std::vector<lite::Tensor *> & return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2D, CpuConvFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DFusion, CpuConvFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h index ffa8d5104a..fbdf6455fc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_delegate_fp32.h @@ -17,7 +17,6 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_DELEGATE_FP32_H_ #include <vector> -#include "src/ops/conv2d.h" #include "src/lite_kernel.h" #include "nnacl/conv_parameter.h" #include "nnacl/op_base.h" @@ -27,9 +26,8 @@ namespace mindspore::kernel { class ConvolutionDelegateCPUKernel : public LiteKernel { public: ConvolutionDelegateCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDelegateCPUKernel() override { FreeCopiedData(); if (conv_kernel_ != nullptr) { @@ -83,8 +81,7 @@ lite::Tensor *CreateOutputTensor(const std::vector<int> &out_shape, const std::v kernel::LiteKernel *CpuConvFp32KernelSelect(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive, - float *origin_weight, float *origin_bias); + const InnerContext *ctx, float *origin_weight, float *origin_bias); } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_DELEGATE_FP32_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc index 3453926a35..e685fe2311 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.cc @@ -15,17 +15,12 @@ */ #include "src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwise3x3CPUKernel::~ConvolutionDepthwise3x3CPUKernel() { @@ -165,5 +160,4 @@ int ConvolutionDepthwise3x3CPUKernel::Eval() { } return RET_OK; } - } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h 
b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h index ac9047d7d2..a919a58ccb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_3x3_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ConvolutionDepthwise3x3CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwise3x3CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc index df19dd27f9..a550f4b163 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.cc @@ -15,20 +15,12 @@ */ #include "src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h" -#include <limits> -#include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h" -#include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseCPUKernel::~ConvolutionDepthwiseCPUKernel() { @@ -137,50 +129,4 @@ int ConvolutionDepthwiseCPUKernel::Eval() { } return RET_OK; } - -kernel::LiteKernel *CpuConvDwFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); - - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); - kernel::LiteKernel *kernel = nullptr; - if (primitive != nullptr && primitive->infer_flag()) { - conv_param->input_h_ = inputs[kInputIndex]->Height(); - conv_param->input_w_ = inputs[kInputIndex]->Width(); - conv_param->input_channel_ = inputs[kInputIndex]->Channel(); - conv_param->output_h_ = outputs[kOutputIndex]->Height(); - conv_param->output_w_ = outputs[kOutputIndex]->Width(); -#if defined(ENABLE_ARM64) || defined(ENABLE_AVX) - if (CheckConvDwUseIndirectBuffer(conv_param)) { - kernel = - new (std::nothrow) kernel::ConvolutionDepthwiseIndirectCPUKernel(opParameter, inputs, outputs, ctx, primitive); - } -#endif - if (kernel == nullptr && conv_param->input_channel_ < 32) { - kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWCPUKernel(opParameter, inputs, outputs, ctx, primitive); - } - } - if (kernel == nullptr) { - kernel = new (std::nothrow) kernel::ConvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive); - } - if (kernel == nullptr) { - 
MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK && ret != RET_INFER_INVALID) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DepthwiseConv2D, CpuConvDwFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h index e727e2e35e..cfe9f00ad2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32.h @@ -17,6 +17,7 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_DEPTHWISE_FP32_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_CONVOLUTION_DEPTHWISE_FP32_H_ +#include <limits> #include <vector> #include "src/lite_kernel.h" #include "src/runtime/kernel/arm/base/convolution_base.h" @@ -26,9 +27,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc index 40817bed83..d5e738a8de 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.cc @@ -15,17 +15,12 @@ */ #include "src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseIndirectCPUKernel::~ConvolutionDepthwiseIndirectCPUKernel() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h index 49eae476c2..cc6764c883 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_indirect_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseIndirectCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseIndirectCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC 
*primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseIndirectCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc index ad76675168..5188d77088 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.cc @@ -15,13 +15,9 @@ */ #include "src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h index 360223cdc4..8d3ea6d409 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_depthwise_slidewindow_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseSWCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseSWCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc index 57e6a7e447..af70dd4c22 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.cc @@ -16,19 +16,16 @@ #include "src/runtime/kernel/arm/fp32/convolution_fp32.h" #include "include/errorcode.h" +#include "nnacl/common_func.h" #include "schema/model_generated.h" #include "src/kernel_registry.h" #include "src/runtime/runtime_api.h" #include "nnacl/fp32/conv_common_fp32.h" #include "nnacl/fp32/matmul_fp32.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_INFER_INVALID; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; -using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { int ConvolutionCPUKernel::InitWeightBias() { @@ -195,5 +192,4 @@ int ConvolutionCPUKernel::Eval() { } return RET_OK; } - } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.h index e1beb5b500..c5518d97e5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_fp32.h @@ -26,9 +26,9 @@ namespace mindspore::kernel { class ConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, float *origin_weight, float *origin_bias) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, float *origin_weight, + float *origin_bias) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx), origin_weight_(origin_weight), origin_bias_(origin_bias) {} ~ConvolutionCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc index 02617bb20b..d57f531699 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.cc @@ -17,17 +17,12 @@ #include "src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h" #include "nnacl/fp32/conv_winograd_fp32.h" #include "nnacl/pack.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { int ConvolutionWinogradCPUKernel::WinogradFilterTransform(const float *weight_data, float *matrix_g, float *matrix_gt, diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h index 9efff6cda2..90e066ebc2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/convolution_winograd_fp32.h @@ -29,9 +29,8 @@ class ConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, int output_unit, float *origin_weight, - float *origin_bias) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), + int output_unit, float *origin_weight, float *origin_bias) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx), output_unit_(output_unit), origin_weight_(origin_weight), origin_bias_(origin_bias) {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.h index 50bf38c7c3..0c8c197866 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_and_resize_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class CropAndResizeCPUKernel : public LiteKernel { public: CropAndResizeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const 
std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<CropAndResizeParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.h index 0997e6a888..04cfd53fd3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/crop_fp32.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class CropCPUKernel : public CropBaseCPUKernel { public: CropCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : CropBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~CropCPUKernel() = default; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc index 9840a17f19..a6a75ad707 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.cc @@ -15,16 +15,11 @@ */ #include "src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeDepthwiseConv2D; namespace mindspore::kernel { DeconvolutionDepthwiseCPUKernel::~DeconvolutionDepthwiseCPUKernel() { @@ -194,29 +189,4 @@ void DeconvolutionDepthwiseCPUKernel::FreePackedInputOutput() { packed_output_ = nullptr; } } - -kernel::LiteKernel *CpuDeconvDwFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); - auto kernel = - new (std::nothrow) kernel::DeconvolutionDepthwiseCPUKernel(opParameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DeDepthwiseConv2D, CpuDeconvDwFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h index 4b48db40f1..0cf2b5bbe0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class 
DeconvolutionDepthwiseCPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DeconvolutionDepthwiseCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc index cb92adbd72..7b2426bed2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.cc @@ -16,6 +16,7 @@ #include "src/runtime/kernel/arm/fp32/deconvolution_fp32.h" #include "src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.h" +#include "src/runtime/kernel/arm/fp32/deconvolution_depthwise_fp32.h" #include "src/runtime/runtime_api.h" using mindspore::kernel::KERNEL_ARCH::kCPU; @@ -23,7 +24,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::kernel { DeConvolutionCPUKernel::~DeConvolutionCPUKernel() { @@ -234,35 +235,41 @@ int DeConvolutionCPUKernel::Run() { } kernel::LiteKernel *CpuDeConvFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(op_parameter != nullptr); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2dTransposeFusion); - kernel::LiteKernel *kernel; - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); - if ((conv_param->stride_h_ != 1 || conv_param->stride_w_ != 1) && - (conv_param->dilation_w_ == 1 && conv_param->dilation_h_ == 1)) { - kernel = new (std::nothrow) kernel::DeConvolutionWinogradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); + kernel::LiteKernel *kernel = nullptr; + if (conv_param->group_ == 1) { + if ((conv_param->stride_h_ != 1 || conv_param->stride_w_ != 1) && + (conv_param->dilation_w_ == 1 && conv_param->dilation_h_ == 1)) { + kernel = new (std::nothrow) kernel::DeConvolutionWinogradCPUKernel(op_parameter, inputs, outputs, ctx); + } else { + kernel = new (std::nothrow) kernel::DeConvolutionCPUKernel(op_parameter, inputs, outputs, ctx); + } + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = new (std::nothrow) kernel::DeconvolutionDepthwiseCPUKernel(op_parameter, inputs, outputs, ctx); } else { - kernel = new (std::nothrow) kernel::DeConvolutionCPUKernel(opParameter, inputs, outputs, ctx, primitive); + MS_LOG(ERROR) << "deconv does not support group deconv!"; + kernel = nullptr; } if (kernel == nullptr) { MS_LOG(ERROR) <<
"kernel is nullptr."; - free(opParameter); + free(op_parameter); return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_)); delete kernel; return nullptr; } return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_DeConv2D, CpuDeConvFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2dTransposeFusion, CpuDeConvFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.h index 5d29ba5477..4c5972d3be 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_fp32.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class DeConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: DeConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DeConvolutionCPUKernel() override; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.h index 03946adefc..c1d81ea8e3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/deconvolution_winograd_fp32.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class DeConvolutionWinogradCPUKernel : public ConvolutionBaseCPUKernel { public: DeConvolutionWinogradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DeConvolutionWinogradCPUKernel() override; int Init() override; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.h index 3681e6c7b0..4ea29427f9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/depth_to_space_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class DepthToSpaceCPUKernel : public DepthToSpaceBaseCPUKernel { public: DepthToSpaceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : 
DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DepthToSpaceCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process_fp32.h index 29da37f837..8e59997cf3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/detection_post_process_fp32.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class DetectionPostProcessCPUKernel : public DetectionPostProcessBaseCPUKernel { public: DetectionPostProcessCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : DetectionPostProcessBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : DetectionPostProcessBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DetectionPostProcessCPUKernel() = default; private: diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.h index a0dfc066d6..897addceaf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/elu_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class EluCPUKernel : public LiteKernel { public: EluCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { elu_parameter_ = reinterpret_cast<EluParameter *>(op_parameter_); } ~EluCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc index 9a83fd85b8..cc5b643f33 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.cc @@ -22,7 +22,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_EmbeddingLookup; +using mindspore::schema::PrimitiveType_EmbeddingLookupFusion; namespace mindspore::kernel { int EmbeddingLookupCPUKernel::Init() { @@ -102,5 +102,5 @@ void EmbeddingLookupCPUKernel::FreeRunBuff() { param_->is_regulated_ = nullptr; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_EmbeddingLookup, LiteKernelCreator<EmbeddingLookupCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_EmbeddingLookupFusion, LiteKernelCreator<EmbeddingLookupCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.h index ad78806765..3444c6121c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/embedding_lookup_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class EmbeddingLookupCPUKernel : public LiteKernel { public: explicit EmbeddingLookupCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const 
std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<EmbeddingLookupParameter *>(parameter); } ~EmbeddingLookupCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc index edafe1afe7..8156b2d345 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Exp; +using mindspore::schema::PrimitiveType_ExpFusion; namespace mindspore::kernel { int ExpCPUKernel::Init() { @@ -81,5 +81,5 @@ int ExpCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Exp, LiteKernelCreator<ExpCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ExpFusion, LiteKernelCreator<ExpCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.h index 6918d23dee..453c3a8ca4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/exp_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ExpCPUKernel : public LiteKernel { public: explicit ExpCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {} ~ExpCPUKernel() override{}; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.h index 92e2f7bffc..368917b0bd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fill_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class FillCPUKernel : public LiteKernel { public: FillCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~FillCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h index d1f9c5546b..16c03cbda9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fullconnection_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class FullconnectionCPUKernel : public MatmulFp32BaseCPUKernel { public: FullconnectionCPUKernel(OpParameter 
*parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : MatmulFp32BaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : MatmulFp32BaseCPUKernel(parameter, inputs, outputs, ctx) {} ~FullconnectionCPUKernel() = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.h index 0265549c9f..6ed27c85ce 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/fused_batchnorm_fp32.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class FusedBatchnormCPUKernel : public BatchnormCPUKernel { public: FusedBatchnormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : BatchnormCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : BatchnormCPUKernel(parameter, inputs, outputs, ctx) {} ~FusedBatchnormCPUKernel() { FreeScaleAndOffset(); } int Eval() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.h index 51cba5945b..da7f1ffccf 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gatherNd_fp32.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class GatherNdCPUKernel : public LiteKernel { public: GatherNdCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~GatherNdCPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc index 616644722c..f2953903a0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.cc @@ -28,6 +28,7 @@ using mindspore::schema::PrimitiveType_Gather; namespace mindspore::kernel { int GatherCPUKernel::Init() { + axis_ = *(reinterpret_cast<int *>(in_tensors_.at(2)->data_c())); if (!InferShapeDone()) { return RET_OK; } @@ -44,15 +45,13 @@ int GatherCPUKernel::DoGather(int task_id) { auto in_shape = input_tensor->shape(); int in_rank = in_shape.size(); int indices_element_size = indices_tensor->ElementsNum(); - auto axis = (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_; - - const int limit = in_shape.at(axis); + const int limit = in_shape.at(axis_); int outer_size = 1, inner_size = 1; - for (int i = 0; i < axis; ++i) { + for (int i = 0; i < axis_; ++i) { outer_size *= in_shape.at(i); } - for (int i = axis + 1; i < in_rank; ++i) { + for (int i = axis_ + 1; i < in_rank; ++i) { inner_size *= in_shape.at(i); } int stride = UP_DIV(outer_size, op_parameter_->thread_num_); diff 
--git a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.h index 212da0c7dc..86c78f10d9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gather_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class GatherCPUKernel : public LiteKernel { public: GatherCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~GatherCPUKernel() = default; int Init() override; @@ -39,6 +38,7 @@ class GatherCPUKernel : public LiteKernel { private: int *indices_data_ = nullptr; + int axis_ = 0; int AssignIndicesData(bool isIndicesInt32, int indices_num, lite::Tensor *indices_tensor); }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc index eb86a433df..087d57c282 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.cc @@ -15,15 +15,11 @@ */ #include "src/runtime/kernel/arm/fp32/group_convolution_fp32.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" +#include "src/runtime/infer_manager.h" #include "include/errorcode.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { int GroupConvolutionCPUKernel::Init() { @@ -78,13 +74,14 @@ void GroupConvolutionCPUKernel::FreeSubKernel() { int GroupConvolutionCPUKernel::PreProcess() { if (!InferShapeDone()) { - auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_); - if (ret != RET_OK) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(false); + op_parameter_->infer_flag_ = true; + + auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_); + if (ret != 0) { + op_parameter_->infer_flag_ = false; MS_LOG(ERROR) << "InferShape fail!"; return ret; } - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(true); // if infershape func is called in runtime stage, we should malloc memory and set shape info for outputs of sub // kernels here. 
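The PreProcess hunk above captures the heart of the IR unification: shape inference no longer goes through the removed PrimitiveC object but through the free function lite::KernelInferShape (from the newly included src/runtime/infer_manager.h), with success recorded in op_parameter_->infer_flag_. A minimal sketch of that control flow, assuming only the names visible in the hunk (the wrapper function itself is illustrative, not part of the patch):

// Sketch: runtime-stage shape inference under the unified IR.
// lite::KernelInferShape and OpParameter::infer_flag_ are taken from the hunk above;
// RuntimeInferShape is a hypothetical wrapper, not a function in the patch.
int RuntimeInferShape(const std::vector<lite::Tensor *> &in_tensors,
                      std::vector<lite::Tensor *> *out_tensors, OpParameter *op_parameter) {
  op_parameter->infer_flag_ = true;  // optimistic: outputs will carry inferred shapes
  auto ret = lite::KernelInferShape(in_tensors, out_tensors, op_parameter);
  if (ret != 0) {
    op_parameter->infer_flag_ = false;  // flag failure so later stages can skip this kernel
    MS_LOG(ERROR) << "InferShape fail!";
  }
  return ret;
}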
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h index a20e9db448..543646d742 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/group_convolution_fp32.h @@ -29,9 +29,8 @@ class GroupConvolutionCPUKernel : public ConvolutionBaseCPUKernel { public: GroupConvolutionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, std::vector<kernel::LiteKernel *> group_convs, - const int group_num) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), + std::vector<kernel::LiteKernel *> group_convs, const int group_num) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx), group_convs_(std::move(group_convs)), group_num_(group_num) {} // opParameter(in channel, out channel) in this kernel has been split to groups, if // you want to get real params, multiply in channel / out channel with group num diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.cc index c99a62de4f..fc4bfece90 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.cc @@ -25,7 +25,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Gru; +using mindspore::schema::PrimitiveType_GRU; namespace mindspore::kernel { void GruCPUKernel::FreeTmpBuffer() { @@ -231,5 +231,5 @@ int GruCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Gru, LiteKernelCreator<GruCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GRU, LiteKernelCreator<GruCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.h index 53ebe7a8b1..741294a7d3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/gru_fp32.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class GruCPUKernel : public LiteKernel { public: GruCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { gru_param_ = reinterpret_cast<GruParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.h index 6e0a26b03b..9976cd6889 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/instance_norm_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class InstanceNormCPUKernel : public LiteKernel { public: InstanceNormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, 
primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<InstanceNormParameter *>(parameter); } ~InstanceNormCPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/invert_permutation_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/invert_permutation_fp32.h index 3b0244ec71..83569ee8ad 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/invert_permutation_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/invert_permutation_fp32.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class InvertPermutationCPUKernel : public LiteKernel { public: InvertPermutationCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~InvertPermutationCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc index 518dc0e306..e810386542 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.cc @@ -25,7 +25,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_L2Norm; +using mindspore::schema::PrimitiveType_L2NormalizeFusion; namespace mindspore::kernel { namespace { @@ -174,5 +174,5 @@ int L2NormCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2Norm, LiteKernelCreator<L2NormCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_L2NormalizeFusion, LiteKernelCreator<L2NormCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h index 4fc2e88f92..8fdc5864b9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/l2_norm_fp32.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class L2NormCPUKernel : public LiteKernel { public: L2NormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { l2_norm_param_ = reinterpret_cast<L2NormParameter *>(op_parameter_); } ~L2NormCPUKernel() { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc index 07c299ba6d..318282e776 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.cc @@ -23,7 +23,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_LayerNorm; +using mindspore::schema::PrimitiveType_LayerNormFusion; namespace 
mindspore::kernel { int LayerNormCPUKernel::Init() { @@ -93,5 +93,5 @@ int LayerNormCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LayerNorm, LiteKernelCreator<LayerNormCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LayerNormFusion, LiteKernelCreator<LayerNormCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.h index 7e98ba915b..4bd9255890 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/layer_norm_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class LayerNormCPUKernel : public LiteKernel { public: LayerNormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<LayerNormParameter *>(parameter); } ~LayerNormCPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc index bbdc7eca47..4fa01c9c16 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.cc @@ -25,7 +25,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_LocalResponseNormalization; +using mindspore::schema::PrimitiveType_LRN; namespace mindspore::kernel { @@ -82,6 +82,5 @@ int LocalResponseNormCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LocalResponseNormalization, - LiteKernelCreator<LocalResponseNormCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LRN, LiteKernelCreator<LocalResponseNormCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.h index 5600993994..e83444f029 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/local_response_norm_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class LocalResponseNormCPUKernel : public LiteKernel { public: LocalResponseNormCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~LocalResponseNormCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.h index 5da3b5a332..69a92634c3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/lsh_projection_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class LshProjectionCPUKernel : public LiteKernel { public: LshProjectionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<LshProjectionParameter *>(op_parameter_); } ~LshProjectionCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.cc index b036ba9181..71dbf39068 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.cc @@ -27,7 +27,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Lstm; +using mindspore::schema::PrimitiveType_LSTM; namespace mindspore::kernel { void LstmCPUKernel::FreeTmpBuffer() { @@ -49,7 +49,9 @@ void LstmCPUKernel::FreeTmpBuffer() { void LstmCPUKernel::FreeRunBuffer() { context_->allocator->Free(gate_buffer_); - context_->allocator->Free(state_buffer_); + for (int i = 0; i < 2; i++) { + context_->allocator->Free(state_buffer_[i]); + } if (!is_vec_) { for (int i = 0; i < 2; i++) { context_->allocator->Free(matmul_buffer_[i]); @@ -187,11 +189,21 @@ int LstmCPUKernel::MallocRunBuffer() { MS_LOG(ERROR) << "LstmCPUKernel malloc gate_buffer error."; return RET_ERROR; } - if (!(lstm_param_->smooth_ >= -FLT_EPSILON && lstm_param_->smooth_ <= FLT_EPSILON)) { - int buffer_size = 2 * lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float); - state_buffer_ = reinterpret_cast<float *>(context_->allocator->Malloc(buffer_size)); - if (state_buffer_ == nullptr) { - MS_LOG(ERROR) << "LstmCPUKernel malloc state_buffer error."; + state_buffer_[0] = nullptr; + state_buffer_[1] = nullptr; + if (!(lstm_param_->zoneout_cell_ >= -FLT_EPSILON && lstm_param_->zoneout_cell_ <= FLT_EPSILON)) { + int buffer_size = lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float); + state_buffer_[0] = reinterpret_cast<float *>(context_->allocator->Malloc(buffer_size)); + if (state_buffer_[0] == nullptr) { + MS_LOG(ERROR) << "LstmCPUKernel malloc state_buffer for cell error."; + return RET_ERROR; + } + } + if (!(lstm_param_->zoneout_hidden_ >= -FLT_EPSILON && lstm_param_->zoneout_hidden_ <= FLT_EPSILON)) { + int buffer_size = lstm_param_->batch_ * lstm_param_->hidden_size_ * sizeof(float); + state_buffer_[1] = reinterpret_cast<float *>(context_->allocator->Malloc(buffer_size)); + if (state_buffer_[1] == nullptr) { + MS_LOG(ERROR) << "LstmCPUKernel malloc state_buffer for hidden error."; return RET_ERROR; } } @@ -239,5 +251,5 @@ int LstmCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Lstm, LiteKernelCreator<LstmCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_LSTM, LiteKernelCreator<LstmCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.h index d0f91671b6..90256141b7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32/lstm_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class LstmCPUKernel : public LiteKernel { public: LstmCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { lstm_param_ = reinterpret_cast<LstmParameter *>(op_parameter_); } @@ -45,7 +44,7 @@ class LstmCPUKernel : public LiteKernel { int InitWeightBias(); float *gate_buffer_ = nullptr; - float *state_buffer_ = nullptr; + float *state_buffer_[2]; float *weight_i_ptr_ = nullptr; float *weight_h_ptr_ = nullptr; float *bias_ptr_ = nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h index 716bef906a..a95f3effde 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class MatmulCPUKernel : public MatmulFp32BaseCPUKernel { public: explicit MatmulCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : MatmulFp32BaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : MatmulFp32BaseCPUKernel(parameter, inputs, outputs, ctx) {} ~MatmulCPUKernel() = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.h b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.h index 5a8a5e558c..afbab54216 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/matmul_fp32_base.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class MatmulFp32BaseCPUKernel : public LiteKernel { public: MatmulFp32BaseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { params_ = reinterpret_cast<MatMulParameter *>(op_parameter_); vec_matmul_ = false; } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h index ff2c80bf04..905c833840 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/non_max_suppression_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class NonMaxSuppressionCPUKernel : public LiteKernel { public: NonMaxSuppressionCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : 
LiteKernel(parameter, inputs, outputs, ctx) {} ~NonMaxSuppressionCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/nonzero_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/nonzero_fp32.h index b7c1d61a07..0feb8a5560 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/nonzero_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/nonzero_fp32.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class NonZeroCPUKernel : public LiteKernel { public: NonZeroCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~NonZeroCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.h index f0e960561a..f590b9671b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/one_hot_fp32.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class OneHotCPUKernel : public LiteKernel { public: OneHotCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~OneHotCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc index 924884bdfc..ecba2a4952 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.cc @@ -24,7 +24,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Pad; +using mindspore::schema::PrimitiveType_PadFusion; namespace mindspore::kernel { namespace { @@ -414,5 +414,5 @@ int PadCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pad, LiteKernelCreator<PadCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PadFusion, LiteKernelCreator<PadCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.h index 92d15be16d..b27af67e04 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pad_fp32.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class PadCPUKernel : public LiteKernel { public: PadCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { pad_param_ = reinterpret_cast<PadParameter *>(parameter); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc 
b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc index f66085b278..0ccbf35cae 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.cc @@ -26,7 +26,8 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::kernel { int PoolingCPUKernel::Init() { @@ -92,5 +93,6 @@ int PoolingCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Pooling, LiteKernelCreator<PoolingCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AvgPoolFusion, LiteKernelCreator<PoolingCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_MaxPoolFusion, LiteKernelCreator<PoolingCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.h index 36e65a70f9..511ee99908 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/pooling_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class PoolingCPUKernel : public PoolingBaseCPUKernel { public: PoolingCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~PoolingCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc index 64160b15af..baf56197a9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Power; +using mindspore::schema::PrimitiveType_PowFusion; namespace mindspore::kernel { int PowerCPUKernel::Init() { return RET_OK; } @@ -62,14 +62,14 @@ int PowerCPUKernel::RunImpl(int task_id) { } float *exp_addr = nullptr; bool broadcast = true; - if (in_tensors_.size() == 2) { - exp_addr = reinterpret_cast<float *>(in_tensors_[1]->MutableData()); - MS_ASSERT(exp_addr); - broadcast = in_tensors_[0]->shape() == in_tensors_[1]->shape() ? false : true; - } + MS_ASSERT(in_tensors_.size() == 2); + exp_addr = reinterpret_cast<float *>(in_tensors_[1]->data_c()); + MS_ASSERT(exp_addr != nullptr); + broadcast = in_tensors_[0]->shape() == in_tensors_[1]->shape() ? false : true; + float *cur_exp = nullptr; if (broadcast) { - cur_exp = in_tensors_.size() == 2 ? 
exp_addr : &power_; + cur_exp = exp_addr; } else { cur_exp = exp_addr + stride * task_id; } @@ -77,5 +77,5 @@ int PowerCPUKernel::RunImpl(int task_id) { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Power, LiteKernelCreator<PowerCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PowFusion, LiteKernelCreator<PowerCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h index aed3964173..127582dda9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/power_fp32.h @@ -26,11 +26,9 @@ namespace mindspore::kernel { class PowerCPUKernel : public LiteKernel { public: PowerCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_), - power_(reinterpret_cast<PowerParameter *>(op_parameter_)->power_), scale_(reinterpret_cast<PowerParameter *>(op_parameter_)->scale_), shift_(reinterpret_cast<PowerParameter *>(op_parameter_)->shift_) {} ~PowerCPUKernel() override = default; @@ -42,7 +40,6 @@ class PowerCPUKernel : public LiteKernel { private: int thread_count_; - float power_; float scale_; float shift_; }; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc index 9547293bf7..1acc9731d1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.cc @@ -24,7 +24,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_PReLU; +using mindspore::schema::PrimitiveType_PReLUFusion; namespace mindspore::kernel { namespace { @@ -140,5 +140,5 @@ int PReluCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PReLU, LiteKernelCreator<PReluCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PReLUFusion, LiteKernelCreator<PReluCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.h index 932908153d..9841449586 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/prelu_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class PReluCPUKernel : public LiteKernel { public: PReluCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { prelu_param_ = reinterpret_cast<PReluParameter *>(op_parameter_); } ~PReluCPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.h index 47b935f4c6..11dea5a57e 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/range_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class RangeCPUKernel : public LiteKernel { public: explicit RangeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~RangeCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/rank_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/rank_fp32.h index f1ed8bef81..b034540688 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/rank_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/rank_fp32.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class RankCPUKernel : public LiteKernel { public: explicit RankCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~RankCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc index 4c3982d946..7233231452 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.cc @@ -28,7 +28,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; using mindspore::schema::ReduceMode; using mindspore::schema::ReduceMode_ReduceAll; using mindspore::schema::ReduceMode_ReduceASum; @@ -237,7 +237,8 @@ void ReduceCPUKernel::FreeTmpBuffer() { data_buffers_.clear(); } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reduce, LiteKernelCreator<ReduceCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt, PrimitiveType_Reduce, LiteKernelCreator<ReduceCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reduce, LiteKernelCreator<ReduceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeBool, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.h index cba1d7e018..06b228e341 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reduce_fp32.h @@ -36,9 +36,8 @@ class ReduceCPUKernel : public ReduceBaseCPUKernel { public: ReduceCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const 
mindspore::lite::PrimitiveC *primitive) - : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ReduceBaseCPUKernel(param, inputs, outputs, ctx) { reduce_param_ = reinterpret_cast<ReduceParameter *>(param); } ~ReduceCPUKernel() { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc index ce3bca0bd6..d379175959 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.cc @@ -31,15 +31,12 @@ namespace mindspore::kernel { int ResizeCPUKernel::Init() { auto ret = ResizeBaseCPUKernel::Init(); switch (coordinate_transform_mode_) { - case schema::CoordinateTransformMode_COMMON: case schema::CoordinateTransformMode_ASYMMETRIC: calculate_ = CalculateAsymmetric; break; case schema::CoordinateTransformMode_ALIGN_CORNERS: calculate_ = CalculateAlignCorners; break; - case schema::CoordinateTransformMode_PYTORCH_HALF_PIXEL: - case schema::CoordinateTransformMode_TF_HALF_PIXEL: case schema::CoordinateTransformMode_HALF_PIXEL: calculate_ = CalculateHalfPixel; break; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.h index 979ec547d9..d6fd27aea1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/resize_fp32.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ResizeCPUKernel : public ResizeBaseCPUKernel { public: ResizeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ResizeCPUKernel() { FreeTmpBuffer(); } diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc index e35a3864bf..ef592e1a09 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Reverse; +using mindspore::schema::PrimitiveType_ReverseV2; namespace mindspore::kernel { int ReverseCPUKernel::Stride(int index) { @@ -134,6 +134,6 @@ int ReverseCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Reverse, LiteKernelCreator<ReverseCPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Reverse, LiteKernelCreator<ReverseCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ReverseV2, LiteKernelCreator<ReverseCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_ReverseV2, LiteKernelCreator<ReverseCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.h index c2fd59828e..5486dd6f7e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ReverseCPUKernel : public LiteKernel { public: 
ReverseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ReverseCPUKernel() { if (tmp_ != nullptr) { free(tmp_); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence_fp32.h index f71b6c5d8d..9628b82b27 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/reverse_sequence_fp32.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class ReverseSequenceCPUKernel : public LiteKernel { public: ReverseSequenceCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ReverseSequenceCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.h index 7f284acac7..973fb16045 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/roi_pooling_fp32.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class ROIPoolingCPUKernel : public LiteKernel { public: ROIPoolingCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<ROIPoolingParameter *>(parameter); } ~ROIPoolingCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc index 6dc8ce26a0..2a07586e7b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { ScaleCPUKernel::~ScaleCPUKernel() { @@ -196,5 +196,5 @@ int ScaleCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Scale, LiteKernelCreator<ScaleCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ScaleFusion, LiteKernelCreator<ScaleCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.h index 180a55b375..23c89c1dd8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scale_fp32.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ScaleCPUKernel : public LiteKernel { public: 
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc
index 4fe1ed27e7..3fe4d6f405 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.cc
@@ -26,7 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_ScatterND;
+using mindspore::schema::PrimitiveType_ScatterNd;

 namespace mindspore::kernel {
 namespace {
@@ -158,5 +158,5 @@ int ScatterNDCPUKernel::Run() {
   return RET_OK;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ScatterND, LiteKernelCreator<ScatterNDCPUKernel>)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_ScatterNd, LiteKernelCreator<ScatterNDCPUKernel>)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h
index ce7cac8322..c91ab68582 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/scatter_nd_fp32.h
@@ -26,9 +26,8 @@ namespace mindspore::kernel {
 class ScatterNDCPUKernel : public LiteKernel {
  public:
   explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                              const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~ScatterNDCPUKernel() override = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/shape_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/shape_fp32.h
index 5a5b79283c..1d8db49514 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/shape_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/shape_fp32.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class ShapeCPUKernel : public LiteKernel {
  public:
   ShapeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                 const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~ShapeCPUKernel() override = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/size_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/size_fp32.h
index aaffcfb94f..ea6477863b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/size_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/size_fp32.h
@@ -28,9 +28,8 @@ namespace mindspore::kernel {
 class SizeCPUKernel : public LiteKernel {
  public:
   SizeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}

   ~SizeCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h
index 1a7580429d..2ba154862b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/skip_gram_fp32.h
@@ -27,9 +27,8 @@ namespace mindspore::kernel {
 class SkipGramCPUKernel : public LiteKernel {
  public:
   explicit SkipGramCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                             const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {}
+                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {}
   ~SkipGramCPUKernel() override = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc
index 90d65ed49a..adedc54d90 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.cc
@@ -26,7 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_SoftMax;
+using mindspore::schema::PrimitiveType_Softmax;

 namespace mindspore::kernel {
 int SoftmaxCPUKernel::Init() {
@@ -111,5 +111,5 @@ int SoftmaxCPUKernel::Run() {
   return ret;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftMax, LiteKernelCreator<SoftmaxCPUKernel>)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Softmax, LiteKernelCreator<SoftmaxCPUKernel>)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.h
index 6cd6c791fa..e4f6874a7d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/softmax_fp32.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class SoftmaxCPUKernel : public SoftmaxBaseCPUKernel {
  public:
   SoftmaxCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                   const mindspore::lite::PrimitiveC *primitive)
-      : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive), sum_data_(nullptr) {}
+                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx), sum_data_(nullptr) {}
   ~SoftmaxCPUKernel() override {
     if (sum_data_ != nullptr) {
       free(sum_data_);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h
index fa0a3ce895..06c3c3fee8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_batch_fp32.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class SpaceToBatchCPUKernel : public LiteKernel {
  public:
   SpaceToBatchCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                        const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
+                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {
     param_ = reinterpret_cast<SpaceToBatchParameter *>(op_parameter_);
   }
   ~SpaceToBatchCPUKernel() {}
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.h
index 9604614627..786fab36af 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/space_to_depth_fp32.h
@@ -23,9 +23,8 @@ namespace mindspore::kernel {
 class SpaceToDepthCPUKernel : public LiteKernel {
  public:
   SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                        const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                        const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~SpaceToDepthCPUKernel() = default;

   int SpaceToDepth(int task_id);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h
index 87f900311a..3b6e44a1f6 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32.h
@@ -29,9 +29,8 @@ namespace mindspore::kernel {
 class SparseToDenseCPUKernel : public LiteKernel {
  public:
   SparseToDenseCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                         const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
+                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {
     s2d_param = (reinterpret_cast<SparseToDenseParameter *>(op_parameter_));
     s2d_param->thread_num_ = thread_count_;
   }
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_fromtensor_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_fromtensor_fp32.h
index 94061454b5..63d58dd59e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_fromtensor_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_fromtensor_fp32.h
@@ -27,10 +27,9 @@ namespace mindspore::kernel {
 class TensorListFromTensorCPUKernel : public LiteKernel {
  public:
   TensorListFromTensorCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
-        dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
+                                const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx),
+        dtype_(static_cast<TypeId>(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_)) {}
   ~TensorListFromTensorCPUKernel() = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_getitem_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_getitem_fp32.h
index 0140acee0c..fe8b9257d3 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_getitem_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_getitem_fp32.h
@@ -27,9 +27,8 @@ namespace mindspore::kernel {
 class TensorListGetItemCPUKernel : public LiteKernel {
  public:
   TensorListGetItemCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                             const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
+                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx),
         dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
   ~TensorListGetItemCPUKernel() = default;

@@ -39,7 +38,7 @@ class TensorListGetItemCPUKernel : public LiteKernel {

  private:
   int index_ = 0;
-  TypeId dtype_ = kTypeUnknown;
+  int dtype_ = kTypeUnknown;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_reserve_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_reserve_fp32.h
index 846475ccd4..0e56aefde9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_reserve_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_reserve_fp32.h
@@ -27,10 +27,9 @@ namespace mindspore::kernel {
 class TensorListReserveCPUKernel : public LiteKernel {
  public:
   TensorListReserveCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                             const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
-        element_dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
+                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx),
+        element_dtype_(static_cast<TypeId>(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_)) {}
   ~TensorListReserveCPUKernel() = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.cc
index 83a481380a..fde278e0d5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.cc
@@ -38,8 +38,9 @@ int TensorListSetItemCPUKernel::Init() {
 }

 int TensorListSetItemCPUKernel::CheckParam() {
-  if (dtype_ != kTypeUnknown && dtype_ != input0_->tensors_data_type()) {
-    MS_LOG(ERROR) << "op dtype:" << dtype_ << " is not equal in_tensors[0] dtype:" << input0_->data_type();
+  if (dtype_ != kTypeUnknown && input0_->tensors_data_type() != kTypeUnknown &&
+      dtype_ != input0_->tensors_data_type()) {
+    MS_LOG(ERROR) << "op dtype:" << dtype_ << " is not equal in_tensors[0] dtype:" << input0_->tensors_data_type();
     return RET_ERROR;
   }
   if (in_tensors_[1]->data_type() != kNumberTypeInt && in_tensors_[1]->data_type() != kNumberTypeInt32) {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.h
index 1c3ccb8b5e..1608fa6ded 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_setitem_fp32.h
@@ -27,10 +27,9 @@ namespace mindspore::kernel {
 class TensorListSetItemCPUKernel : public LiteKernel {
  public:
   TensorListSetItemCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                             const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
-        dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
+                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx),
+        dtype_(static_cast<TypeId>(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_)) {}
   ~TensorListSetItemCPUKernel() = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_stack_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_stack_fp32.h
index 08fa4e21cb..e914a61308 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_stack_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/tensorlist_stack_fp32.h
@@ -28,11 +28,10 @@ namespace mindspore::kernel {
 class TensorListStackCPUKernel : public LiteKernel {
  public:
   TensorListStackCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                           const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive),
+                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx),
         num_element_(reinterpret_cast<TensorListParameter *>(parameter)->num_element_),
-        dtype_(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_) {}
+        dtype_(static_cast<TypeId>(reinterpret_cast<TensorListParameter *>(parameter)->element_dtype_)) {}
   ~TensorListStackCPUKernel() = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.cc
index efc4a32aa4..47329b124c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.cc
@@ -21,13 +21,11 @@ using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_TopK;
+using mindspore::schema::PrimitiveType_TopKFusion;

 namespace mindspore::kernel {
 int TopKCPUKernel::Init() {
-  TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
-  MS_ASSERT(parameter);
-  parameter->topk_node_list_ = nullptr;
+  topk_param_->topk_node_list_ = nullptr;
   if (!InferShapeDone()) {
     return RET_OK;
   }
@@ -36,11 +34,10 @@ int TopKCPUKernel::Init() {

 int TopKCPUKernel::ReSize() {
   lite::Tensor *input = in_tensors_.at(0);
-  TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
-  parameter->last_dim_size_ = input->shape().at(input->shape().size() - 1);
-  parameter->loop_num_ = 1;
+  topk_param_->last_dim_size_ = input->shape().at(input->shape().size() - 1);
+  topk_param_->loop_num_ = 1;
   for (size_t i = 0; i < input->shape().size() - 1; ++i) {
-    parameter->loop_num_ *= input->shape().at(i);
+    topk_param_->loop_num_ *= input->shape().at(i);
   }
   return RET_OK;
 }
@@ -54,26 +51,24 @@ int TopKCPUKernel::Run() {
   MS_ASSERT(output_index);
   MS_ASSERT(context_->allocator != nullptr);
-  TopkParameter *parameter = reinterpret_cast<TopkParameter *>(op_parameter_);
-  MS_ASSERT(parameter);
-  if (in_tensors_.size() == lite::kDoubleNum) {
+  if (in_tensors_.size() == 2) {
     auto input_k = reinterpret_cast<int *>(in_tensors_.at(1)->MutableData());
-    parameter->k_ = input_k[0];
+    topk_param_->k_ = input_k[0];
   }
-  if (parameter->k_ > in_tensors_.at(0)->ElementsNum()) {
+  if (topk_param_->k_ > in_tensors_.at(0)->ElementsNum()) {
     MS_LOG(ERROR) << "The k value is out of the data size range.";
     return RET_ERROR;
   }
-  parameter->topk_node_list_ = context_->allocator->Malloc(sizeof(TopkNode) * parameter->last_dim_size_);
-  if (parameter->topk_node_list_ == nullptr) {
+  topk_param_->topk_node_list_ = context_->allocator->Malloc(sizeof(TopkNode) * topk_param_->last_dim_size_);
+  if (topk_param_->topk_node_list_ == nullptr) {
     MS_LOG(ERROR) << "Memory allocation failed";
     return RET_ERROR;
   }
   Topk(input_data, output_data, output_index, reinterpret_cast<TopkParameter *>(op_parameter_));
-  context_->allocator->Free(parameter->topk_node_list_);
-  parameter->topk_node_list_ = nullptr;
+  context_->allocator->Free(topk_param_->topk_node_list_);
+  topk_param_->topk_node_list_ = nullptr;
   return RET_OK;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TopK, LiteKernelCreator<TopKCPUKernel>)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TopKFusion, LiteKernelCreator<TopKCPUKernel>)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.h
index 5e5d951352..fcb8fce080 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/topk_fp32.h
@@ -24,9 +24,10 @@ namespace mindspore::kernel {
 class TopKCPUKernel : public LiteKernel {
  public:
   explicit TopKCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                         const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {
+    topk_param_ = reinterpret_cast<TopkParameter *>(op_parameter_);
+  }
   ~TopKCPUKernel() override {}

   int Init() override;
@@ -34,6 +35,7 @@ class TopKCPUKernel : public LiteKernel {
   int Run() override;

  private:
+  TopkParameter *topk_param_;
 };
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc
index c5a2bc76ea..fa5e55e9c4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.cc
@@ -24,8 +24,6 @@ using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
 using mindspore::lite::RET_OP_EXECUTE_FAILURE;
-using mindspore::schema::PrimitiveType_Nchw2Nhwc;
-using mindspore::schema::PrimitiveType_Nhwc2Nchw;
 using mindspore::schema::PrimitiveType_Transpose;

 namespace mindspore::kernel {
@@ -38,13 +36,19 @@ int TransposeCPUKernel::Init() {

 int TransposeCPUKernel::ReSize() {
   TransposeParameter *param = reinterpret_cast<TransposeParameter *>(op_parameter_);
-  if (in_tensors_.at(kInputIndex)->shape().size() != static_cast<size_t>(param->num_axes_) &&
-      in_tensors_.size() != 2) {
+  if (in_tensors_.size() == 2) {
+    param->num_axes_ = in_tensors_.at(1)->ElementsNum();
+  }
+  if (in_tensors_.at(kInputIndex)->shape().size() != static_cast<size_t>(param->num_axes_)) {
     return RET_OK;
   }
-  if (in_tensors_.size() == 2) {
-    auto input_perm = in_tensors_.at(1);
-    MS_ASSERT(input_perm != nullptr);
-    param->num_axes_ = input_perm->ElementsNum();
+  // get perm data
+  MS_ASSERT(in_tensors_.size() == 2);
+  auto perm_tensor = in_tensors_.at(1);
+  int *perm_data = reinterpret_cast<int *>(perm_tensor->data_c());
+  MS_ASSERT(perm_data != nullptr);
+  for (int i = 0; i < param->num_axes_; ++i) {
+    param->perm_[i] = perm_data[i];
   }
   auto &inTensor = in_tensors_.front();
   auto &outTensor = out_tensors_.front();
@@ -120,6 +124,10 @@ int TransposeCPUKernel::Run() {
   MS_ASSERT(out_data_);

   TransposeParameter *param = reinterpret_cast<TransposeParameter *>(this->op_parameter_);
+  if (in_tensor->shape().size() != static_cast<size_t>(param->num_axes_)) {
+    memcpy(out_data_, in_data_, in_tensor->ElementsNum() * sizeof(float));
+    return RET_OK;
+  }
   if (in_tensors_.size() == 2) {
     auto input_perm = in_tensors_.at(1);
     MS_ASSERT(input_perm != nullptr);
@@ -132,10 +140,6 @@ int TransposeCPUKernel::Run() {
       param->perm_[i] = 0;
     }
   }
-  if (in_tensor->shape().size() != static_cast<size_t>(param->num_axes_)) {
-    memcpy(out_data_, in_data_, in_tensor->ElementsNum() * sizeof(float));
-    return RET_OK;
-  }
   auto ret = NhNcTranspose(in_tensor, out_tensor, param);
   if (ret == RET_OK) {
     return ret;
@@ -180,8 +184,4 @@ int TransposeCPUKernel::Run() {
 REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
 REG_KERNEL(kCPU, kNumberTypeInt32, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
 REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Transpose, LiteKernelCreator<TransposeCPUKernel>)
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, LiteKernelCreator<TransposeCPUKernel>)
-REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Nchw2Nhwc, LiteKernelCreator<TransposeCPUKernel>)
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, LiteKernelCreator<TransposeCPUKernel>)
-REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Nhwc2Nchw, LiteKernelCreator<TransposeCPUKernel>)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.h
index 1581b8f2bb..90926625f2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/transpose_fp32.h
@@ -28,9 +28,8 @@ namespace mindspore::kernel {
 class TransposeCPUKernel : public LiteKernel {
  public:
   explicit TransposeCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
-                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                              const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(param, inputs, outputs, ctx, primitive) {}
+                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(param, inputs, outputs, ctx) {}
   ~TransposeCPUKernel() override;

   int Init() override;
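Transpose is the one kernel above whose logic is reordered rather than merely re-signed: `ReSize()` now derives `num_axes_` from the perm tensor and caches the perm values up front, while `Run()` falls back to a plain `memcpy` whenever the input rank and `num_axes_` disagree. A hedged standalone sketch of that ReSize flow (stub types; `TransposeParam` and `kMaxTransposeDims` are stand-ins for the real nnacl definitions):

#include <cstddef>
#include <vector>

constexpr int kMaxTransposeDims = 8;  // assumed capacity of perm_[]

struct TransposeParam {  // stand-in for nnacl's TransposeParameter
  int num_axes_ = 0;
  int perm_[kMaxTransposeDims] = {0};
};

// Mirrors the new ReSize(): take num_axes_ from the perm tensor, skip work on a
// rank mismatch (Run() then just copies input to output), otherwise cache perm.
bool PreparePerm(const std::vector<int> &input_shape, const std::vector<int> &perm, TransposeParam *param) {
  param->num_axes_ = static_cast<int>(perm.size());
  if (input_shape.size() != static_cast<size_t>(param->num_axes_)) {
    return false;  // caller falls back to the identity copy in Run()
  }
  for (int i = 0; i < param->num_axes_ && i < kMaxTransposeDims; ++i) {
    param->perm_[i] = perm[i];
  }
  return true;
}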
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unique_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unique_fp32.h
index 4aa7dd801c..9ad677a17f 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unique_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unique_fp32.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class UniqueCPUKernel : public LiteKernel {
  public:
   UniqueCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                  const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~UniqueCPUKernel() = default;

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack_fp32.h
index f363b25b06..a3323a5584 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/unstack_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/unstack_fp32.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class UnstackCPUKernel : public LiteKernel {
  public:
   UnstackCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                   const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~UnstackCPUKernel() { free(output_addr_array_); }

   int Init() override;
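The fp32_grad hunks below apply the same unification to the hand-written creator functions: `const kernel::KernelKey &desc` becomes the final parameter and the trailing `PrimitiveC *` argument disappears, matching the four-argument kernel constructors. A compilable sketch of the new creator shape (stub types; `FooGradCPUKernel` and the creator name are hypothetical):

#include <new>
#include <vector>

struct OpParameter {};
namespace lite {
struct Tensor {};
struct InnerContext {};
}  // namespace lite

namespace kernel {
struct KernelKey {};

class LiteKernel {
 public:
  LiteKernel(OpParameter *, const std::vector<lite::Tensor *> &, const std::vector<lite::Tensor *> &,
             const lite::InnerContext *) {}
};

class FooGradCPUKernel : public LiteKernel {
 public:
  using LiteKernel::LiteKernel;  // inherits the unified four-argument constructor
};

// New creator signature: desc is last, and no PrimitiveC anywhere.
LiteKernel *CpuFooGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                        const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
                                        const lite::InnerContext *ctx, const KernelKey &desc) {
  (void)desc;
  return new (std::nothrow) FooGradCPUKernel(opParameter, inputs, outputs, ctx);
}
}  // namespace kernel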
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.cc b/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.cc
deleted file mode 100644
index 5fa002b447..0000000000
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.cc
+++ /dev/null
@@ -1,135 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "src/runtime/kernel/arm/fp32/upsample_fp32.h"
-#include <algorithm>
-#include "nnacl/fp32/resize_fp32.h"
-#include "src/kernel_registry.h"
-#include "include/errorcode.h"
-
-using mindspore::lite::KernelRegistrar;
-using mindspore::lite::RET_ERROR;
-using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Upsample;
-
-namespace mindspore::kernel {
-int UpsampleCPUKernel::Init() {
-  param_ = reinterpret_cast<UpsampleParameter *>(op_parameter_);
-  MS_ASSERT(param_);
-  if (!InferShapeDone()) {
-    return RET_OK;
-  }
-  return ReSize();
-}
-
-int UpsampleCPUKernel::ReSize() {
-  auto ret = RET_OK;
-  auto out_tensor = out_tensors_.at(0);
-  MS_ASSERT(out_tensor);
-  auto out_shape = out_tensor->shape();
-  if (out_shape.size() != 4) {
-    MS_LOG(ERROR) << "Upsample out tensor dim should be 4";
-    return RET_ERROR;
-  }
-  new_height_ = out_shape.at(1);
-  new_width_ = out_shape.at(2);
-
-  if (param_->method_ == 0) {  // bilinear
-    FreeTmpBuffer();
-    ret = MallocTmpBuffer();
-    if (ret != RET_OK) {
-      FreeTmpBuffer();
-      return ret;
-    }
-
-    auto input = in_tensors_.at(0);
-    MS_ASSERT(input);
-    auto input_shape = input->shape();
-    auto output = out_tensors().at(0);
-    MS_ASSERT(output);
-    auto output_shape = output->shape();
-    ret = PrepareResizeBilinear(input_shape.data(), output_shape.data(), CalculateAsymmetric, y_bottoms_, y_tops_,
-                                x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_);
-    if (ret != RET_OK) {
-      FreeTmpBuffer();
-    }
-  }
-  return ret;
-}
-
-int UpsampleImpl(void *cdata, int task_id) {
-  auto upsample_kernel = reinterpret_cast<UpsampleCPUKernel *>(cdata);
-  auto error_code = upsample_kernel->RunImpl(task_id);
-  if (error_code != RET_OK) {
-    MS_LOG(ERROR) << "Upsample Run error task_id[" << task_id << "] error_code[" << error_code << "]";
-    return RET_ERROR;
-  }
-  return RET_OK;
-}
-
-int UpsampleCPUKernel::RunImpl(int task_id) {
-  MS_ASSERT(in_tensors_.size() == 2);
-  auto input = in_tensors_.at(0);  // input to be upsampled(resized)
-  auto input_data = reinterpret_cast<float *>(input->data_c());
-  MS_ASSERT(input_data);
-
-  auto out_tensor = out_tensors_.at(0);
-  MS_ASSERT(out_tensor);
-  auto output_data = reinterpret_cast<float *>(out_tensor->data_c());
-  MS_ASSERT(output_data);
-  auto input_shape = input->shape();
-
-  int ret = 0;
-  switch (param_->method_) {
-    case static_cast<int>(schema::ResizeMethod_LINEAR): {
-      int n_h_begin, n_h_end;
-      int n = out_tensor->shape().at(0);
-      int h = new_height_;
-      int unit = UP_DIV(n * h, context_->thread_num_);
-      n_h_begin = unit * task_id;
-      n_h_end = std::min(n_h_begin + unit, n * h);
-      int c = in_tensors_.at(0)->shape().at(3);
-      float *line0 = line_buffer_ + new_width_ * c * 2 * task_id;
-      float *line1 = line0 + new_width_ * c;
-      ret = ResizeBilinear(input_data, output_data, input_shape.data(), out_tensor->shape().data(), y_bottoms_, y_tops_,
-                           x_lefts_, x_rights_, y_bottom_weights_, x_left_weights_, line0, line1, n_h_begin, n_h_end);
-      break;
-    }
-    case static_cast<int>(schema::ResizeMethod_NEAREST): {
-      ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), out_tensor->shape().data(),
-                                  CalculateAsymmetric, coordinate_transform_mode_, task_id, context_->thread_num_);
-      break;
-    }
-    default: {
-      MS_LOG(ERROR) << "Upsample unknown method " << param_->method_;
-      ret = RET_ERROR;
-    }
-  }
-  return ret;
-}
-
-int UpsampleCPUKernel::Run() {
-  int error_code = ParallelLaunch(this->context_->thread_pool_, UpsampleImpl, this, context_->thread_num_);
-  if (error_code != RET_OK) {
-    MS_LOG(ERROR) << "Upsample run error, error_code[" << error_code << "]";
-    FreeTmpBuffer();
-    return RET_ERROR;
-  }
-
-  return RET_OK;
-}
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Upsample, LiteKernelCreator<UpsampleCPUKernel>)
-}  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.h
deleted file mode 100644
index 22bd7660cb..0000000000
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/upsample_fp32.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/**
- * Copyright 2020 Huawei Technologies Co., Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_
-#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_
-
-#include <vector>
-#include "src/lite_kernel.h"
-#include "nnacl/upsample_parameter.h"
-#include "src/runtime/kernel/arm/fp32/resize_fp32.h"
-
-namespace mindspore::kernel {
-class UpsampleCPUKernel : public ResizeCPUKernel {
- public:
-  UpsampleCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                    const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                    const mindspore::lite::PrimitiveC *primitive)
-      : ResizeCPUKernel(parameter, inputs, outputs, ctx, primitive) {}
-  ~UpsampleCPUKernel() = default;
-
-  int Init() override;
-  int ReSize() override;
-  int Run() override;
-  int RunImpl(int task_id) override;
-
- private:
-  UpsampleParameter *param_ = nullptr;
-};
-}  // namespace mindspore::kernel
-
-#endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_UPSAMPLE_FP32_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h
index db591d0a57..d4ed588c76 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/where_fp32.h
@@ -29,9 +29,8 @@ namespace mindspore::kernel {
 class WhereCPUKernel : public LiteKernel {
  public:
   WhereCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                 const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx->thread_num_) {
+                 const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx->thread_num_) {
     where_param_ = reinterpret_cast<WhereParameter *>(op_parameter_);
   }
   ~WhereCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.h b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.h
index 070e6805f3..f6df9a6422 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32/zeroslike_fp32.h
@@ -23,9 +23,8 @@ namespace mindspore::kernel {
 class ZerosLikeCPUKernel : public LiteKernel {
  public:
   ZerosLikeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                     const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}

   ~ZerosLikeCPUKernel() = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
index 4aea7ab019..8c59424db2 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.cc
@@ -102,11 +102,10 @@ int ActivationGradCPUKernel::Run() {
 kernel::LiteKernel *CpuActivationGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                        const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                       const kernel::KernelKey &desc,
-                                                       const mindspore::lite::PrimitiveC *primitive) {
+                                                       const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_ActivationGrad);
-  auto *kernel = new (std::nothrow) ActivationGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ActivationGradCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ActivationGradCPUKernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
index 50c3ddb5f7..e24bf922ad 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/activation_grad.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class ActivationGradCPUKernel : public LiteKernel {
  public:
   explicit ActivationGradCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs,
-                                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                   const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {
+                                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {
     param_act_grad_ = reinterpret_cast<ActivationParameter *>(param);
   }
   ~ActivationGradCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
index 9f63ff92ad..2922ff498b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.cc
@@ -146,10 +146,9 @@ int AdamCPUKernel::OptimizerStep() {
 kernel::LiteKernel *CpuAdamFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                              const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                             const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                             const lite::PrimitiveC *primitive) {
+                                             const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Adam);
-  auto *kernel = new (std::nothrow) AdamCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) AdamCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new AdamCPUKernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
index 1b675c22cb..83f34dc382 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/adam.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class AdamCPUKernel : public OptimizerKernel {
  public:
   explicit AdamCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                         const mindspore::lite::PrimitiveC *primitive)
-      : OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 5, 9), thread_count_(ctx->thread_num_) {
+                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : OptimizerKernel(parameter, inputs, outputs, ctx, 5, 9), thread_count_(ctx->thread_num_) {
     adam_param_ = reinterpret_cast<AdamParameter *>(parameter);
   }
   ~AdamCPUKernel() override {
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
index 0fcfbe6b02..a366a3763c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.cc
@@ -119,10 +119,9 @@ int ApplyMomentumCPUKernel::OptimizerStep() {
 kernel::LiteKernel *CpuApplyMomentumFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                       const std::vector<lite::Tensor *> &outputs,
                                                       OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                      const kernel::KernelKey &desc,
-                                                      const lite::PrimitiveC *primitive) {
+                                                      const kernel::KernelKey &desc) {
   MS_ASSERT(desc.type == schema::PrimitiveType_ApplyMomentum);
-  auto *kernel = new (std::nothrow) ApplyMomentumCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ApplyMomentumCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ApplyMomentumCPUKernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
index 95b39219d6..0adc921c50 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/apply_momentum.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class ApplyMomentumCPUKernel : public OptimizerKernel {
  public:
   explicit ApplyMomentumCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                  const mindspore::lite::PrimitiveC *primitive)
-      : OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 2, 3),
+                                  const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : OptimizerKernel(parameter, inputs, outputs, ctx, 2, 3),
         thread_count_(ctx->thread_num_),
         apply_momentum_param_(nullptr) {
     apply_momentum_param_ = reinterpret_cast<ApplyMomentumParameter *>(parameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
index 9e0d588200..9187d26da9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.cc
@@ -238,13 +238,12 @@ int ArithmeticGradCPUKernel::Run() {
 kernel::LiteKernel *CpuArithmeticGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                        const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                       const kernel::KernelKey &desc,
-                                                       const mindspore::lite::PrimitiveC *primitive) {
+                                                       const kernel::KernelKey &desc) {
   MS_ASSERT(nullptr != opParameter);
   if (opParameter == nullptr) {
     return nullptr;
   }
-  auto *kernel = new (std::nothrow) ArithmeticGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ArithmeticGradCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ArithmeticGradCPUKernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
index 6932a328ea..bfb507ddd1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h
@@ -38,9 +38,8 @@ class ArithmeticGradCPUKernel : public LiteKernel {

  public:
   explicit ArithmeticGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                   const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), tile_data0(NULL), tile_data1(NULL), tile_data2(NULL) {
+                                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), tile_data0(NULL), tile_data1(NULL), tile_data2(NULL) {
     switch (Type()) {
       case PrimitiveType_MulGrad:
         arithmetic_grad_ = &ArithmeticGradCPUKernel::ArithmeticGradMul;  // this will be adjusted in InferShape
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc
index 36c27eb27f..1eea6318bf 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.cc
@@ -83,13 +83,12 @@ int ArithmeticSelfGradCPUKernel::Run() {
 kernel::LiteKernel *CpuArithmeticSelfGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                            const std::vector<lite::Tensor *> &outputs,
                                                            OpParameter *param, const lite::InnerContext *ctx,
-                                                           const kernel::KernelKey &desc,
-                                                           const mindspore::lite::PrimitiveC *primitive) {
+                                                           const kernel::KernelKey &desc) {
   if (param == nullptr) {
     MS_LOG(ERROR) << "input parameter is nullptr!";
     return nullptr;
   }
-  auto *kernel = new (std::nothrow) ArithmeticSelfGradCPUKernel(param, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ArithmeticSelfGradCPUKernel(param, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ArithmeticSelfGradCPUKernel fail!";
     free(param);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
index 28b90c9045..5b506aaaa9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/arithmetic_self_grad.h
@@ -28,9 +28,8 @@ class ArithmeticSelfGradCPUKernel : public LiteKernel {
  public:
   ArithmeticSelfGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                              const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
+                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {}
   ~ArithmeticSelfGradCPUKernel() override {}
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
index 75d83def8c..a5e59eb61b 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.cc
@@ -69,10 +69,9 @@ int AssignCPUKernel::Init() { return RET_OK; }

 kernel::LiteKernel *CpuAssignFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                               const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                               const lite::PrimitiveC *primitive) {
+                                               const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(desc.type == schema::PrimitiveType_Assign);
-  auto *kernel = new (std::nothrow) AssignCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) AssignCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new AssignCPUKernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
index 0da097de21..00c513b7a8 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/assign.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class AssignCPUKernel : public LiteKernel {
  public:
   explicit AssignCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                           const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
+                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {}
   ~AssignCPUKernel() override {}
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
index 5635d90714..d70cb3a5a5 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.cc
@@ -25,7 +25,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_BiasGrad;
+using mindspore::schema::PrimitiveType_BiasAddGrad;

 namespace mindspore::kernel {

@@ -93,11 +93,10 @@ int BiasGradCPUKernel::Run() {
 kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                  const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                                 const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                 const mindspore::lite::PrimitiveC *primitive) {
+                                                 const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_BiasGrad);
-  auto *kernel = new (std::nothrow) BiasGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  MS_ASSERT(desc.type == schema::PrimitiveType_BiasAddGrad);
+  auto *kernel = new (std::nothrow) BiasGradCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new BiasGradCPUKernel fail!";
     free(opParameter);
@@ -114,5 +113,5 @@ kernel::LiteKernel *CpuBiasGradFp32KernelCreator(const std::vector<lite::Tensor
   return kernel;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BiasGrad, CpuBiasGradFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BiasAddGrad, CpuBiasGradFp32KernelCreator)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
index ae4916a1bd..f69ad84ab4 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bias_grad.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class BiasGradCPUKernel : public LiteKernel {
  public:
   explicit BiasGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                             const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
+                             const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {
     bias_param = reinterpret_cast<ArithmeticParameter *>(parameter);
   }
   ~BiasGradCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
index 6385dcccbb..271701cc58 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.cc
@@ -32,7 +32,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_BNGrad;
+using mindspore::schema::PrimitiveType_BatchNormGrad;

 namespace mindspore::kernel {
 int BNGradCPUKernel::ReSize() {
@@ -149,11 +149,10 @@ int BNGradCPUKernel::Run() {
 kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                               const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                               const mindspore::lite::PrimitiveC *primitive) {
+                                               const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_BNGrad);
-  auto *kernel = new (std::nothrow) BNGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  MS_ASSERT(desc.type == schema::PrimitiveType_BatchNormGrad);
+  auto *kernel = new (std::nothrow) BNGradCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new BNGradCPUKernel fail!";
     free(opParameter);
@@ -169,5 +168,5 @@ kernel::LiteKernel *CpuBNGradFp32KernelCreator(const std::vector<lite::Tensor *>
   return kernel;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BNGrad, CpuBNGradFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_BatchNormGrad, CpuBNGradFp32KernelCreator)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
index d0e85384a8..baff52968a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/bn_grad.h
@@ -25,9 +25,8 @@ namespace mindspore::kernel {
 class BNGradCPUKernel : public LiteKernel {
  public:
   explicit BNGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                           const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                           const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~BNGradCPUKernel() override {}
   int Init() override;
   int ReSize() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
index 24082f283a..5659842cc1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.cc
@@ -176,12 +176,11 @@ int ConvolutionTrainCPUKernel::Run() {
 kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                                  const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                  const lite::PrimitiveC *primitive) {
+                                                  const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D || desc.type == schema::PrimitiveType_DepthwiseConv2D);
+  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion);

-  auto *kernel = new (std::nothrow) ConvolutionTrainCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ConvolutionTrainCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new ConvolutionTrainCPUKernel failed!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
index ccb634ea1a..d77ed8983d 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class ConvolutionTrainCPUKernel : public LiteKernel {
  public:
   explicit ConvolutionTrainCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                     const lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                                     const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~ConvolutionTrainCPUKernel() override {}

   int Init() override;
@@ -47,8 +46,7 @@ class ConvolutionTrainCPUKernel : public LiteKernel {
 kernel::LiteKernel *CpuConvTrainFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                   const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                                  const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                  const lite::PrimitiveC *primitive);
+                                                  const lite::InnerContext *ctx, const kernel::KernelKey &desc);
 }  // namespace mindspore::kernel

 #endif  // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_CONVOLUTION_H_
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
index 8defa03cb9..085dcec54e 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.cc
@@ -27,7 +27,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Conv2DGradFilter;
+using mindspore::schema::PrimitiveType_Conv2DBackpropFilterFusion;

 namespace mindspore::kernel {
 int ConvolutionGradFilterCPUKernel::ReSize() {
@@ -201,12 +201,11 @@ int ConvolutionGradFilterCPUKernel::Run() {
 kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                        const std::vector<lite::Tensor *> &outputs,
                                                        OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                       const kernel::KernelKey &desc,
-                                                       const mindspore::lite::PrimitiveC *primitive) {
+                                                       const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradFilter);
+  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DBackpropFilterFusion);

-  auto *kernel = new (std::nothrow) ConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) ConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new kernel fail!";
     free(opParameter);
@@ -223,5 +222,5 @@ kernel::LiteKernel *CpuConvGradFilterFp32KernelCreator(const std::vector<lite::T
   return kernel;
 }

-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DGradFilter, CpuConvGradFilterFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DBackpropFilterFusion, CpuConvGradFilterFp32KernelCreator)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
index b7cd2f9094..65a7d105f9 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_filter.h
@@ -24,10 +24,9 @@ namespace mindspore::kernel {
 class ConvolutionGradFilterCPUKernel : public LiteKernel {
  public:
   explicit ConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                          const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                          const mindspore::lite::PrimitiveC *primitive)
+                                          const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~ConvolutionGradFilterCPUKernel() override {}

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
index 4276961202..2492a6b908 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.cc
@@ -26,8 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU;
 using mindspore::lite::KernelRegistrar;
 using mindspore::lite::RET_ERROR;
 using mindspore::lite::RET_OK;
-using mindspore::schema::PrimitiveType_Conv2DGradInput;
-using mindspore::schema::PrimitiveType_GroupConv2DGradInput;
+using mindspore::schema::PrimitiveType_Conv2DBackpropInputFusion;

 namespace mindspore::kernel {
 int ConvolutionGradInputCPUKernel::ReSize() {
@@ -152,32 +151,6 @@ int ConvolutionGradInputCPUKernel::Run() {
   return RET_OK;
 }

-kernel::LiteKernel *CpuConvGradInputFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
-                                                      const std::vector<lite::Tensor *> &outputs,
-                                                      OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                      const kernel::KernelKey &desc,
-                                                      const mindspore::lite::PrimitiveC *primitive) {
-  MS_ASSERT(opParameter != nullptr);
-  MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DGradInput ||
-            desc.type == schema::PrimitiveType_GroupConv2DGradInput);
-
-  auto *kernel = new (std::nothrow) ConvolutionGradInputCPUKernel(opParameter, inputs, outputs, ctx, primitive);
-  if (kernel == nullptr) {
-    MS_LOG(ERROR) << "new kernel fail!";
-    free(opParameter);
-    return nullptr;
-  }
-
-  auto ret = kernel->Init();
-  if (ret != RET_OK) {
-    MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: "
-                  << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_));
-    delete kernel;
-    return nullptr;
-  }
-  return kernel;
-}
-
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DGradInput, CpuConvGradInputFp32KernelCreator)
-REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_GroupConv2DGradInput, CpuConvGradInputFp32KernelCreator)
+REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Conv2DBackpropInputFusion,
+           LiteKernelCreator<ConvolutionGradInputCPUKernel>)
 }  // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
index 4578992d0b..12927a2557 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/convolution_grad_input.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class ConvolutionGradInputCPUKernel : public LiteKernel {
  public:
   explicit ConvolutionGradInputCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                         const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                                         const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~ConvolutionGradInputCPUKernel() override {}

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
index f5ea14d126..42f377a9d1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.cc
@@ -132,12 +132,11 @@ int DeConvolutionGradFilterCPUKernel::Run() {
 kernel::LiteKernel *CpuDeConvGradFilterFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                          const std::vector<lite::Tensor *> &outputs,
                                                          OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                         const kernel::KernelKey &desc,
-                                                         const mindspore::lite::PrimitiveC *primitive) {
+                                                         const kernel::KernelKey &desc) {
   MS_ASSERT(opParameter != nullptr);
   MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2DGradFilter);

-  auto *kernel = new (std::nothrow) DeConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) DeConvolutionGradFilterCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new kernel fail!";
     free(opParameter);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
index cb3007c67c..ec2330ba20 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_filter.h
@@ -24,9 +24,8 @@ namespace mindspore::kernel {
 class DeConvolutionGradFilterCPUKernel : public LiteKernel {
  public:
   explicit DeConvolutionGradFilterCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                                            const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                                            const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {}
+                                            const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {}
   ~DeConvolutionGradFilterCPUKernel() override {}

   int Init() override;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
index 374b182af7..7b4ef067d1 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.cc
@@ -109,8 +109,7 @@ int DropoutCPUKernel::Run() {
 kernel::LiteKernel *CpuDropoutFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
-                                                const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                const mindspore::lite::PrimitiveC *primitive) {
+                                                const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "Dropout opParameter nullptr.";
     return nullptr;
@@ -119,7 +118,7 @@ kernel::LiteKernel *CpuDropoutFp32KernelCreator(const std::vector<lite::Tensor *
     MS_LOG(ERROR) << "Dropout desc type should be " << schema::PrimitiveType_Dropout << " got " << desc.type;
     return nullptr;
   }
-  auto *kernel = new (std::nothrow) DropoutCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) DropoutCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "Dropout new kernel failed.";
     return nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
index dbfe0252bf..b191e73830 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout.h
@@ -23,9 +23,8 @@ namespace mindspore::kernel {
 class DropoutCPUKernel : public LiteKernel {
  public:
   DropoutCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                   const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
+                   const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {}

   ~DropoutCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
index 544fe81433..5c1dd4495a 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.cc
@@ -95,8 +95,7 @@ int DropoutGradCPUKernel::Run() {
 kernel::LiteKernel *CpuDropoutGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                     const std::vector<lite::Tensor *> &outputs,
                                                     OpParameter *opParameter, const lite::InnerContext *ctx,
-                                                    const kernel::KernelKey &desc,
-                                                    const mindspore::lite::PrimitiveC *primitive) {
+                                                    const kernel::KernelKey &desc) {
   if (opParameter == nullptr) {
     MS_LOG(ERROR) << "DropoutGrad opParameter nullptr.";
     return nullptr;
@@ -105,7 +104,7 @@ kernel::LiteKernel *CpuDropoutGradFp32KernelCreator(const std::vector<lite::Tens
     MS_LOG(ERROR) << "DropoutGrad desc type should be " << schema::PrimitiveType_DropoutGrad << " got " << desc.type;
     return nullptr;
   }
-  auto *kernel = new (std::nothrow) DropoutGradCPUKernel(opParameter, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) DropoutGradCPUKernel(opParameter, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "DropoutGrad new kernel failed.";
     return nullptr;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
index 11e79a53f4..991f2c4d37 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/dropout_grad.h
@@ -23,9 +23,8 @@ namespace mindspore::kernel {
 class DropoutGradCPUKernel : public LiteKernel {
  public:
   DropoutGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                       const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                       const mindspore::lite::PrimitiveC *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {}
+                       const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {}

   ~DropoutGradCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
index 26ca5156b8..7f7cda3614 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/make_tuple.h
@@ -26,9 +26,8 @@ namespace mindspore::kernel {
 class MakeTupleCPUKernel : public LiteKernel {
  public:
   explicit MakeTupleCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
-                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
-                              const lite::Primitive *primitive)
-      : LiteKernel(parameter, inputs, outputs, ctx, primitive) {
+                              const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+      : LiteKernel(parameter, inputs, outputs, ctx) {
     param = parameter;
   }
   ~MakeTupleCPUKernel() override = default;
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
index 826cd01156..c4a2e68a3c 100644
--- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
+++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.cc
@@ -67,13 +67,12 @@ int NegGradCPUKernel::Run() {
 kernel::LiteKernel *CpuNegGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs,
                                                 const std::vector<lite::Tensor *> &outputs, OpParameter *param,
-                                                const lite::InnerContext *ctx, const kernel::KernelKey &desc,
-                                                const mindspore::lite::PrimitiveC *primitive) {
+                                                const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
   if (param == nullptr) {
     MS_LOG(ERROR) << "input parameter is nullptr!";
     return nullptr;
   }
-  auto *kernel = new (std::nothrow) NegGradCPUKernel(param, inputs, outputs, ctx, primitive);
+  auto *kernel = new (std::nothrow) NegGradCPUKernel(param, inputs, outputs, ctx);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "new NegGradCPUKernel fail!";
     free(param);
diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h index 2c2f5aad07..079f70575b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/neg_grad.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class NegGradCPUKernel : public LiteKernel { public: explicit NegGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~NegGradCPUKernel() override {} int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc index 8011384e55..fc18206f3a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.cc @@ -26,7 +26,8 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_PoolingGrad; +using mindspore::schema::PrimitiveType_AvgPoolGrad; +using mindspore::schema::PrimitiveType_MaxPoolGrad; namespace mindspore::kernel { int PoolingGradCPUKernel::ReSize() { @@ -108,12 +109,10 @@ int PoolingGradCPUKernel::Run() { kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_PoolingGrad); - auto *kernel = new (std::nothrow) PoolingGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) PoolingGradCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new PoolingGradCPUKernel fail!"; free(opParameter); @@ -130,5 +129,6 @@ kernel::LiteKernel *CpuPoolingGradFp32KernelCreator(const std::vector<lite::Tens return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_PoolingGrad, CpuPoolingGradFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_AvgPoolGrad, CpuPoolingGradFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_MaxPoolGrad, CpuPoolingGradFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h index 43edc88c39..218c9f3ff0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/pooling_grad.h @@ -29,9 +29,8 @@ using mindspore::schema::RoundMode; class PoolingGradCPUKernel : public LiteKernel { public: explicit PoolingGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const 
lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~PoolingGradCPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc index 0cb9b59532..c648381222 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.cc @@ -86,11 +86,10 @@ int PowerGradCPUKernel::Run() { kernel::LiteKernel *CpuPowerGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_PowerGrad); - auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) PowerGradCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new PowerGradCPUKernel fail!"; free(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h index 950d58c5f3..7ce2ba1386 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/power_grad.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class PowerGradCPUKernel : public LiteKernel { public: PowerGradCPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(param, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(param, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { PowerParameter *power_param = reinterpret_cast<PowerParameter *>(param); power_ = power_param->power_; scale_ = power_param->scale_; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc index 9420b9f876..46e0cac4fe 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.cc @@ -26,7 +26,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Sgd; +using mindspore::schema::PrimitiveType_SGD; namespace mindspore::kernel { @@ -200,10 +200,9 @@ int SgdCPUKernel::OptimizerStep() { kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const lite::PrimitiveC *primitive) { - MS_ASSERT(desc.type == schema::PrimitiveType_Sgd); - auto *kernel = new (std::nothrow) SgdCPUKernel(opParameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(desc.type == schema::PrimitiveType_SGD); + auto *kernel = new (std::nothrow) SgdCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SgdCPUKernel failed!"; free(opParameter); @@ 
-221,5 +220,5 @@ kernel::LiteKernel *CpuSgdFp32KernelCreator(const std::vector<lite::Tensor *> &i return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_Sgd, CpuSgdFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SGD, CpuSgdFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h index eedcbc92bc..4ad9c4b334 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sgd.h @@ -25,11 +25,8 @@ namespace mindspore::kernel { class SgdCPUKernel : public OptimizerKernel { public: explicit SgdCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OptimizerKernel(parameter, inputs, outputs, ctx, primitive, 2, 1), - thread_count_(ctx->thread_num_), - sgd_param_(nullptr) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : OptimizerKernel(parameter, inputs, outputs, ctx, 2, 1), thread_count_(ctx->thread_num_), sgd_param_(nullptr) { sgd_param_ = reinterpret_cast<SgdParameter *>(parameter); } ~SgdCPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc index 71a61cc150..d0ce449f90 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.cc @@ -71,13 +71,14 @@ int SigmoidCrossEntropyWithLogitsCPUKernel::Run() { int SigmoidCrossEntropyWithLogitsCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsFp32KernelCreator( - const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { +kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, + OpParameter *opParameter, + const lite::InnerContext *ctx, + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SigmoidCrossEntropyWithLogits); - auto *kernel = - new (std::nothrow) SigmoidCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SigmoidCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SigmoidCrossEntropyWithLogits failed"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h index 5b93fa4502..d36c8ab8b5 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits.h @@ -25,9 +25,8 @@ class SigmoidCrossEntropyWithLogitsCPUKernel : public LiteKernel { public: explicit SigmoidCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, - const lite::InnerContext *ctx, - 
const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~SigmoidCrossEntropyWithLogitsCPUKernel() override {} int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc index b01e7a7761..c12f03ebba 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.cc @@ -71,13 +71,14 @@ int SigmoidCrossEntropyWithLogitsGradCPUKernel::Run() { int SigmoidCrossEntropyWithLogitsGradCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsGradFp32KernelCreator( - const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { +kernel::LiteKernel *CpuSigmoidCrossEntropyWithLogitsGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, + OpParameter *opParameter, + const lite::InnerContext *ctx, + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad); - auto *kernel = - new (std::nothrow) SigmoidCrossEntropyWithLogitsGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SigmoidCrossEntropyWithLogitsGradCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SigmoidCrossEntropyWithLogitsGradCPUKernel failed"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h index 26680a32ce..15e86788e6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sigmoid_cross_entropy_with_logits_grad.h @@ -25,9 +25,8 @@ class SigmoidCrossEntropyWithLogitsGradCPUKernel : public LiteKernel { public: explicit SigmoidCrossEntropyWithLogitsGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, - const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~SigmoidCrossEntropyWithLogitsGradCPUKernel() override {} int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc index b9b4bc13b8..59b8030469 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.cc @@ -86,11 +86,10 @@ int SmoothL1LossCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuSmoothL1LossFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - 
const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SmoothL1Loss); - auto *kernel = new (std::nothrow) SmoothL1LossCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SmoothL1LossCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SmoothL1Loss failed"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h index 5fd0c0dea2..b7cfb0ec1c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss.h @@ -25,11 +25,8 @@ namespace mindspore::kernel { class SmoothL1LossCPUKernel : public LiteKernel { public: explicit SmoothL1LossCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), - smooth_l1_param_(nullptr), - thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), smooth_l1_param_(nullptr), thread_count_(ctx->thread_num_) { smooth_l1_param_ = reinterpret_cast<SmoothL1LossParameter *>(parameter); } ~SmoothL1LossCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc index 4f6f99d418..a93efea3b8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.cc @@ -83,11 +83,10 @@ int SmoothL1LossGradCPUKernel::Init() { return RET_OK; } kernel::LiteKernel *CpuSmoothL1LossGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SmoothL1LossGrad); - auto *kernel = new (std::nothrow) SmoothL1LossGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SmoothL1LossGradCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SmoothL1LossGradCPUKernel failed"; return nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h index e702519e20..47647ae69c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/smooth_l1_loss_grad.h @@ -25,11 +25,8 @@ namespace mindspore::kernel { class SmoothL1LossGradCPUKernel : public LiteKernel { public: explicit SmoothL1LossGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), - smooth_l1_param_(nullptr), - thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const 
lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), smooth_l1_param_(nullptr), thread_count_(ctx->thread_num_) { smooth_l1_param_ = reinterpret_cast<SmoothL1LossParameter *>(parameter); } ~SmoothL1LossGradCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc index 3dd7cb25b7..49051e6ba7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_SoftmaxCrossEntropy; +using mindspore::schema::PrimitiveType_SoftmaxCrossEntropyWithLogits; namespace mindspore::kernel { @@ -129,12 +129,10 @@ int SoftmaxCrossEntropyWithLogitsCPUKernel::ReSize() { kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_SoftmaxCrossEntropy); - auto *kernel = - new (std::nothrow) SoftmaxCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx, primitive); + MS_ASSERT(desc.type == schema::PrimitiveType_SoftmaxCrossEntropyWithLogits); + auto *kernel = new (std::nothrow) SoftmaxCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SoftmaxCrossEntropyWithLogitsCPUKernel failed"; free(opParameter); @@ -150,5 +148,6 @@ kernel::LiteKernel *CpuSoftmaxCrossEntropyFp32KernelCreator(const std::vector<li return kernel; } -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftmaxCrossEntropy, CpuSoftmaxCrossEntropyFp32KernelCreator) +REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_SoftmaxCrossEntropyWithLogits, + CpuSoftmaxCrossEntropyFp32KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h index faf4da056a..6465a41081 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_cross_entropy_with_logits.h @@ -29,9 +29,8 @@ class SoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel { public: explicit SoftmaxCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, - const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LossKernel(parameter, inputs, outputs, ctx, primitive) { + const lite::InnerContext *ctx) + : LossKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter); } ~SoftmaxCrossEntropyWithLogitsCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc index fdbe6afab6..f140d27025 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc +++ 
b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.cc @@ -91,10 +91,9 @@ int SoftmaxGradCPUKernel::Run() { kernel::LiteKernel *CpuSoftmaxGradFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); - auto *kernel = new (std::nothrow) SoftmaxGradCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SoftmaxGradCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SoftmaxGradCPUKernel fail!"; free(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h index f654d6a46f..348798146a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/softmax_grad.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SoftmaxGradCPUKernel : public LiteKernel { public: explicit SoftmaxGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param = reinterpret_cast<SoftmaxParameter *>(parameter); } ~SoftmaxGradCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc index 5c91bc78c9..fff94b7749 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.cc @@ -145,13 +145,14 @@ int SparseSoftmaxCrossEntropyWithLogitsCPUKernel::Init() { return RET_OK; } -kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator( - const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, const mindspore::lite::PrimitiveC *primitive) { +kernel::LiteKernel *CpuSparseSoftmaxCrossEntropyFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, + OpParameter *opParameter, + const lite::InnerContext *ctx, + const kernel::KernelKey &desc) { MS_ASSERT(opParameter != nullptr); MS_ASSERT(desc.type == schema::PrimitiveType_SparseSoftmaxCrossEntropy); - auto *kernel = - new (std::nothrow) SparseSoftmaxCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) SparseSoftmaxCrossEntropyWithLogitsCPUKernel(opParameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new SparseSoftmaxCrossEntropyWithLogitsCPUKernel failed!"; free(opParameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h index 57e39cf2d8..aa6004be53 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/sparse_softmax_cross_entropy_with_logits.h @@ -30,9 +30,8 @@ class SparseSoftmaxCrossEntropyWithLogitsCPUKernel : public LossKernel { explicit SparseSoftmaxCrossEntropyWithLogitsCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, - const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LossKernel(parameter, inputs, outputs, ctx, primitive) { + const lite::InnerContext *ctx) + : LossKernel(parameter, inputs, outputs, ctx) { param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(parameter); } ~SparseSoftmaxCrossEntropyWithLogitsCPUKernel() override {} diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h index b6641695c1..9175a6f979 100644 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h +++ b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/strided_slice_grad.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class StridedSliceGradCPUKernel : public LiteKernel { public: StridedSliceGradCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<StridedSliceParameter *>(parameter); } ~StridedSliceGradCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc deleted file mode 100644 index 55f0717de2..0000000000 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.cc +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include <vector> -#include <algorithm> -#include "src/runtime/kernel/arm/fp32_grad/tuple_getitem.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" -#include "include/errorcode.h" -#include "src/runtime/runtime_api.h" - -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; -using mindspore::lite::RET_ERROR; -using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_TupleGetItem; - -namespace mindspore::kernel { - -int TupleGetItemCPUKernel::Init() { - if (in_tensors_.size() != 1) { - MS_LOG(ERROR) << "Tuple Grad Filter should have one input"; - return RET_ERROR; - } - if (out_tensors_.size() != 1) { - MS_LOG(ERROR) << "Tuple Grad Filter should have one output"; - return RET_ERROR; - } - return RET_OK; -} - -int TupleGetItemCPUKernel::ReSize() { return RET_OK; } - -int TupleGetItemCPUKernel::Execute(int task_id) { - auto in = reinterpret_cast<float *>(in_tensors_.at(0)->MutableData()); - auto out = reinterpret_cast<float *>(out_tensors_.at(0)->MutableData()); - - size_t length = in_tensors_.at(0)->ElementsNum(); - - size_t stride = UP_DIV(length, thread_count_); - size_t count = MSMIN(stride, length - stride * task_id); - - size_t start = stride * task_id; - size_t end = start + count; - - std::copy(&(in[start]), &(in[end]), &(out[start])); - return RET_OK; -} - -int TupleRun(void *cdata, int task_id) { - auto tuple_kernel = reinterpret_cast<TupleGetItemCPUKernel *>(cdata); - auto error_code = tuple_kernel->Execute(task_id); - if (error_code != RET_OK) { - MS_LOG(ERROR) << "tuple grad error task_id[" << task_id << "] error_code[" << error_code << "]"; - return RET_ERROR; - } - return RET_OK; -} - -int TupleGetItemCPUKernel::Run() { - int error_code = ParallelLaunch(this->context_->thread_pool_, TupleRun, this, thread_count_); - if (error_code != RET_OK) { - MS_LOG(ERROR) << "tuple function error error_code[" << error_code << "]"; - return RET_ERROR; - } - return RET_OK; -} - -kernel::LiteKernel *CpuTupleGetItemFp32KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, - OpParameter *opParameter, const lite::InnerContext *ctx, - const kernel::KernelKey &desc, const lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_TupleGetItem); - auto *kernel = new (std::nothrow) TupleGetItemCPUKernel(opParameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "new TupleGetItemCPUKernel failed!"; - free(opParameter); - return nullptr; - } - - auto ret = kernel->Init(); - if (RET_OK != ret) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeFloat32, PrimitiveType_TupleGetItem, CpuTupleGetItemFp32KernelCreator) -} // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h b/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h deleted file mode 100644 index 9a7d470b57..0000000000 --- a/mindspore/lite/src/runtime/kernel/arm/fp32_grad/tuple_getitem.h +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_TUPLE_GETITEM_H_ -#define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_TUPLE_GETITEM_H_ - -#include <vector> -#include "src/lite_kernel.h" -#include "nnacl/fp32/arithmetic_fp32.h" - -namespace mindspore::kernel { -class TupleGetItemCPUKernel : public LiteKernel { - public: - explicit TupleGetItemCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { - param = parameter; - } - ~TupleGetItemCPUKernel() override = default; - - int Init() override; - int ReSize() override; - int Run() override; - int Execute(int task_id); - - private: - int thread_count_ = 1; - OpParameter *param; -}; -} // namespace mindspore::kernel - -#endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_ARM_FP32_GRAD_TUPLE_GETITEM_H_ diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/activation_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/activation_int8.cc index 33d772edb9..74ae7d3d67 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/activation_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/activation_int8.cc @@ -33,8 +33,7 @@ using mindspore::schema::PrimitiveType_Activation; namespace mindspore::kernel { kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const lite::InnerContext *ctx, const KernelKey &desc) { if (parameter == nullptr) { MS_LOG(ERROR) << "parameter is nullptr"; return nullptr; @@ -44,22 +43,22 @@ kernel::LiteKernel *CpuActivationInt8KernelCreator(const std::vector<lite::Tenso kernel::LiteKernel *kernel = nullptr; switch (static_cast<schema::ActivationType>(type)) { case schema::ActivationType_RELU: - kernel = new (std::nothrow) ReluInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) ReluInt8CPUKernel(parameter, inputs, outputs, ctx); break; case schema::ActivationType_RELU6: - kernel = new (std::nothrow) Relu6Int8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) Relu6Int8CPUKernel(parameter, inputs, outputs, ctx); break; case schema::ActivationType_HSWISH: - kernel = new (std::nothrow) HswishInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) HswishInt8CPUKernel(parameter, inputs, outputs, ctx); break; case schema::ActivationType_SIGMOID: - kernel = new (std::nothrow) SigmoidInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) SigmoidInt8CPUKernel(parameter, inputs, outputs, ctx); break; case schema::ActivationType_LEAKY_RELU: - kernel = new (std::nothrow) LeakyReluInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) LeakyReluInt8CPUKernel(parameter, inputs, outputs, ctx); break; case 
schema::ActivationType_TANH: - kernel = new (std::nothrow) TanhInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) TanhInt8CPUKernel(parameter, inputs, outputs, ctx); break; default: break; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc index 1edb796d43..d2fd257837 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; namespace mindspore::kernel { int QuantizedAddCPUKernel::Init() { @@ -210,5 +210,5 @@ int QuantizedAddCPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Add, LiteKernelCreator<QuantizedAddCPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_AddFusion, LiteKernelCreator<QuantizedAddCPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h index 414f88b725..79be464b73 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/add_int8.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class QuantizedAddCPUKernel : public LiteKernel { public: explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { arith_para_ = reinterpret_cast<ArithmeticParameter *>(parameter); } ~QuantizedAddCPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc index 8cb53b70d4..242ec1ab67 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.cc @@ -23,8 +23,8 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_FORMAT_ERR; using mindspore::lite::RET_PARAM_INVALID; -using mindspore::schema::PrimitiveType_ArgMax; -using mindspore::schema::PrimitiveType_ArgMin; +using mindspore::schema::PrimitiveType_ArgMaxFusion; +using mindspore::schema::PrimitiveType_ArgMinFusion; namespace mindspore::kernel { int ArgMinMaxInt8CPUKernel::Init() { @@ -96,6 +96,6 @@ int ArgMinMaxInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMax, LiteKernelCreator<ArgMinMaxInt8CPUKernel>) -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMin, LiteKernelCreator<ArgMinMaxInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMaxFusion, LiteKernelCreator<ArgMinMaxInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ArgMinFusion, LiteKernelCreator<ArgMinMaxInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h index 642c457b14..5031240b60 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/int8/argminmax_int8.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ArgMinMaxInt8CPUKernel : public LiteKernel { public: ArgMinMaxInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ArgMinMaxInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc index a86cc1c15d..e677eec54a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.cc @@ -29,14 +29,14 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; -using mindspore::schema::PrimitiveType_Add; +using mindspore::schema::PrimitiveType_AddFusion; using mindspore::schema::PrimitiveType_Eltwise; using mindspore::schema::PrimitiveType_Equal; using mindspore::schema::PrimitiveType_Greater; using mindspore::schema::PrimitiveType_GreaterEqual; using mindspore::schema::PrimitiveType_Less; using mindspore::schema::PrimitiveType_LessEqual; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; namespace mindspore::kernel { @@ -162,16 +162,15 @@ int ArithmeticInt8CPUKernel::Run() { kernel::LiteKernel *CpuArithmeticInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { kernel::LiteKernel *kernel = nullptr; - if (desc.type == PrimitiveType_Eltwise && static_cast<schema::PrimitiveType>(parameter->type_) == PrimitiveType_Add) { - kernel = new (std::nothrow) QuantizedAddCPUKernel(parameter, inputs, outputs, ctx, primitive); - } else if (desc.type == PrimitiveType_Eltwise && - static_cast<schema::PrimitiveType>(parameter->type_) == PrimitiveType_Mul) { - kernel = new (std::nothrow) MulInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + ArithmeticParameter *param = reinterpret_cast<ArithmeticParameter *>(parameter); + if (desc.type == PrimitiveType_Eltwise && param->eltwise_mode_ == static_cast<int>(schema::EltwiseMode_SUM)) { + kernel = new (std::nothrow) QuantizedAddCPUKernel(parameter, inputs, outputs, ctx); + } else if (desc.type == PrimitiveType_Eltwise && param->eltwise_mode_ == static_cast<int>(schema::EltwiseMode_PROD)) { + kernel = new (std::nothrow) MulInt8CPUKernel(parameter, inputs, outputs, ctx); } else { - kernel = new (std::nothrow) ArithmeticInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) ArithmeticInt8CPUKernel(parameter, inputs, outputs, ctx); } if (kernel == nullptr) { MS_LOG(ERROR) << "Create ArithmeticInt8CPUKernel failed, name: " << parameter->name_; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h index ceb082b79e..1d515be789 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h +++ 
b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_int8.h @@ -29,9 +29,8 @@ class ArithmeticInt8CPUKernel : public LiteKernel { public: ArithmeticInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ArithmeticInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h index 49e3f8274b..513a6656b6 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/arithmetic_self_int8.h @@ -44,9 +44,8 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel { public: explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { switch (parameter->type_) { case PrimitiveType_Round: arithmeticSelf_run_ = Int8ElementRound; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h index 2e2b4ac7ab..adee42a2ce 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batch_to_space_int8.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class BatchToSpaceInt8CPUKernel : public LiteKernel { public: BatchToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~BatchToSpaceInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h index 5d271b6957..97ba999a23 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/batchnorm_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class BatchnormInt8CPUKernel : public LiteKernel { public: BatchnormInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { batchnorm_param_ = reinterpret_cast<BatchNormParameter *>(parameter); } ~BatchnormInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h index b7858383c2..a657cb150b 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/bias_add_int8.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class BiasAddInt8CPUKernel : public QuantizedAddCPUKernel { public: BiasAddInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : QuantizedAddCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : QuantizedAddCPUKernel(parameter, inputs, outputs, ctx) {} ~BiasAddInt8CPUKernel() override = default; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h index 358028bce3..fc391373f3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/concat_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class ConcatInt8CPUKernel : public LiteKernel { public: ConcatInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { concat_param_ = reinterpret_cast<ConcatParameter *>(op_parameter_); } ~ConcatInt8CPUKernel() override { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h index 21e81fe446..7394217c5b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_1x1_int8.h @@ -32,9 +32,8 @@ namespace mindspore::kernel { class Convolution1x1Int8CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution1x1Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~Convolution1x1Int8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc index 7df36b6431..36249faace 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.cc @@ -16,16 +16,11 @@ #include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h" #include "nnacl/int8/conv3x3_int8.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { int ProcessFilterUint8(int8_t *origin_weight, int16_t *dst_weight, ConvParameter *conv_param) { diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h index da04ba54a7..3aeb1308ca 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_3x3_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class Convolution3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: Convolution3x3Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~Convolution3x3Int8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc index 549ce9e0e5..0ef93018e9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.cc @@ -15,17 +15,12 @@ */ #include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "nnacl/int8/conv_depthwise_int8.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwise3x3Int8CPUKernel::~ConvolutionDepthwise3x3Int8CPUKernel() { @@ -46,7 +41,7 @@ int ConvolutionDepthwise3x3Int8CPUKernel::InitWeightBias() { auto origin_weight = reinterpret_cast<int8_t *>(weight_tensor->MutableData()); int channel = weight_tensor->Batch(); if (channel % 8 != 0) { - MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8CPUKernel dosen't support channel " << channel; + MS_LOG(ERROR) << "ConvolutionDepthwise3x3Int8CPUKernel doesn't support channel " << channel; return RET_ERROR; } int pack_weight_size = channel * weight_tensor->Height() * weight_tensor->Width(); diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h index 627a85bd25..433d6e7cfd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ConvolutionDepthwise3x3Int8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwise3x3Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwise3x3Int8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc 
b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc index dcbe7fbeba..06afeb6fec 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.cc @@ -15,19 +15,12 @@ */ #include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h" -#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h" -#include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "nnacl/int8/conv_depthwise_int8.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseInt8CPUKernel::~ConvolutionDepthwiseInt8CPUKernel() { @@ -163,54 +156,4 @@ int ConvolutionDepthwiseInt8CPUKernel::Run() { row_buffer_ = nullptr; return ret; } - -kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DepthwiseConv2D); - kernel::LiteKernel *kernel = nullptr; - auto act_quant_size = - MSMAX(inputs.at(kInputIndex)->quant_params().size(), outputs.at(kOutputIndex)->quant_params().size()); - if (act_quant_size == 1) { // per tensor - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); - if (primitive != nullptr && primitive->infer_flag()) { - conv_param->input_h_ = inputs[kInputIndex]->Height(); - conv_param->input_w_ = inputs[kInputIndex]->Width(); - conv_param->input_channel_ = inputs[kInputIndex]->Channel(); - conv_param->output_h_ = outputs[kOutputIndex]->Height(); - conv_param->output_w_ = outputs[kOutputIndex]->Width(); - } - if (CheckConvDwUse3X3(conv_param) && conv_param->input_channel_ % C8NUM == 0) { -#ifdef ENABLE_ARM64 - kernel = - new (std::nothrow) kernel::ConvolutionDepthwise3x3Int8CPUKernel(opParameter, inputs, outputs, ctx, primitive); -#endif - } - if (kernel == nullptr) { - kernel = - new (std::nothrow) kernel::ConvolutionDepthwiseInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); - } - } else { // per channel - kernel = - new (std::nothrow) kernel::ConvolutionDepthwiseSWInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); - } - - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DepthwiseConv2D, CpuConvDwInt8KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h index f7f668e5e1..fc489783d2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_int8.h @@ -26,9 
+26,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseInt8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc index 4c06585bac..3429a8d844 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.cc @@ -15,17 +15,12 @@ */ #include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "nnacl/int8/conv_depthwise_int8.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { ConvolutionDepthwiseSWInt8CPUKernel::~ConvolutionDepthwiseSWInt8CPUKernel() { @@ -153,15 +148,11 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReinitQuantParam() { auto input_tensor = in_tensors_.at(kInputIndex); auto channel = conv_param_->input_channel_; input_scale_ = reinterpret_cast<float *>(malloc(channel * sizeof(float))); - if (input_scale_ == nullptr) { - MS_LOG(ERROR) << "malloc input_sacle_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(input_scale_); + input_zp_ = reinterpret_cast<int8_t *>(malloc(channel * sizeof(int8_t))); - if (input_zp_ == nullptr) { - MS_LOG(ERROR) << "malloc input_zp_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(input_zp_); + if (input_tensor->quant_params().size() == kPerTensor) { for (int i = 0; i < channel; i++) { auto input_quant_arg = input_tensor->quant_params().front(); @@ -178,15 +169,11 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReinitQuantParam() { auto output_tensor = out_tensors_.at(kOutputIndex); output_scale_ = reinterpret_cast<float *>(malloc(channel * sizeof(float))); - if (output_scale_ == nullptr) { - MS_LOG(ERROR) << "malloc output_scale_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(output_scale_); + output_zp_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (output_zp_ == nullptr) { - MS_LOG(ERROR) << "malloc output_zp_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(output_zp_); + if (output_tensor->quant_params().size() == kPerTensor) { for (int i = 0; i < channel; i++) { auto output_quant_arg = output_tensor->quant_params().front(); @@ -202,41 +189,26 @@ int ConvolutionDepthwiseSWInt8CPUKernel::ReinitQuantParam() { } conv_quant_arg_->real_multiplier_ = reinterpret_cast<double *>(malloc(channel * sizeof(double))); - if (conv_quant_arg_->real_multiplier_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->real_multiplier_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->real_multiplier_); + conv_quant_arg_->left_shift_ = 
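/*
 * MSLITE_CHECK_PTR comes from src/common/log_util.h (included in the header hunk
 * below) and replaces the hand-written malloc null-checks in ReinitQuantParam().
 * A minimal sketch of the pattern, inferred from the blocks it replaces; the
 * actual definition in log_util.h may differ:
 *
 *   #define MSLITE_CHECK_PTR(ptr)                           \
 *     do {                                                  \
 *       if ((ptr) == nullptr) {                             \
 *         MS_LOG(ERROR) << "malloc " << #ptr << " failed."; \
 *         return RET_ERROR;                                 \
 *       }                                                   \
 *     } while (0)
 *
 * Early-return semantics are preserved, at roughly four lines of boilerplate
 * saved per allocation.
 */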
reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (conv_quant_arg_->left_shift_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->left_shift_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->left_shift_); + conv_quant_arg_->right_shift_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (conv_quant_arg_->right_shift_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->right_shift_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->right_shift_); + conv_quant_arg_->quant_multiplier_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (conv_quant_arg_->quant_multiplier_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->quant_multiplier_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->quant_multiplier_); + conv_quant_arg_->out_act_min_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (conv_quant_arg_->out_act_min_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->out_act_min_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->out_act_min_); + conv_quant_arg_->out_act_max_ = reinterpret_cast<int32_t *>(malloc(channel * sizeof(int32_t))); - if (conv_quant_arg_->out_act_max_ == nullptr) { - MS_LOG(ERROR) << "malloc conv_quant_arg_->out_act_max_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(conv_quant_arg_->out_act_max_); weight_scale_ = reinterpret_cast<float *>(malloc(channel * sizeof(float))); - if (weight_scale_ == nullptr) { - MS_LOG(ERROR) << "malloc weight_scale_ failed."; - return RET_ERROR; - } + MSLITE_CHECK_PTR(weight_scale_); + auto weight_tensor = in_tensors_.at(kWeightIndex); if (weight_tensor->quant_params().size() == kPerTensor) { for (int i = 0; i < channel; i++) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h index d97dfe8c29..2bc76b277a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h @@ -19,6 +19,7 @@ #include <vector> #include "src/lite_kernel.h" +#include "src/common/log_util.h" #include "src/runtime/kernel/arm/base/convolution_base.h" #include "nnacl/fp32/conv_depthwise_fp32.h" @@ -26,9 +27,8 @@ namespace mindspore::kernel { class ConvolutionDepthwiseSWInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionDepthwiseSWInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionDepthwiseSWInt8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc index 616778ff39..636bf5131c 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.cc @@ -23,6 +23,9 @@ #include "src/runtime/kernel/arm/int8/convolution_1x1_int8.h" #include "src/runtime/kernel/arm/int8/convolution_3x3_int8.h" #include 
"src/runtime/kernel/arm/int8/group_convolution_int8.h" +#include "src/runtime/kernel/arm/int8/convolution_depthwise_int8.h" +#include "src/runtime/kernel/arm/int8/convolution_depthwise_3x3_int8.h" +#include "src/runtime/kernel/arm/int8/convolution_depthwise_slidewindow_int8.h" #include "src/runtime/runtime_api.h" #ifdef ENABLE_ARM64 #include "src/runtime/kernel/arm/int8/opt_op_handler.h" @@ -32,7 +35,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; using mindspore::schema::Format::Format_NHWC; namespace mindspore::kernel { @@ -284,24 +287,24 @@ lite::Tensor *CreateBiasTensorInt8(TypeId data_type, std::vector<int> bias_shape kernel::LiteKernel *CpuConvInt8KernelSelect(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive) { + const InnerContext *ctx) { auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); kernel::LiteKernel *kernel = nullptr; if (conv_param->kernel_h_ == 3 && conv_param->kernel_w_ == 3 && conv_param->stride_h_ == 1 && conv_param->stride_w_ == 1 && conv_param->dilation_h_ == 1 && conv_param->dilation_w_ == 1) { #ifdef ENABLE_ARM64 if (mindspore::lite::IsSupportSDot()) { - kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx); } else { - kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx); } #else - kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) kernel::Convolution3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx); #endif } else if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) { - kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) kernel::Convolution1x1Int8CPUKernel(op_parameter, inputs, outputs, ctx); } else { - kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) kernel::ConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx); } return kernel; } @@ -314,8 +317,7 @@ void CopyTensorQuantParam(lite::Tensor *dst, lite::Tensor *src) { kernel::LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, - const InnerContext *ctx, const mindspore::lite::PrimitiveC *primitive, - int group) { + const InnerContext *ctx, int group) { auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); std::vector<int> in_shape; std::vector<int> out_shape; @@ -330,7 +332,7 @@ kernel::LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor int batch = inputs.front()->Batch(); conv_param->input_batch_ = batch; conv_param->output_batch_ = batch; - bool infered_flag = primitive != nullptr && primitive->infer_flag(); + bool infered_flag = op_parameter != nullptr && op_parameter->infer_flag_; if (infered_flag) { int in_h = 
inputs.front()->Height(); int in_w = inputs.front()->Width(); @@ -406,50 +408,65 @@ kernel::LiteKernel *CpuGroupConvInt8KernelCreator(const std::vector<lite::Tensor CopyTensorQuantParam(out_tensor, outputs[j]); new_outputs.emplace_back(out_tensor); } - group_convs.emplace_back(CpuConvInt8KernelSelect( - new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx, primitive)); + group_convs.emplace_back( + CpuConvInt8KernelSelect(new_inputs, new_outputs, reinterpret_cast<OpParameter *>(new_conv_parameter), ctx)); } - return new (std::nothrow) - GroupConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, primitive, group_convs, group); + return new (std::nothrow) GroupConvolutionInt8CPUKernel(op_parameter, inputs, outputs, ctx, group_convs, group); } -kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_Conv2D); - auto conv_param = reinterpret_cast<ConvParameter *>(opParameter); +kernel::LiteKernel *CpuConvDwInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const InnerContext *ctx, const kernel::KernelKey &desc) { + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); kernel::LiteKernel *kernel = nullptr; - if (primitive != nullptr && primitive->infer_flag()) { - conv_param->input_h_ = inputs.front()->Height(); - conv_param->input_w_ = inputs.front()->Width(); - conv_param->input_channel_ = inputs.front()->Channel(); - conv_param->output_h_ = outputs.front()->Height(); - conv_param->output_w_ = outputs.front()->Width(); - conv_param->output_channel_ = outputs.front()->Channel(); - conv_param->op_parameter_.thread_num_ = ctx->thread_num_; + + auto act_quant_size = + MSMAX(inputs.at(kInputIndex)->quant_params().size(), outputs.at(kOutputIndex)->quant_params().size()); + if (act_quant_size == 1) { // per tensor + if (CheckConvDwUse3X3(conv_param) && conv_param->input_channel_ % C8NUM == 0) { +#ifdef ENABLE_ARM64 + kernel = new (std::nothrow) kernel::ConvolutionDepthwise3x3Int8CPUKernel(op_parameter, inputs, outputs, ctx); +#endif + } + if (kernel == nullptr) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseInt8CPUKernel(op_parameter, inputs, outputs, ctx); + } + } else { // per channel + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseSWInt8CPUKernel(op_parameter, inputs, outputs, ctx); } + return kernel; +} + +kernel::LiteKernel *CpuConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(op_parameter != nullptr); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion); + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); + kernel::LiteKernel *kernel = nullptr; + if (conv_param->group_ == 1) { - kernel = CpuConvInt8KernelSelect(inputs, outputs, opParameter, ctx, primitive); + kernel = CpuConvInt8KernelSelect(inputs, outputs, op_parameter, ctx); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = CpuConvDwInt8KernelCreator(inputs, outputs, op_parameter, ctx, desc); } else { MS_ASSERT(conv_param->group_ > 1); - 
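/*
 * With Conv2DFusion as the single conv primitive, one creator now routes to
 * three kernel families based only on ConvParameter:
 *
 *   group == 1                          -> plain conv (1x1 / 3x3 / generic int8)
 *   group == in_ch && group == out_ch   -> depthwise conv
 *   anything else                       -> group conv (channels split per group)
 *
 * Example: group = in_ch = out_ch = 32 selects the depthwise path, where
 * CpuConvDwInt8KernelCreator further picks the 3x3 ARM64 kernel only when the
 * activations are per-tensor quantized (act_quant_size == 1), channel % 8 == 0,
 * and CheckConvDwUse3X3() holds; per-channel quantization falls back to the
 * slide-window kernel.
 */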
kernel = CpuGroupConvInt8KernelCreator(inputs, outputs, opParameter, ctx, primitive, conv_param->group_); + kernel = CpuGroupConvInt8KernelCreator(inputs, outputs, op_parameter, ctx, conv_param->group_); } if (kernel == nullptr) { MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); + free(op_parameter); return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_)); delete kernel; return nullptr; } return kernel; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2D, CpuConvInt8KernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2DFusion, CpuConvInt8KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h index 0f8a4e2cf7..363931cc2e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/convolution_int8.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class ConvolutionInt8CPUKernel : public ConvolutionBaseCPUKernel { public: ConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionInt8CPUKernel() override { FreeQuantParam(); if (packed_weight_ != nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h index eb5e869c97..bee5aa5ac3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/crop_int8.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class CropInt8CPUKernel : public CropBaseCPUKernel { public: CropInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : CropBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : CropBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~CropInt8CPUKernel(); int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc index e9b614ed9e..3d1e75d70b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.cc @@ -15,17 +15,12 @@ */ #include "src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h" -#include "schema/model_generated.h" -#include "src/kernel_registry.h" #include "include/errorcode.h" #include "nnacl/int8/conv_depthwise_int8.h" #include "src/runtime/runtime_api.h" -using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::lite::KernelRegistrar; using 
mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeDepthwiseConv2D; namespace mindspore::kernel { DeconvolutionDepthwiseInt8CPUKernel::~DeconvolutionDepthwiseInt8CPUKernel() { @@ -211,29 +206,4 @@ int DeconvolutionDepthwiseInt8CPUKernel::Run() { output_buffer_ = nullptr; return ret; } - -kernel::LiteKernel *CpuDeconvDwInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeDepthwiseConv2D); - auto kernel = - new (std::nothrow) kernel::DeconvolutionDepthwiseInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DeDepthwiseConv2D, CpuDeconvDwInt8KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h index 230dcf8796..893dd45ab9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class DeconvolutionDepthwiseInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeconvolutionDepthwiseInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DeconvolutionDepthwiseInt8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc index 823624a0d9..d6b6acde66 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.cc @@ -15,6 +15,7 @@ */ #include "src/runtime/kernel/arm/int8/deconvolution_int8.h" +#include "src/runtime/kernel/arm/int8/deconvolution_depthwise_int8.h" #include "src/runtime/runtime_api.h" #include "src/common/utils.h" #include "src/runtime/kernel/arm/int8/opt_op_handler.h" @@ -24,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::kernel { DeConvInt8CPUKernel::~DeConvInt8CPUKernel() { @@ -278,26 +279,37 @@ int DeConvInt8CPUKernel::Run() { } kernel::LiteKernel *CpuDeConvInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - 
const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(opParameter != nullptr); - MS_ASSERT(desc.type == schema::PrimitiveType_DeConv2D); - auto kernel = new (std::nothrow) kernel::DeConvInt8CPUKernel(opParameter, inputs, outputs, ctx, primitive); + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(op_parameter != nullptr); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2dTransposeFusion); + + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); + kernel::LiteKernel *kernel = nullptr; + + if (conv_param->group_ == 1) { + kernel = new (std::nothrow) kernel::DeConvInt8CPUKernel(op_parameter, inputs, outputs, ctx); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = new (std::nothrow) kernel::DeconvolutionDepthwiseInt8CPUKernel(op_parameter, inputs, outputs, ctx); + } else { + MS_LOG(ERROR) << "deconv does not support group deconv!"; + kernel = nullptr; + } + if (kernel == nullptr) { MS_LOG(ERROR) << "kernel is nullptr."; - free(opParameter); + free(op_parameter); return nullptr; } auto ret = kernel->Init(); if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << opParameter->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(opParameter->type_)); + MS_LOG(ERROR) << "Init kernel failed, name: " << op_parameter->name_ << ", type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_)); delete kernel; return nullptr; } return kernel; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DeConv2D, CpuDeConvInt8KernelCreator) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Conv2dTransposeFusion, CpuDeConvInt8KernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h index 558cfd7de8..c09f1226ee 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/deconvolution_int8.h @@ -32,9 +32,8 @@ namespace mindspore::kernel { class DeConvInt8CPUKernel : public ConvolutionBaseCPUKernel { public: DeConvInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : ConvolutionBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DeConvInt8CPUKernel() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h index 0dbcdfc03c..06512bbf82 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/depth_to_space_int8.h @@ -27,9 +27,8 @@ namespace mindspore::kernel { class DepthToSpaceInt8CPUKernel : public DepthToSpaceBaseCPUKernel { public: DepthToSpaceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : DepthToSpaceBaseCPUKernel(parameter, inputs,
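/*
 * Under the unified IR, Conv2dTransposeFusion covers both plain and depthwise
 * deconvolution, so the former PrimitiveType_DeDepthwiseConv2D registration
 * (removed above) folds into CpuDeConvInt8KernelCreator:
 *
 *   group == 1                          -> DeConvInt8CPUKernel
 *   group == in_ch && group == out_ch   -> DeconvolutionDepthwiseInt8CPUKernel
 *   anything else                       -> rejected with an error; grouped
 *                                          deconvolution is not supported here.
 */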
outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : DepthToSpaceBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DepthToSpaceInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.h index 6f1473767b..c3f8de94bb 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/detection_post_process_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class DetectionPostProcessInt8CPUKernel : public DetectionPostProcessBaseCPUKernel { public: DetectionPostProcessInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : DetectionPostProcessBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : DetectionPostProcessBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~DetectionPostProcessInt8CPUKernel() = default; int8_t *data_int8_ = nullptr; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc index 53ef549398..a1df673f23 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.cc @@ -25,7 +25,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_DivFusion; namespace mindspore::kernel { @@ -131,5 +131,5 @@ int DivInt8CPUKernel::Run() { return ret; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Div, LiteKernelCreator<DivInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_DivFusion, LiteKernelCreator<DivInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h index 5f265e342e..d352fed35e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/div_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class DivInt8CPUKernel : public LiteKernel { public: explicit DivInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~DivInt8CPUKernel() override {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h index 5a71f2afeb..0f1f63fd3d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/fullconnection_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class FullconnectionInt8CPUKernel : public MatmulBaseInt8CPUKernel { public: FullconnectionInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext 
*ctx, - const mindspore::lite::PrimitiveC *primitive) - : MatmulBaseInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const mindspore::lite::InnerContext *ctx) + : MatmulBaseInt8CPUKernel(parameter, inputs, outputs, ctx) {} ~FullconnectionInt8CPUKernel() override = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h index 92294824b3..d54c16fa9f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gatherNd_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class GatherNdInt8CPUKernel : public LiteKernel { public: GatherNdInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~GatherNdInt8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc index 11ac11a5aa..0f2118e4db 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.cc @@ -33,7 +33,6 @@ namespace mindspore::kernel { int GatherInt8CPUKernel::Init() { axis_ = (reinterpret_cast<GatherParameter *>(op_parameter_))->axis_; - batchDims_ = (reinterpret_cast<GatherParameter *>(op_parameter_))->batchDims_; auto in_quant_args = in_tensors_.at(0)->quant_params(); auto out_quant_args = out_tensors_.at(0)->quant_params(); param_.alpha_ = in_quant_args.front().scale / out_quant_args.front().scale; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h index 04a546a487..57f5096942 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/gather_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class GatherInt8CPUKernel : public LiteKernel { public: GatherInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~GatherInt8CPUKernel() {} int Init() override; @@ -38,7 +37,6 @@ class GatherInt8CPUKernel : public LiteKernel { private: int thread_count_; - int batchDims_; int axis_; GatherQuantArg param_; }; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc index 697e95a79d..080166ea4e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.cc @@ -23,7 +23,6 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using 
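/*
 * The schema-type renames in these hunks follow one pattern from the IR
 * unification: ops that absorbed fused attributes gained a *Fusion suffix.
 * The mapping visible in this file set:
 *
 *   Conv2D    -> Conv2DFusion             Div       -> DivFusion
 *   DeConv2D  -> Conv2dTransposeFusion    L2Norm    -> L2NormalizeFusion
 *   LayerNorm -> LayerNormFusion          Mul       -> MulFusion
 *   Pad       -> PadFusion                Power     -> PowFusion
 *   Pooling   -> AvgPoolFusion / MaxPoolFusion
 *   Reduce    -> ReduceFusion             Scale     -> ScaleFusion
 *   Slice     -> SliceFusion              LeakyReLU -> LeakyRelu
 *   SoftMax   -> Softmax
 *
 * LeakyReLU and SoftMax are pure spelling normalizations rather than fusions,
 * and DepthwiseConv2D / DeDepthwiseConv2D lose their own registrations
 * entirely, absorbed into Conv2DFusion / Conv2dTransposeFusion.
 */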
mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Conv2D; namespace mindspore::kernel { void GroupConvolutionInt8CPUKernel::SeparateInput(int group_id) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h index 1330d71794..0bbebb4935 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/group_convolution_int8.h @@ -28,9 +28,8 @@ class GroupConvolutionInt8CPUKernel : public GroupConvolutionCPUKernel { public: GroupConvolutionInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive, std::vector<kernel::LiteKernel *> group_convs, const int group_num) - : GroupConvolutionCPUKernel(parameter, inputs, outputs, ctx, primitive, group_convs, group_num) { + : GroupConvolutionCPUKernel(parameter, inputs, outputs, ctx, group_convs, group_num) { } // opParameter(in channel, out channel) in this kernel has been split to groups, if // you want to get real params, multiply in channel / out channel with group num diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h index 3ff202cf7a..655240a8e2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/hswish_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class HswishInt8CPUKernel : public LiteKernel { public: HswishInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) {} ~HswishInt8CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc index 47c7539c13..2a67e0d4ae 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_L2Norm; +using mindspore::schema::PrimitiveType_L2NormalizeFusion; namespace mindspore::kernel { int L2NormInt8CPUKernel::Init() { @@ -70,5 +70,5 @@ int L2NormInt8CPUKernel::DoExecute(int task_id) { return L2NormalizationInt8(input_data, output_data, l2_norm_param_, &quant_param_, begin, end); } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_L2Norm, LiteKernelCreator<L2NormInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_L2NormalizeFusion, LiteKernelCreator<L2NormInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.h index 1a455263d3..28df43c1f7 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/l2_norm_int8.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class 
L2NormInt8CPUKernel : public L2NormCPUKernel { public: explicit L2NormInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : L2NormCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : L2NormCPUKernel(parameter, inputs, outputs, ctx) {} ~L2NormInt8CPUKernel() {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc index cebe3b4ca6..dafa9c56cd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.cc @@ -20,7 +20,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_LayerNorm; +using mindspore::schema::PrimitiveType_LayerNormFusion; namespace mindspore::kernel { LayerNormInt8CPUKernel::~LayerNormInt8CPUKernel() { @@ -131,5 +131,5 @@ int LayerNormInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LayerNorm, LiteKernelCreator<LayerNormInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LayerNormFusion, LiteKernelCreator<LayerNormInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.h index e5c80bd6c3..e12d0ab06a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/layer_norm_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class LayerNormInt8CPUKernel : public LiteKernel { public: LayerNormInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<LayerNormParameter *>(parameter); } ~LayerNormInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc index d6e2786ced..2aa35b7ff4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_LeakyReLU; +using mindspore::schema::PrimitiveType_LeakyRelu; namespace mindspore::kernel { namespace { @@ -130,5 +130,5 @@ int LeakyReluInt8CPUKernel::DoExecute(int task_id) { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LeakyReLU, LiteKernelCreator<LeakyReluInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_LeakyRelu, LiteKernelCreator<LeakyReluInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h index f96934b287..3a8a4de4d2 100644 --- 
a/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/leaky_relu_int8.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class LeakyReluInt8CPUKernel : public LiteKernel { public: LeakyReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~LeakyReluInt8CPUKernel() override; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h index d125f2ea9b..ab96da216b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_base_int8.h @@ -31,9 +31,8 @@ namespace mindspore::kernel { class MatmulBaseInt8CPUKernel : public LiteKernel { public: MatmulBaseInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<MatMulParameter *>(op_parameter_); } ~MatmulBaseInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h index 71f6d205b2..2f4d5ac81d 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/matmul_int8.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class MatmulInt8CPUKernel : public MatmulBaseInt8CPUKernel { public: MatmulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : MatmulBaseInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : MatmulBaseInt8CPUKernel(parameter, inputs, outputs, ctx) {} ~MatmulInt8CPUKernel() override = default; int Init() override; int ReSize() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc index 789a9c50ba..3846d174f8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; namespace mindspore::kernel { int MulInt8CPUKernel::Init() { @@ -217,5 +217,5 @@ int MulInt8CPUKernel::DoExecute(int task_id) { return lite::RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Mul, LiteKernelCreator<MulInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_MulFusion, LiteKernelCreator<MulInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h index 
08a3a3d23a..f10f53ae33 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/mul_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class MulInt8CPUKernel : public LiteKernel { public: explicit MulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx_->thread_num_) { tile_para = reinterpret_cast<ArithmeticParameter *>(parameter); } ~MulInt8CPUKernel() override{}; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc index d4d0fe1d80..03788425b0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Pad; +using mindspore::schema::PrimitiveType_PadFusion; namespace mindspore::kernel { namespace { @@ -286,5 +286,5 @@ int PadInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Pad, LiteKernelCreator<PadInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PadFusion, LiteKernelCreator<PadInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h index ecd1f61fed..42e9e5c1e2 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pad_int8.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class PadInt8CPUKernel : public LiteKernel { public: explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { op_parameter_->thread_num_ = ctx->thread_num_; pad_param_ = reinterpret_cast<PadParameter *>(op_parameter_); } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc index eb74e99803..e861a1913b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.cc @@ -26,7 +26,8 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_MEMORY_FAILED; -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::kernel { int PoolingInt8CPUKernel::Init() { @@ -103,5 +104,6 @@ int PoolingInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Pooling, LiteKernelCreator<PoolingInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_AvgPoolFusion, LiteKernelCreator<PoolingInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, 
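/*
 * Pooling is the one case where a single primitive split in two: the old
 * PrimitiveType_Pooling becomes AvgPoolFusion and MaxPoolFusion, so the same
 * PoolingInt8CPUKernel is registered under both types. LiteKernelCreator<T>
 * is the generic factory these REG_KERNEL lines rely on; a minimal sketch of
 * the pattern, assuming it mirrors the hand-written creators this patch
 * removes (the real template lives in the runtime's lite_kernel header and
 * may differ in detail):
 *
 *   template <class T>
 *   kernel::LiteKernel *LiteKernelCreator(const std::vector<lite::Tensor *> &inputs,
 *                                         const std::vector<lite::Tensor *> &outputs,
 *                                         OpParameter *parameter, const lite::InnerContext *ctx,
 *                                         const kernel::KernelKey &desc) {
 *     auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx);
 *     if (kernel == nullptr) {
 *       free(parameter);
 *       return nullptr;
 *     }
 *     if (kernel->Init() != RET_OK) {
 *       delete kernel;
 *       return nullptr;
 *     }
 *     return kernel;
 *   }
 */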
PrimitiveType_MaxPoolFusion, LiteKernelCreator<PoolingInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h index c9af9ac83f..9052a52116 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/pooling_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class PoolingInt8CPUKernel : public PoolingBaseCPUKernel { public: PoolingInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : PoolingBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~PoolingInt8CPUKernel() { FreeQuantParam(); } int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc index 0cd31b65cc..a3f75cdb1a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ using mindspore::kernel::KERNEL_ARCH::kCPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Power; +using mindspore::schema::PrimitiveType_PowFusion; namespace mindspore::kernel { int PowerInt8CPUKernel::Init() { @@ -106,5 +106,5 @@ int PowerInt8CPUKernel::Run() { return ret; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Power, LiteKernelCreator<PowerInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_PowFusion, LiteKernelCreator<PowerInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h index 1928542cd3..9bb2e7ce2a 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/power_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class PowerInt8CPUKernel : public LiteKernel { public: PowerInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { param_ = reinterpret_cast<PowerParameter *>(op_parameter_); } ~PowerInt8CPUKernel() {} diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc index 479f2e6285..86ca7c7c66 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.cc @@ -26,7 +26,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Reduce; +using 
mindspore::schema::PrimitiveType_ReduceFusion; using mindspore::schema::ReduceMode_ReduceMax; using mindspore::schema::ReduceMode_ReduceMean; using mindspore::schema::ReduceMode_ReduceMin; @@ -35,7 +35,6 @@ using mindspore::schema::ReduceMode_ReduceSum; using mindspore::schema::ReduceMode_ReduceSumSquare; using mindspore::kernel::KERNEL_ARCH::kCPU; -using mindspore::schema::PrimitiveType_Reduce; namespace mindspore::kernel { void ReduceInt8CPUKernel::OneAxis() { @@ -285,30 +284,36 @@ int ReduceInt8CPUKernel::CalculateQuantArgs() { // quant_out = sum((quant_in - zp)^2) * scale_in^2 / scale_out + zp_out // scale_in * scale_in/scale_out if (mode_ == static_cast<int>(schema::ReduceMode_ReduceSumSquare)) { - for (auto i = 0; i < num_axes_ - 1; i++) { - QuantMulArg *qm = new (std::nothrow) QuantMulArg; - if (qm == nullptr) { - MS_LOG(ERROR) << "ReduceProd new QuantMultiplier failed."; - return RET_NULL_PTR; - } - double sumsquare_multiplier = quant_arg_.in_scale_; - QuantizeMultiplierSmallerThanOne(sumsquare_multiplier, &qm->multiplier_, &shift); - qm->left_shift_ = shift < 0 ? -shift : 0; - qm->right_shift_ = shift > 0 ? shift : 0; - sum_square_multipliers_.push_back(qm); - } + return CalculateQuantArgsReduceSumSquare(); + } + return RET_OK; +} +int ReduceInt8CPUKernel::CalculateQuantArgsReduceSumSquare() { + int shift; + for (auto i = 0; i < num_axes_ - 1; i++) { QuantMulArg *qm = new (std::nothrow) QuantMulArg; if (qm == nullptr) { MS_LOG(ERROR) << "ReduceProd new QuantMultiplier failed."; return RET_NULL_PTR; } - double sumsquare_multiplier = quant_arg_.in_scale_ * quant_arg_.in_scale_ / quant_arg_.out_scale_; + double sumsquare_multiplier = quant_arg_.in_scale_; QuantizeMultiplierSmallerThanOne(sumsquare_multiplier, &qm->multiplier_, &shift); qm->left_shift_ = shift < 0 ? -shift : 0; qm->right_shift_ = shift > 0 ? shift : 0; sum_square_multipliers_.push_back(qm); } + + QuantMulArg *qm = new (std::nothrow) QuantMulArg; + if (qm == nullptr) { + MS_LOG(ERROR) << "ReduceProd new QuantMultiplier failed."; + return RET_NULL_PTR; + } + double sumsquare_multiplier = quant_arg_.in_scale_ * quant_arg_.in_scale_ / quant_arg_.out_scale_; + QuantizeMultiplierSmallerThanOne(sumsquare_multiplier, &qm->multiplier_, &shift); + qm->left_shift_ = shift < 0 ? -shift : 0; + qm->right_shift_ = shift > 0 ? 
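/*
 * Quantization arithmetic behind CalculateQuantArgsReduceSumSquare(), now split
 * out of CalculateQuantArgs(): reducing over N axes happens one axis at a time,
 * so N multipliers are prepared. Squaring int8 data yields values with scale
 * s_in^2; the first N-1 stages requantize back to s_in using
 *
 *   multiplier = s_in            (since s_in^2 / s_in = s_in)
 *
 * while the final stage maps into the output tensor's scale, per the comment
 * kept in the source:
 *
 *   q_out = sum((q_in - zp_in)^2) * s_in^2 / s_out + zp_out
 *   multiplier = s_in * s_in / s_out
 *
 * QuantizeMultiplierSmallerThanOne() then decomposes each multiplier into a
 * fixed-point multiplier plus left/right shifts, matching the inline loop this
 * refactor replaces.
 */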
shift : 0; + sum_square_multipliers_.push_back(qm); return RET_OK; } @@ -532,5 +537,5 @@ int ReduceInt8CPUKernel::CallReduceUnit(int task_id) { return ret; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Reduce, LiteKernelCreator<ReduceInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ReduceFusion, LiteKernelCreator<ReduceInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h index 1745e64bb2..bdeaf6da18 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reduce_int8.h @@ -37,9 +37,8 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel { public: ReduceInt8CPUKernel(OpParameter *param, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ReduceBaseCPUKernel(param, inputs, outputs, ctx, primitive), ctx_(ctx) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ReduceBaseCPUKernel(param, inputs, outputs, ctx), ctx_(ctx) {} ~ReduceInt8CPUKernel() { for (auto qm : mean_multipliers_) { delete qm; @@ -77,6 +76,7 @@ class ReduceInt8CPUKernel : public ReduceBaseCPUKernel { void ThreeAxes(); void ReduceMean4DCalQuantParam(); int CalculateQuantArgs(); + int CalculateQuantArgsReduceSumSquare(); void GetQuantArgs(size_t i); private: diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h index 69a191a1d6..c8ceedbbe0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/relux_int8.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class ReluXInt8CPUKernel : public LiteKernel { public: ReluXInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { type_ = (reinterpret_cast<ActivationParameter *>(parameter))->type_; } ~ReluXInt8CPUKernel() override = default; @@ -47,9 +46,8 @@ class ReluXInt8CPUKernel : public LiteKernel { class ReluInt8CPUKernel : public ReluXInt8CPUKernel { public: ReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx) {} ~ReluInt8CPUKernel() override = default; @@ -64,9 +62,8 @@ class ReluInt8CPUKernel : public ReluXInt8CPUKernel { class Relu6Int8CPUKernel : public ReluXInt8CPUKernel { public: Relu6Int8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ReluXInt8CPUKernel(parameter, inputs, outputs, ctx) {} 
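/*
 * ReluInt8CPUKernel and Relu6Int8CPUKernel stay thin wrappers: the base
 * ReluXInt8CPUKernel reads type_ from ActivationParameter, and the subclasses
 * differ only in the saturation bounds applied in quantized space --
 * presumably [zp_out, 127] for Relu and the quantized [0, 6] range for Relu6,
 * with the exact bounds set up in each kernel's Init() (not shown in this
 * hunk).
 */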
~Relu6Int8CPUKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h index 07119e81b3..456cf63721 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/reshape_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class ReshapeInt8CPUKernel : public LiteKernel { public: ReshapeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { reshape_param_ = reinterpret_cast<ReshapeParameter *>(op_parameter_); } ~ReshapeInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h index 568b07c72e..7a951e3ca8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/resize_int8.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class ResizeInt8CPUKernel : public ResizeBaseCPUKernel { public: ResizeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ResizeBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~ResizeInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc index f06005471a..e71c4e9628 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.cc @@ -23,7 +23,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { namespace { @@ -347,5 +347,5 @@ int ScaleInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Scale, LiteKernelCreator<ScaleInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_ScaleFusion, LiteKernelCreator<ScaleInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h index b0835f6126..6c5ca7f372 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/scale_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class ScaleInt8CPUKernel : public LiteKernel { public: ScaleInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), ctx_(ctx), thread_count_(ctx_->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), ctx_(ctx), thread_count_(ctx_->thread_num_) { scale_param_ = 
reinterpret_cast<ScaleParameter *>(op_parameter_); } ~ScaleInt8CPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h index 56c0a695a8..e21476d9a1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sigmoid_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SigmoidInt8CPUKernel : public LiteKernel { public: SigmoidInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~SigmoidInt8CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc index 672efcd025..1dcb6be190 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.cc @@ -24,7 +24,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Slice; +using mindspore::schema::PrimitiveType_SliceFusion; namespace mindspore::kernel { @@ -90,5 +90,5 @@ int SliceInt8CPUKernel::Run() { return ret; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Slice, LiteKernelCreator<SliceInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SliceFusion, LiteKernelCreator<SliceInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h index d0707411fa..f956f7a623 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/slice_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SliceInt8CPUKernel : public SliceCPUKernel { public: SliceInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : SliceCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : SliceCPUKernel(parameter, inputs, outputs, ctx) {} ~SliceInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc index 72678c3f8b..bd83f8aa75 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.cc @@ -27,7 +27,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_NULL_PTR; -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::kernel { @@ -131,5 +131,5 @@ int SoftmaxInt8CPUKernel::Run() { return ret; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SoftMax, LiteKernelCreator<SoftmaxInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Softmax, LiteKernelCreator<SoftmaxInt8CPUKernel>) } // namespace mindspore::kernel diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h index ecbb1ba62a..62d83bf2f1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/softmax_int8.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class SoftmaxInt8CPUKernel : public SoftmaxBaseCPUKernel { public: SoftmaxInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : SoftmaxBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~SoftmaxInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/space_to_batch_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/space_to_batch_int8.h index 021565f9c1..262e61349f 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/space_to_batch_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/space_to_batch_int8.h @@ -23,9 +23,8 @@ namespace mindspore::kernel { class SpaceToBatchInt8CPUKernel : public SpaceToBatchCPUKernel { public: SpaceToBatchInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : SpaceToBatchCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : SpaceToBatchCPUKernel(parameter, inputs, outputs, ctx) {} ~SpaceToBatchInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h index f81435bdf3..aaa935b844 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/split_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class SplitInt8CPUKernel : public SplitBaseCPUKernel { public: SplitInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : SplitBaseCPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : SplitBaseCPUKernel(parameter, inputs, outputs, ctx) {} ~SplitInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h index acde5d5368..b9d64ccf31 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/squeeze_int8.h @@ -30,9 +30,8 @@ namespace mindspore::kernel { class SqueezeInt8CPUKernel : public LiteKernel { public: SqueezeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~SqueezeInt8CPUKernel() override; int Init() override; diff --git 
a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc index 3d2765fc2d..b09758ba92 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.cc @@ -22,7 +22,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; namespace mindspore::kernel { int SubInt8CPUKernel::Init() { @@ -128,12 +128,12 @@ int SubInt8CPUKernel::Run() { } tile0_data_ = static_cast<int8_t *>(context_->allocator->Malloc(out_tensors_.at(0)->Size())); if (tile0_data_ == nullptr) { - MS_LOG(ERROR) << "malloc memroy fail!"; + MS_LOG(ERROR) << "malloc memory fail!"; return RET_ERROR; } tile1_data_ = static_cast<int8_t *>(context_->allocator->Malloc(out_tensors_.at(0)->Size())); if (tile1_data_ == nullptr) { - MS_LOG(ERROR) << "malloc memroy fail!"; + MS_LOG(ERROR) << "malloc memory fail!"; context_->allocator->Free(tile0_data_); return RET_ERROR; } @@ -152,35 +152,5 @@ int SubInt8CPUKernel::Run() { return ret; } -kernel::LiteKernel *CpuSubInt8KernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - if (parameter == nullptr) { - MS_LOG(ERROR) << "parameter is nullptr"; - return nullptr; - } - if (ctx == nullptr) { - MS_LOG(ERROR) << "ctx is nullptr"; - free(parameter); - return nullptr; - } - MS_ASSERT(desc.type == PrimitiveType_Sub); - auto *kernel = new (std::nothrow) SubInt8CPUKernel(parameter, inputs, outputs, ctx, primitive); - if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel is nullptr."; - free(parameter); - return nullptr; - } - auto ret = kernel->Init(); - if (ret != RET_OK) { - MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_ - << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(parameter->type_)); - delete kernel; - return nullptr; - } - return kernel; -} - -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_Sub, LiteKernelCreator<SubInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_SubFusion, LiteKernelCreator<SubInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h index 1b5f83d9ec..0ecb6e6ca4 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/sub_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel { class SubInt8CPUKernel : public LiteKernel { public: explicit SubInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~SubInt8CPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h index d23bb98756..0be5e503d9 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/tanh_int8.h @@ -29,9 +29,8 @@ namespace mindspore::kernel 
{ class TanhInt8CPUKernel : public LiteKernel { public: TanhInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~TanhInt8CPUKernel() override = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc index 55c78717f5..053aa6bb91 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.cc @@ -21,7 +21,7 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_TopK; +using mindspore::schema::PrimitiveType_TopKFusion; namespace mindspore::kernel { int TopKInt8CPUKernel::Init() { @@ -64,5 +64,5 @@ int TopKInt8CPUKernel::Run() { return RET_OK; } -REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_TopK, LiteKernelCreator<TopKInt8CPUKernel>) +REG_KERNEL(kCPU, kNumberTypeInt8, PrimitiveType_TopKFusion, LiteKernelCreator<TopKInt8CPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h index 2f62e569f8..94b05a7bf0 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/topk_int8.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class TopKInt8CPUKernel : public LiteKernel { public: explicit TopKInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { TopkParameter *param = reinterpret_cast<TopkParameter *>(op_parameter_); param->topk_node_list_ = nullptr; } diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc index da2dad0069..6a870a97f3 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc +++ b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.cc @@ -86,6 +86,15 @@ int TransposeInt8CPUKernel::ReSize() { transpose_param_->data_size_ = in_tensor->Size(); + // get perm data + auto perm_tensor = in_tensors_.at(1); + int *perm_data = reinterpret_cast<int *>(perm_tensor->data_c()); + MS_ASSERT(perm_data != nullptr); + transpose_param_->num_axes_ = perm_tensor->ElementsNum(); + for (int i = 0; i < transpose_param_->num_axes_; ++i) { + transpose_param_->perm_[i] = perm_data[i]; + } + transpose_param_->strides_[transpose_param_->num_axes_ - 1] = 1; transpose_param_->out_strides_[transpose_param_->num_axes_ - 1] = 1; for (int i = transpose_param_->num_axes_ - 2; i >= 0; i--) { diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.h index b8acf56773..c78c826935 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/transpose_int8.h @@ -26,9 +26,8 @@ namespace 
mindspore::kernel { class TransposeInt8CPUKernel : public LiteKernel { public: TransposeInt8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { transpose_param_ = reinterpret_cast<TransposeParameter *>(op_parameter_); } ~TransposeInt8CPUKernel() = default; diff --git a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h index f411d6c20e..f39757dc4e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h +++ b/mindspore/lite/src/runtime/kernel/arm/int8/unsqueeze_int8.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class Unsqueezeint8CPUKernel : public LiteKernel { public: Unsqueezeint8CPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), thread_count_(ctx->thread_num_) { + const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx), thread_count_(ctx->thread_num_) { param_ = reinterpret_cast<UnSqueezeParameter *>(op_parameter_); param_->thread_count_ = op_parameter_->thread_num_; } diff --git a/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.cc b/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.cc index 9d6bc8cbbf..a328d27e57 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.cc @@ -73,9 +73,8 @@ int ExtractFeatureCPUKernel::Run() { kernel::LiteKernel *CpuExtractFeatureKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) ExtractFeatureCPUKernel(parameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) ExtractFeatureCPUKernel(parameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new ExtractFeatureCPUKernel fail!"; free(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.h b/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.h index 72e8d23b6a..460a4e522e 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.h +++ b/mindspore/lite/src/runtime/kernel/arm/string/extract_feature.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ExtractFeatureCPUKernel : public LiteKernel { public: ExtractFeatureCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~ExtractFeatureCPUKernel() {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.cc 
b/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.cc index 0f8d4829dd..79980e4589 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.cc @@ -72,9 +72,8 @@ int HashtableLookupCPUKernel::Run() { kernel::LiteKernel *CpuHashtableLookupKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) HashtableLookupCPUKernel(parameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) HashtableLookupCPUKernel(parameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new HashtableLookupCPUKernel fail!"; free(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.h b/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.h index 75faadebdc..f04bed86b1 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.h +++ b/mindspore/lite/src/runtime/kernel/arm/string/hashtable_lookup.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class HashtableLookupCPUKernel : public LiteKernel { public: HashtableLookupCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~HashtableLookupCPUKernel() {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc index 4af5b127f9..3eb0c31e1b 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/normalize.cc @@ -140,9 +140,8 @@ int NormalizeCPUKernel::Run() { kernel::LiteKernel *CpuNormalizeKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) NormalizeCPUKernel(parameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) NormalizeCPUKernel(parameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new NormalizeCPUKernel fail!"; free(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/string/normalize.h b/mindspore/lite/src/runtime/kernel/arm/string/normalize.h index de7ea81a27..f7f6852dbd 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/normalize.h +++ b/mindspore/lite/src/runtime/kernel/arm/string/normalize.h @@ -26,9 +26,8 @@ namespace mindspore::kernel { class NormalizeCPUKernel : public LiteKernel { public: NormalizeCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : 
LiteKernel(parameter, inputs, outputs, ctx) {} ~NormalizeCPUKernel() = default; int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/arm/string/predict.cc b/mindspore/lite/src/runtime/kernel/arm/string/predict.cc index 3f59975ad7..f0b948e1fc 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/predict.cc +++ b/mindspore/lite/src/runtime/kernel/arm/string/predict.cc @@ -95,9 +95,8 @@ int PredictCPUKernel::Run() { kernel::LiteKernel *CpuPredictKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *parameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) PredictCPUKernel(parameter, inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) PredictCPUKernel(parameter, inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "new PredictCPUKernel fail!"; free(parameter); diff --git a/mindspore/lite/src/runtime/kernel/arm/string/predict.h b/mindspore/lite/src/runtime/kernel/arm/string/predict.h index 4239c6de78..8c04a5ded8 100644 --- a/mindspore/lite/src/runtime/kernel/arm/string/predict.h +++ b/mindspore/lite/src/runtime/kernel/arm/string/predict.h @@ -25,9 +25,8 @@ namespace mindspore::kernel { class PredictCPUKernel : public LiteKernel { public: PredictCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~PredictCPUKernel() {} int Init() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/activation_npu.cc b/mindspore/lite/src/runtime/kernel/npu/activation_npu.cc index d22072e0a1..a9f0156690 100644 --- a/mindspore/lite/src/runtime/kernel/npu/activation_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/activation_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ */ #include "src/runtime/kernel/npu/activation_npu.h" +#include "include/graph/op/all_ops.h" #include "src/kernel_registry.h" using mindspore::kernel::KERNEL_ARCH::kNPU; diff --git a/mindspore/lite/src/runtime/kernel/npu/activation_npu.h b/mindspore/lite/src/runtime/kernel/npu/activation_npu.h index 3336e1fce8..6475a597ba 100644 --- a/mindspore/lite/src/runtime/kernel/npu/activation_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/activation_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
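A recurring simplification in this patch: hand-rolled creator functions such as the deleted CpuSubInt8KernelCreator are replaced by REG_KERNEL with LiteKernelCreator<T>, and every kernel constructor drops the now-redundant PrimitiveC argument. The sketch below shows what such a templated creator plausibly does, modeled directly on the deleted creator; LiteKernelCreatorSketch is illustrative, and the real LiteKernelCreator in the tree may differ in detail.

template <class T>
kernel::LiteKernel *LiteKernelCreatorSketch(const std::vector<lite::Tensor *> &inputs,
                                            const std::vector<lite::Tensor *> &outputs, OpParameter *parameter,
                                            const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
  if (parameter == nullptr) {
    MS_LOG(ERROR) << "parameter is nullptr";
    return nullptr;
  }
  if (ctx == nullptr) {
    MS_LOG(ERROR) << "ctx is nullptr";
    free(parameter);  // released with free(), matching the deleted per-kernel creators
    return nullptr;
  }
  // Four-argument constructor: the PrimitiveC pointer is gone after IR unification.
  auto *kernel = new (std::nothrow) T(parameter, inputs, outputs, ctx);
  if (kernel == nullptr) {
    MS_LOG(ERROR) << "new kernel failed.";
    free(parameter);
    return nullptr;
  }
  if (kernel->Init() != RET_OK) {
    MS_LOG(ERROR) << "Init kernel failed, name: " << parameter->name_;
    delete kernel;
    return nullptr;
  }
  return kernel;
}

One template instantiation per registration replaces a near-identical function per kernel, which is why so many creator bodies disappear in this diff.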
@@ -18,15 +18,16 @@ #include <vector> #include "include/graph/op/all_ops.h" +#include "include/graph/compatible/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" #include "nnacl/fp32/activation_fp32.h" + namespace mindspore::kernel { class ActivationNPUKernel : public NPUKernel { public: ActivationNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { act_param_ = reinterpret_cast<ActivationParameter *>(parameter); } ~ActivationNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc b/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc index f3e9ed968a..aa9322d989 100644 --- a/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/arithmetic_npu.h" #include <string> +#include "include/graph/op/all_ops.h" #include "src/kernel_registry.h" using mindspore::kernel::KERNEL_ARCH::kNPU; @@ -23,8 +24,8 @@ using mindspore::lite::KernelRegistrar; using mindspore::schema::ActivationType_NO_ACTIVATION; using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; -using mindspore::schema::PrimitiveType_Add; -using mindspore::schema::PrimitiveType_Div; +using mindspore::schema::PrimitiveType_AddFusion; +using mindspore::schema::PrimitiveType_DivFusion; using mindspore::schema::PrimitiveType_Equal; using mindspore::schema::PrimitiveType_FloorDiv; using mindspore::schema::PrimitiveType_FloorMod; @@ -36,10 +37,9 @@ using mindspore::schema::PrimitiveType_LogicalAnd; using mindspore::schema::PrimitiveType_LogicalOr; using mindspore::schema::PrimitiveType_Maximum; using mindspore::schema::PrimitiveType_Minimum; -using mindspore::schema::PrimitiveType_Mul; +using mindspore::schema::PrimitiveType_MulFusion; using mindspore::schema::PrimitiveType_NotEqual; -using mindspore::schema::PrimitiveType_SquaredDifference; -using mindspore::schema::PrimitiveType_Sub; +using mindspore::schema::PrimitiveType_SubFusion; namespace mindspore::kernel { int ArithmeticNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, @@ -88,17 +88,17 @@ int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const std::vector<ge::Operator *> &npu_inputs) { ge::Operator *op = nullptr; - switch (primitive_->Type()) { - case PrimitiveType_Mul: + switch (op_parameter_->type_) { + case PrimitiveType_MulFusion: op = CreateOperator<hiai::op::Mul>(npu_inputs, name_); break; - case PrimitiveType_Add: + case PrimitiveType_AddFusion: op = CreateOperator<hiai::op::Add>(npu_inputs, name_); break; - case PrimitiveType_Sub: + case PrimitiveType_SubFusion: op = CreateOperator<hiai::op::Sub>(npu_inputs, name_); break; - case PrimitiveType_Div: + case PrimitiveType_DivFusion: op = CreateOperator<hiai::op::RealDiv>(npu_inputs, name_); break; case PrimitiveType_FloorMod: @@ -119,9 +119,6 @@ int 
ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, case PrimitiveType_Minimum: op = CreateOperator<hiai::op::Minimum>(npu_inputs, name_); break; - case PrimitiveType_SquaredDifference: - op = CreateOperator<hiai::op::SquaredDifference>(npu_inputs, name_); - break; case PrimitiveType_NotEqual: op = CreateOperator<hiai::op::NotEqual>(npu_inputs, name_); break; @@ -142,7 +139,7 @@ int ArithmeticNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, break; default: MS_LOG(ERROR) << "Unsupported primitive type:" - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_->Type())); + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter_->type_)); return RET_ERROR; } if (op == nullptr) { @@ -177,17 +174,17 @@ ArithmeticNPUKernel::~ArithmeticNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Mul, NPUKernelCreator<ArithmeticNPUKernel>) -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Add, NPUKernelCreator<ArithmeticNPUKernel>) -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Sub, NPUKernelCreator<ArithmeticNPUKernel>) -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Div, NPUKernelCreator<ArithmeticNPUKernel>) +// SquaredDifference is not supported on NPU. +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_MulFusion, NPUKernelCreator<ArithmeticNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_AddFusion, NPUKernelCreator<ArithmeticNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_SubFusion, NPUKernelCreator<ArithmeticNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_DivFusion, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_FloorMod, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_FloorDiv, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LogicalAnd, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_LogicalOr, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Maximum, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Minimum, NPUKernelCreator<ArithmeticNPUKernel>) -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_SquaredDifference, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_NotEqual, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Equal, NPUKernelCreator<ArithmeticNPUKernel>) REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Less, NPUKernelCreator<ArithmeticNPUKernel>) diff --git a/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h b/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h index 35780f9355..9f48009dbe 100644 --- a/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/arithmetic_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,16 +17,15 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETIC_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/arithmetic.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class ArithmeticNPUKernel : public NPUKernel { public: ArithmeticNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { activation_type_ = reinterpret_cast<ArithmeticParameter *>(parameter)->activation_type_; } ~ArithmeticNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc b/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc index a1c11206a2..421e13f84c 100644 --- a/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/arithmetic_self_npu.h" #include <string> +#include "include/graph/op/all_ops.h" #include "src/kernel_registry.h" using mindspore::kernel::KERNEL_ARCH::kNPU; @@ -54,7 +55,7 @@ int ArithmeticSelfNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inp const std::vector<lite::Tensor *> &outputs, const std::vector<ge::Operator *> &npu_inputs) { ge::Operator *op = nullptr; - switch (primitive_->Type()) { + switch (op_parameter_->type_) { case PrimitiveType_Cos: op = CreateOperator<hiai::op::Cos>(npu_inputs[0], name_); break; @@ -93,7 +94,7 @@ int ArithmeticSelfNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inp break; default: MS_LOG(ERROR) << "Unsupported primitive type:" - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive_->Type())); + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter_->type_)); return RET_ERROR; } if (op == nullptr) { diff --git a/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h b/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h index d841dc6f51..e860ab6833 100644 --- a/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/arithmetic_self_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,15 +17,14 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ARITHMETICSELF_NPU_H_ #include <vector> -#include "include/graph/op/math_defs.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/math_defs.h" namespace mindspore::kernel { class ArithmeticSelfNPUKernel : public NPUKernel { public: ArithmeticSelfNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) {} ~ArithmeticSelfNPUKernel() override; int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, diff --git a/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.cc b/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.cc index 726296a090..2ccedf0f26 100644 --- a/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h b/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h index 9e62207498..f9b2d84611 100644 --- a/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/batchnorm_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_BATCHNORM_NPU_H_ #include <vector> +#include "include/graph/op/all_ops.h" #include "include/graph/compatible/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" #include "nnacl/batchnorm_parameter.h" @@ -25,9 +26,8 @@ namespace mindspore::kernel { class BatchnormNPUKernel : public NPUKernel { public: BatchnormNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { batchnorm_param_ = reinterpret_cast<BatchNormParameter *>(parameter); } ~BatchnormNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/cast_npu.cc b/mindspore/lite/src/runtime/kernel/npu/cast_npu.cc index 1f9e7e3dad..22915a1654 100644 --- a/mindspore/lite/src/runtime/kernel/npu/cast_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/cast_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,6 +24,12 @@ using mindspore::schema::PrimitiveType_Cast; namespace mindspore::kernel { int CastNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) { + if (inputs.size() >= 2 && inputs[1]->ElementsNum() == 1) { + dst_type_ = static_cast<int *>(inputs[1]->data_c())[0]; + } else { + MS_LOG(WARNING) << "NPU dst dtype must come from an input tensor, not an attribute."; + return RET_ERROR; + } return RET_OK; } @@ -35,7 +41,7 @@ int CastNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const return RET_ERROR; } op_->set_input_x(*npu_inputs[0]); - op_->set_attr_dst_dtype(lite::ConverterToNPUDataType(static_cast<TypeId>(outputs[0]->data_type()))); + op_->set_attr_dst_dtype(lite::ConverterToNPUDataType(static_cast<TypeId>(dst_type_))); op_->set_attr_src_dtype(lite::ConverterToNPUDataType(static_cast<TypeId>(inputs[0]->data_type()))); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/cast_npu.h b/mindspore/lite/src/runtime/kernel/npu/cast_npu.h index fe3acb3a7d..682732b8a9 100644 --- a/mindspore/lite/src/runtime/kernel/npu/cast_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/cast_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,15 +17,14 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class CastNPUKernel : public NPUKernel { public: CastNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) {} ~CastNPUKernel() override; int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, @@ -36,6 +35,7 @@ class CastNPUKernel : public NPUKernel { private: hiai::op::CastT *op_ = nullptr; + int dst_type_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CAST_NPU_H_ diff --git a/mindspore/lite/src/runtime/kernel/npu/concat_npu.cc b/mindspore/lite/src/runtime/kernel/npu/concat_npu.cc index e094ef31ad..a9e2aa3f75 100644 --- a/mindspore/lite/src/runtime/kernel/npu/concat_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/concat_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
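The Cast change just above is one instance of a pattern applied across the NPU kernels in this patch: with the unified IR, values that used to be primitive attributes (Cast's dst dtype here, Gather's axis and Transpose's perm later in this diff) arrive as extra constant input tensors, so IsSupport now reads and validates them from tensor data. A hedged sketch of that shared read-and-validate step follows; ReadScalarInput is illustrative, not a helper in the tree, and it adds a data_c() null check that the hand-inlined versions omit.

// Fetch a single int from inputs[index], failing if the operand is absent
// (i.e. the model still carries it as an attribute) or not a scalar.
int ReadScalarInput(const std::vector<lite::Tensor *> &inputs, size_t index, int *value) {
  if (inputs.size() <= index || inputs[index]->ElementsNum() != 1) {
    return RET_ERROR;
  }
  auto *data = static_cast<int *>(inputs[index]->data_c());
  if (data == nullptr) {
    return RET_ERROR;  // no backing buffer yet, e.g. a non-const input
  }
  *value = data[0];
  return RET_OK;
}

Rejecting the attribute form in IsSupport presumably lets the scheduler fall back to the CPU kernel instead of running the NPU op with an undefined operand.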
diff --git a/mindspore/lite/src/runtime/kernel/npu/concat_npu.h b/mindspore/lite/src/runtime/kernel/npu/concat_npu.h index 35ba693a9b..e8cee303d8 100644 --- a/mindspore/lite/src/runtime/kernel/npu/concat_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/concat_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,16 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_CONCAT_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/concat_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" + namespace mindspore::kernel { class ConcatNPUKernel : public NPUKernel { public: ConcatNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { concat_param_ = reinterpret_cast<ConcatParameter *>(parameter); } ~ConcatNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc index 282d732b88..6f134709c0 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.cc @@ -91,7 +91,7 @@ int ConvolutionBaseNPUKernel::SetActivation(const ge::Operator *input, ActType a } else if (act_type == ActType_Relu6) { act_->set_attr_mode(14); } else { - MS_LOG(ERROR) << "Unsupport activation type for convolution."; + MS_LOG(ERROR) << "Unsupported activation type for convolution."; return RET_ERROR; } return RET_OK; diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h index 8df99eff7d..2446b15cb6 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_base_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,16 +18,16 @@ #include <vector> #include <memory> -#include "include/graph/op/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" #include "nnacl/conv_parameter.h" + namespace mindspore::kernel { class ConvolutionBaseNPUKernel : public NPUKernel { public: ConvolutionBaseNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) {} ~ConvolutionBaseNPUKernel() override; protected: diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc b/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc index e463e241c4..ba7360908c 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,10 @@ #include "src/runtime/kernel/npu/convolution_depthwise_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" + using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { int ConvolutionDepthwiseNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, @@ -30,10 +31,10 @@ int ConvolutionDepthwiseNPUKernel::SetConvDwParam() { conv_dw_->set_attr_strides(ge::AttrValue::LIST_INT({conv_param_->stride_h_, conv_param_->stride_w_})); conv_dw_->set_attr_dilations(ge::AttrValue::LIST_INT({conv_param_->dilation_h_, conv_param_->dilation_w_})); - if (conv_param_->pad_mode_ == Pad_Same) { + if (conv_param_->pad_mode_ == Pad_same) { conv_dw_->set_attr_pad_mode(ge::AttrValue::STR{"SAME"}); conv_dw_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); - } else if (conv_param_->pad_mode_ == Pad_Valid) { + } else if (conv_param_->pad_mode_ == Pad_valid) { conv_dw_->set_attr_pad_mode(ge::AttrValue::STR{"VALID"}); conv_dw_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); } else { @@ -100,6 +101,4 @@ ConvolutionDepthwiseNPUKernel::~ConvolutionDepthwiseNPUKernel() { conv_dw_ = nullptr; } } - -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_DepthwiseConv2D, NPUKernelCreator<ConvolutionDepthwiseNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h b/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h index cf2e6e33bb..e8cc94de32 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_depthwise_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,16 +18,17 @@ #include <vector> #include "include/graph/op/all_ops.h" +#include "include/graph/compatible/all_ops.h" #include "src/runtime/kernel/npu/convolution_base_npu.h" +#include "src/runtime/kernel/npu/npu_kernel.h" #include "nnacl/conv_parameter.h" namespace mindspore::kernel { class ConvolutionDepthwiseNPUKernel : public ConvolutionBaseNPUKernel { public: ConvolutionDepthwiseNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx) { conv_param_ = reinterpret_cast<ConvParameter *>(parameter); } ~ConvolutionDepthwiseNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc index bbbaee9888..cf82a6ef74 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,10 +15,12 @@ */ #include "src/runtime/kernel/npu/convolution_npu.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" +#include "src/runtime/kernel/npu/convolution_depthwise_npu.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; namespace mindspore::kernel { int ConvolutionNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, @@ -31,10 +33,10 @@ int ConvolutionNPUKernel::SetConvParam() { conv_->set_attr_dilations(ge::AttrValue::LIST_INT({conv_param_->dilation_h_, conv_param_->dilation_w_})); conv_->set_attr_groups(conv_param_->group_); - if (conv_param_->pad_mode_ == Pad_Same) { + if (conv_param_->pad_mode_ == Pad_same) { conv_->set_attr_pad_mode(ge::AttrValue::STR{"SAME"}); conv_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); - } else if (conv_param_->pad_mode_ == Pad_Valid) { + } else if (conv_param_->pad_mode_ == Pad_valid) { conv_->set_attr_pad_mode(ge::AttrValue::STR{"VALID"}); conv_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); } else { @@ -66,7 +68,6 @@ int ConvolutionNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs return RET_ERROR; } conv_->set_input_filter(*weight_); - if (inputs.size() == 3) { ret = InitBiasConst(inputs); if (ret != RET_OK) { @@ -102,5 +103,36 @@ ConvolutionNPUKernel::~ConvolutionNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Conv2D, NPUKernelCreator<ConvolutionNPUKernel>) +kernel::LiteKernel *NpuConvKernelCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(op_parameter != nullptr); + MS_ASSERT(desc.type == schema::PrimitiveType_Conv2DFusion); + + auto conv_param = reinterpret_cast<ConvParameter *>(op_parameter); + kernel::NPUKernel *kernel = nullptr; + + if (conv_param->group_ == 1) { + kernel = new (std::nothrow) kernel::ConvolutionNPUKernel(op_parameter, inputs, 
outputs, ctx); + } else if (conv_param->group_ == conv_param->input_channel_ && conv_param->group_ == conv_param->output_channel_) { + kernel = new (std::nothrow) kernel::ConvolutionDepthwiseNPUKernel(op_parameter, inputs, outputs, ctx); + } else { + MS_LOG(ERROR) << "NPU does not support group conv!"; + kernel = nullptr; + } + if (kernel == nullptr) { + MS_LOG(ERROR) << "kernel " << op_parameter->name_ << " is nullptr."; + free(op_parameter); + return nullptr; + } + + auto ret = kernel->IsSupport(inputs, outputs, op_parameter); + if (ret != RET_OK) { + delete kernel; + return nullptr; + } + return kernel; +} + +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Conv2DFusion, NpuConvKernelCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.h b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.h index 010386d7b4..a4f82eda93 100644 --- a/mindspore/lite/src/runtime/kernel/npu/convolution_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/convolution_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,9 +25,8 @@ namespace mindspore::kernel { class ConvolutionNPUKernel : public ConvolutionBaseNPUKernel { public: ConvolutionNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx) { conv_param_ = reinterpret_cast<ConvParameter *>(parameter); } ~ConvolutionNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc b/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc index 69bda798c7..0d07e14625 100644 --- a/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
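For clarity, the dispatch rule NpuConvKernelCreator applies above, restated as a standalone predicate (illustrative only; the creator itself is the authoritative logic): group_ == 1 selects plain convolution, group_ equal to both channel counts selects depthwise, and any other grouped convolution is rejected.

enum class NpuConvKind { kConv, kDepthwise, kUnsupported };

// Mirrors the if/else-if chain in NpuConvKernelCreator.
NpuConvKind ClassifyConv(int group, int input_channel, int output_channel) {
  if (group == 1) {
    return NpuConvKind::kConv;
  }
  if (group == input_channel && group == output_channel) {
    return NpuConvKind::kDepthwise;
  }
  return NpuConvKind::kUnsupported;  // general grouped conv: no NPU support
}

This also explains why the standalone REG_KERNEL for PrimitiveType_DepthwiseConv2D disappeared earlier in the diff: depthwise kernels are now reached only through the Conv2DFusion creator.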
@@ -15,9 +15,11 @@ */ #include "src/runtime/kernel/npu/deconvolution_npu.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" + using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::kernel { int DeconvolutionNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, @@ -34,10 +36,10 @@ int DeconvolutionNPUKernel::SetConvParam() { deconv_->set_attr_dilations(ge::AttrValue::LIST_INT({conv_param_->dilation_h_, conv_param_->dilation_w_})); deconv_->set_attr_groups(conv_param_->group_); - if (conv_param_->pad_mode_ == Pad_Same) { + if (conv_param_->pad_mode_ == Pad_same) { deconv_->set_attr_pad_mode(ge::AttrValue::STR{"SAME"}); deconv_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); - } else if (conv_param_->pad_mode_ == Pad_Valid) { + } else if (conv_param_->pad_mode_ == Pad_valid) { deconv_->set_attr_pad_mode(ge::AttrValue::STR{"VALID"}); deconv_->set_attr_pads(ge::AttrValue::LIST_INT({0, 0, 0, 0})); } else { @@ -69,7 +71,6 @@ int DeconvolutionNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inpu return RET_ERROR; } deconv_->set_input_filter(*weight_); - if (inputs.size() == 3) { ret = InitBiasConst(inputs); if (ret != RET_OK) { @@ -105,5 +106,5 @@ DeconvolutionNPUKernel::~DeconvolutionNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_DeConv2D, NPUKernelCreator<DeconvolutionNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Conv2dTransposeFusion, NPUKernelCreator<DeconvolutionNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.h b/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.h index a1e4a1ad91..91e16c7c18 100644 --- a/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/deconvolution_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,9 +25,8 @@ namespace mindspore::kernel { class DeconvolutionNPUKernel : public ConvolutionBaseNPUKernel { public: DeconvolutionNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx) { conv_param_ = reinterpret_cast<ConvParameter *>(parameter); } ~DeconvolutionNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc b/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc index d3b7f230c1..1d8ebee2de 100644 --- a/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,7 @@ */ #include "src/runtime/kernel/npu/eltwise_npu.h" +#include "include/graph/op/all_ops.h" #include "src/kernel_registry.h" #include "src/runtime/agent/npu/npu_converter_utils.h" diff --git a/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h b/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h index 96893e5d31..2af9792414 100644 --- a/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/eltwise_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,18 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_ELTWISE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" -#include "src/ops/eltwise.h" +#include "nnacl/arithmetic.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class EltwiseNPUKernel : public NPUKernel { public: EltwiseNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - auto eltwise = reinterpret_cast<const mindspore::lite::Eltwise *>(primitive); - mode_ = static_cast<schema::EltwiseMode>(eltwise->GetMode()); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + mode_ = static_cast<schema::EltwiseMode>(reinterpret_cast<ArithmeticParameter *>(parameter)->eltwise_mode_); } ~EltwiseNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h b/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h index 1edffd7402..699c0d6a6c 100644 --- a/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/fullconnection_npu.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class FullconnectionNPUKernel : public ConvolutionBaseNPUKernel { public: FullconnectionNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx) { fc_param_ = reinterpret_cast<MatMulParameter *>(parameter); } ~FullconnectionNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/gather_npu.cc b/mindspore/lite/src/runtime/kernel/npu/gather_npu.cc index 7bf752d58f..8de20c5c19 100644 --- a/mindspore/lite/src/runtime/kernel/npu/gather_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/gather_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
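The eltwise constructor rewrite is the whole patch in miniature: an attribute that used to be fetched through a PrimitiveC object (eltwise->GetMode()) is now read straight off the nnacl parameter struct that already travels with every kernel. The cast is sound because each concrete parameter struct embeds an OpParameter as its first member. A self-contained model of the idiom, with simplified stand-in structs:

#include <cassert>

// Model of nnacl's C-style layering: OpParameter is the first member, so a
// pointer to it and a pointer to the enclosing struct share an address.
struct OpParameter {
  int type_;
};

struct ArithmeticParameterModel {
  OpParameter op_parameter_;  // must remain the first member
  int eltwise_mode_;
};

// The same reinterpret_cast the kernel constructor performs; it relies on the
// first-member layout guarantee described above.
int ReadEltwiseMode(OpParameter *param) {
  return reinterpret_cast<ArithmeticParameterModel *>(param)->eltwise_mode_;
}

int main() {
  ArithmeticParameterModel p{{/*type_=*/1}, /*eltwise_mode_=*/2};
  assert(ReadEltwiseMode(&p.op_parameter_) == 2);
  return 0;
}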
@@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/gather_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Gather; @@ -27,6 +28,12 @@ int GatherNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const MS_LOG(WARNING) << "Gather indices only support Int32"; return RET_ERROR; } + if (inputs.size() >= 3 && inputs[2]->ElementsNum() == 1) { + axis_ = static_cast<int *>(inputs[2]->data_c())[0]; + } else { + MS_LOG(WARNING) << "NPU axis is attribute."; + return RET_ERROR; + } return RET_OK; } @@ -40,7 +47,7 @@ int GatherNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, con op_->set_input_x(*npu_inputs[0]); op_->set_input_indices(*npu_inputs[1]); - op_->set_attr_axis(gather_parameter_->axis_); + op_->set_attr_axis(axis_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/gather_npu.h b/mindspore/lite/src/runtime/kernel/npu/gather_npu.h index dcbc7537dc..b72fe8c752 100644 --- a/mindspore/lite/src/runtime/kernel/npu/gather_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/gather_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,16 +17,15 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" #include "nnacl/gather_parameter.h" namespace mindspore::kernel { class GatherNPUKernel : public NPUKernel { public: GatherNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { gather_parameter_ = reinterpret_cast<GatherParameter *>(parameter); } ~GatherNPUKernel() override; @@ -40,6 +39,7 @@ class GatherNPUKernel : public NPUKernel { private: hiai::op::GatherV2D *op_ = nullptr; GatherParameter *gather_parameter_; + int axis_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_GATHER_NPU_H_ diff --git a/mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h b/mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h index 26d90132cc..b71701d044 100644 --- a/mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/instance_norm_npu.h @@ -24,9 +24,8 @@ namespace mindspore::kernel { class InstanceNormNPUKernel : public NPUKernel { public: InstanceNormNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { instance_norm_param_ = reinterpret_cast<InstanceNormParameter *>(parameter); } ~InstanceNormNPUKernel() override; diff --git 
a/mindspore/lite/src/runtime/kernel/npu/matmul_npu.cc b/mindspore/lite/src/runtime/kernel/npu/matmul_npu.cc index ba4cf5b76e..08a9de491f 100644 --- a/mindspore/lite/src/runtime/kernel/npu/matmul_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/matmul_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,6 +32,9 @@ int MatMulNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, con op_ = new (std::nothrow) hiai::op::MatMul(name_); op_->set_input_x1(*npu_inputs[0]); op_->set_input_x2(*npu_inputs[1]); + if (npu_inputs.size() == 3) { + op_->set_input_bias(*npu_inputs[2]); + } op_->set_attr_transpose_x1(matmul_parameter_->a_transpose_); op_->set_attr_transpose_x2(matmul_parameter_->b_transpose_); diff --git a/mindspore/lite/src/runtime/kernel/npu/matmul_npu.h b/mindspore/lite/src/runtime/kernel/npu/matmul_npu.h index 1beef6ac54..4b54d9e293 100644 --- a/mindspore/lite/src/runtime/kernel/npu/matmul_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/matmul_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,17 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_MATMUL_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/matmul_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" #include "nnacl/softmax_parameter.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class MatMulNPUKernel : public NPUKernel { public: MatMulNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { matmul_parameter_ = reinterpret_cast<MatMulParameter *>(parameter); } ~MatMulNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h index aa8f6cfb7c..fc69df177b 100644 --- a/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h +++ b/mindspore/lite/src/runtime/kernel/npu/npu_kernel.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
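Stepping back to the gather hunk above: in the unified IR the axis arrives as a third input tensor rather than a schema attribute, so IsSupport reads it from inputs[2] and refuses offload when only the attribute form exists; the MatMul change follows the same philosophy by treating bias as an optional third graph input. A standalone model of the axis extraction with a stand-in tensor type (the data_c() null check is added here for illustration; the pad and transpose hunks later in this patch do perform it):

#include <cstdio>
#include <optional>
#include <vector>

// Stand-in for lite::Tensor: element count plus raw const data access.
struct TensorModel {
  std::vector<int> data;
  int ElementsNum() const { return static_cast<int>(data.size()); }
  const int *data_c() const { return data.empty() ? nullptr : data.data(); }
};

// Returns the axis when it is supplied as a one-element const input tensor,
// mirroring the check added to GatherNPUKernel::IsSupport.
std::optional<int> GatherAxisFromInputs(const std::vector<TensorModel> &inputs) {
  if (inputs.size() >= 3 && inputs[2].ElementsNum() == 1 && inputs[2].data_c() != nullptr) {
    return inputs[2].data_c()[0];
  }
  return std::nullopt;  // axis only exists as an attribute: no NPU offload
}

int main() {
  std::vector<TensorModel> inputs = {{}, {}, {{2}}};
  if (auto axis = GatherAxisFromInputs(inputs)) {
    std::printf("axis = %d\n", *axis);
  }
  return 0;
}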
@@ -18,9 +18,9 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_KERNEL_NPU_H_ #include <vector> -#include "include/graph/graph.h" #include "src/lite_kernel.h" #include "include/errorcode.h" +#include "include/graph/graph.h" #include "src/kernel_registry.h" using mindspore::kernel::LiteKernel; @@ -30,9 +30,8 @@ namespace mindspore::kernel { class NPUKernel : public LiteKernel { public: NPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~NPUKernel() override = default; int Run() override { return RET_ERROR; } @@ -50,28 +49,27 @@ class NPUKernel : public LiteKernel { }; template <class T> kernel::LiteKernel *NPUKernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - if (!primitive->infer_flag()) { + const std::vector<lite::Tensor *> &outputs, OpParameter *op_parameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + if (!op_parameter->infer_flag_) { MS_LOG(ERROR) << "NPU does not support runtime inference shape. Type is:" - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())); + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(op_parameter->type_)); return nullptr; } if (inputs[0]->shape().size() > 4) { MS_LOG(ERROR) << "Npu does not support input tensor dims greater than 4"; return nullptr; } - - auto *kernel = new (std::nothrow) T(opParameter, inputs, outputs, ctx, primitive); + auto *kernel = new (std::nothrow) T(op_parameter, inputs, outputs, ctx); if (kernel == nullptr) { - MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr."; - free(opParameter); + MS_LOG(ERROR) << "kernel " << op_parameter->name_ << " is nullptr."; + free(op_parameter); return nullptr; } - auto ret = kernel->IsSupport(inputs, outputs, opParameter); + auto ret = kernel->IsSupport(inputs, outputs, op_parameter); if (ret != RET_OK) { + delete kernel; return nullptr; } return kernel; diff --git a/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc b/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc index 0bf8859272..f6096f8b9a 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/pad_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
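Besides dropping the primitive argument, the rewritten NPUKernelCreator fixes a leak: a kernel that failed IsSupport used to be abandoned while nullptr was returned, and it is now deleted first. A unique_ptr that releases ownership only on success makes that failure path structurally leak-free; a sketch of that alternative shape with a stand-in kernel type:

#include <memory>

struct KernelModel {
  // Stand-in for IsSupport(); 0 plays the role of RET_OK.
  int IsSupport() const { return 0; }
};

KernelModel *CreateCheckedKernel() {
  auto kernel = std::make_unique<KernelModel>();
  if (kernel->IsSupport() != 0) {
    return nullptr;  // unique_ptr destroys the kernel automatically here
  }
  return kernel.release();  // ownership passes to the caller only on success
}

int main() {
  delete CreateCheckedKernel();
  return 0;
}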
@@ -20,15 +20,23 @@ #include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Pad; +using mindspore::schema::PrimitiveType_PadFusion; namespace mindspore::kernel { int PadNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter) { - if (pad_->GetPaddingMode() != schema::PaddingMode_CONSTANT) { + if (param_->pad_mode_ != schema::PaddingMode_CONSTANT) { MS_LOG(WARNING) << "NPU only support CONSTANT padding mode"; return RET_ERROR; } + if (inputs.size() >= 2 && inputs[1]->data_c() != nullptr) { + for (int i = 0; i < inputs[1]->ElementsNum(); i++) { + paddings_.push_back(static_cast<int *>(inputs[1]->data_c())[i]); + } + } else { + MS_LOG(WARNING) << "NPU paddings is attribute."; + return RET_ERROR; + } return RET_OK; } @@ -39,16 +47,16 @@ int PadNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const MS_LOG(ERROR) << name_ << " op is nullptr"; return RET_ERROR; } - int size = static_cast<int>(pad_->GetPaddings().size() / 2); + int size = static_cast<int>(param_->padding_length / 2); ge::TensorDesc padding_tensor_desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32); ge::TensorPtr padding_tensor = std::make_shared<hiai::Tensor>(padding_tensor_desc); - padding_tensor->SetData(reinterpret_cast<uint8_t *>(pad_->GetPaddings().data()), 2 * size * sizeof(int)); + padding_tensor->SetData(reinterpret_cast<uint8_t *>(paddings_.data()), 2 * size * sizeof(int)); auto paddings = new hiai::op::Const(name_ + "paddings"); paddings->set_attr_value(padding_tensor); ge::TensorDesc constant_values_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_FLOAT); ge::TensorPtr constant_values_tensor = std::make_shared<hiai::Tensor>(constant_values_tensor_desc); - vector<float> constant_values_data_value = {pad_->GetConstantValue()}; + vector<float> constant_values_data_value = {param_->constant_value_}; constant_values_tensor->SetData(reinterpret_cast<uint8_t *>(constant_values_data_value.data()), 1 * sizeof(float)); auto constant = new hiai::op::Const(name_ + "constant"); constant->set_attr_value(constant_values_tensor); @@ -69,5 +77,5 @@ PadNPUKernel::~PadNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Pad, NPUKernelCreator<PadNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_PadFusion, NPUKernelCreator<PadNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/pad_npu.h b/mindspore/lite/src/runtime/kernel/npu/pad_npu.h index 2310663122..91c0447385 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pad_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/pad_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
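The pad kernel now owns its paddings as plain data, copied out of inputs[1] during IsSupport, and packs them into an (N, 2) int32 constant at graph-build time. The helper below restates that packing with the same ge/hiai calls the hunk uses; it assumes the HiAI DDK headers this file already includes, so treat it as the shape of the pattern rather than a drop-in utility:

#include <memory>
#include <new>
#include <string>
#include <vector>

// Packs a flat {before0, after0, before1, after1, ...} vector into an (N, 2)
// int32 const operator, as PadNPUKernel::SetNPUInputs does above.
hiai::op::Const *MakePaddingsConst(const std::string &name, std::vector<int> *paddings) {
  int size = static_cast<int>(paddings->size() / 2);
  ge::TensorDesc desc(ge::Shape({size, 2}), ge::FORMAT_NCHW, ge::DT_INT32);
  ge::TensorPtr tensor = std::make_shared<hiai::Tensor>(desc);
  tensor->SetData(reinterpret_cast<uint8_t *>(paddings->data()), 2 * size * sizeof(int));
  auto *paddings_op = new (std::nothrow) hiai::op::Const(name + "_paddings");
  if (paddings_op != nullptr) {
    paddings_op->set_attr_value(tensor);
  }
  return paddings_op;
}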
@@ -17,18 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/pad_parameter.h" -#include "src/ops/pad.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class PadNPUKernel : public NPUKernel { public: PadNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - pad_ = reinterpret_cast<const mindspore::lite::Pad *>(primitive); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + param_ = reinterpret_cast<PadParameter *>(parameter); } ~PadNPUKernel() override; @@ -40,7 +38,8 @@ class PadNPUKernel : public NPUKernel { private: hiai::op::PadV2 *op_ = nullptr; - const mindspore::lite::Pad *pad_; + PadParameter *param_; + std::vector<int> paddings_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_PAD_NPU_H_ diff --git a/mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc b/mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc index 7cc37bf49a..75468aff1b 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/pooling_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,8 @@ using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore::kernel { int PoolingNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, @@ -42,10 +43,10 @@ int PoolingNPUKernel::SetPoolingParam() { pooling_->set_attr_global_pooling(pooling_param_->global_); pooling_->set_attr_window({pooling_param_->window_h_, pooling_param_->window_w_}); pooling_->set_attr_stride({pooling_param_->stride_h_, pooling_param_->stride_w_}); - if (pooling_param_->pad_mode_ == Pad_Same) { + if (pooling_param_->pad_mode_ == Pad_same) { pooling_->set_attr_pad_mode(6); pooling_->set_attr_pad({0, 0, 0, 0}); - } else if (pooling_param_->pad_mode_ == Pad_Valid) { + } else if (pooling_param_->pad_mode_ == Pad_valid) { pooling_->set_attr_pad_mode(5); pooling_->set_attr_pad({0, 0, 0, 0}); } else { @@ -104,5 +105,6 @@ PoolingNPUKernel::~PoolingNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Pooling, NPUKernelCreator<PoolingNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_MaxPoolFusion, NPUKernelCreator<PoolingNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_AvgPoolFusion, NPUKernelCreator<PoolingNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/pooling_npu.h b/mindspore/lite/src/runtime/kernel/npu/pooling_npu.h index 572cc07f50..bb00cb7028 100644 --- a/mindspore/lite/src/runtime/kernel/npu/pooling_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/pooling_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies 
Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,9 +25,8 @@ namespace mindspore::kernel { class PoolingNPUKernel : public ConvolutionBaseNPUKernel { public: PoolingNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : ConvolutionBaseNPUKernel(parameter, inputs, outputs, ctx) { pooling_param_ = reinterpret_cast<PoolingParameter *>(parameter); } ~PoolingNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc b/mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc index 15ff4d7ef7..a7e6520488 100644 --- a/mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/reduce_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ #include "src/kernel_registry.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; using mindspore::schema::ReduceMode_ReduceMean; namespace mindspore::kernel { @@ -37,22 +37,14 @@ int ReduceNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const int ReduceNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, const std::vector<ge::Operator *> &npu_inputs) { - std::vector<int32_t> axes; - for (int i = 0; i < reduce_param_->num_axes_; i++) { - axes.push_back(reduce_param_->axes_[i]); - } - auto axes_op = new (std::nothrow) hiai::op::Const(name_ + "_reduce_axes"); - ge::TensorDesc axes_tensor_desc(ge::Shape({reduce_param_->num_axes_}), ge::FORMAT_NCHW, ge::DT_INT32); - ge::TensorPtr axes_tensor = std::make_shared<hiai::Tensor>(axes_tensor_desc); - axes_tensor->SetData(reinterpret_cast<uint8_t *>(axes.data()), reduce_param_->num_axes_ * sizeof(int32_t)); - axes_op->set_attr_value(axes_tensor); - auto reduce_mean_ = new (std::nothrow) hiai::op::ReduceMean(name_); if (reduce_mean_ == nullptr) { MS_LOG(ERROR) << "New reduce operator for op " << name_ << " failed."; return RET_ERROR; } - reduce_mean_->set_input_x(*npu_inputs[0]).set_input_axes(*axes_op).set_attr_keep_dims(reduce_param_->keep_dims_); + reduce_mean_->set_input_x(*npu_inputs[0]) + .set_input_axes(*npu_inputs[1]) + .set_attr_keep_dims(reduce_param_->keep_dims_); reduce_ = reduce_mean_; return RET_OK; } @@ -66,5 +58,5 @@ ReduceNPUKernel::~ReduceNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Reduce, NPUKernelCreator<ReduceNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_ReduceFusion, NPUKernelCreator<ReduceNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/reduce_npu.h b/mindspore/lite/src/runtime/kernel/npu/reduce_npu.h index ca8c41e643..c618b7f70b 100644 --- a/mindspore/lite/src/runtime/kernel/npu/reduce_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/reduce_npu.h @@ -1,5 +1,5 @@ /** - * 
Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,9 +24,8 @@ namespace mindspore::kernel { class ReduceNPUKernel : public NPUKernel { public: ReduceNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { reduce_param_ = reinterpret_cast<ReduceParameter *>(parameter); } ~ReduceNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc index 8ad53a3318..0e31280e77 100644 --- a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,8 @@ #include "src/runtime/kernel/npu/reshape_npu.h" #include <memory> #include "src/kernel_registry.h" +#include "include/graph/op/all_ops.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Reshape; @@ -40,17 +42,7 @@ int ReshapeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, return RET_ERROR; } op_->set_input_x(*npu_inputs[0]); - - auto shape_op = new (std::nothrow) hiai::op::Const(name_ + "_shape"); - std::vector<int> shape; - for (int i = 0; i < reshape_param_->shape_dim_; i++) { - shape.push_back(reshape_param_->shape_[i]); - } - ge::TensorDesc shape_tensor_desc(ge::Shape({reshape_param_->shape_dim_}), ge::FORMAT_NCHW, ge::DT_INT32); - ge::TensorPtr ai_shape_tensor = std::make_shared<hiai::Tensor>(shape_tensor_desc); - ai_shape_tensor->SetData(reinterpret_cast<uint8_t *>(shape.data()), reshape_param_->shape_dim_ * sizeof(int32_t)); - shape_op->set_attr_value(ai_shape_tensor); - op_->set_input_shape(*shape_op); + op_->set_input_shape(*npu_inputs[1]); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h index 9c44fa2da4..52d6170dbc 100644 --- a/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/reshape_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
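Backing up to the pooling file: the fused schema splits the old PrimitiveType_Pooling into MaxPoolFusion and AvgPoolFusion, so the same PoolingNPUKernel is now registered twice and reads the actual pooling mode from PoolingParameter. REG_KERNEL boils down to a static registrar whose constructor files a creator under a lookup key; a minimal model of that self-registration idiom, simplified to an int key:

#include <cstdio>
#include <map>

using Creator = const char *(*)();

std::map<int, Creator> &Registry() {
  static std::map<int, Creator> registry;
  return registry;
}

// Minimal model of REG_KERNEL: a static object whose constructor registers a
// creator, letting one kernel class serve several primitive types.
struct Registrar {
  Registrar(int type, Creator creator) { Registry()[type] = creator; }
};

constexpr int kMaxPoolFusion = 1;
constexpr int kAvgPoolFusion = 2;
static Registrar g_max_pool(kMaxPoolFusion, [] { return "PoolingNPUKernel"; });
static Registrar g_avg_pool(kAvgPoolFusion, [] { return "PoolingNPUKernel"; });

int main() {
  std::printf("%s\n", Registry()[kAvgPoolFusion]());
  return 0;
}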
@@ -17,16 +17,15 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESHAPE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESHAPE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/reshape_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class ReshapeNPUKernel : public NPUKernel { public: ReshapeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { reshape_param_ = reinterpret_cast<ReshapeParameter *>(parameter); } ~ReshapeNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/resize_npu.cc b/mindspore/lite/src/runtime/kernel/npu/resize_npu.cc index e17f054d48..3ec8b9b05a 100644 --- a/mindspore/lite/src/runtime/kernel/npu/resize_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/resize_npu.cc @@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/resize_npu.h" #include <memory> +#include "include/graph/op/all_ops.h" #include "src/kernel_registry.h" #include "src/runtime/agent/npu/npu_converter_utils.h" diff --git a/mindspore/lite/src/runtime/kernel/npu/resize_npu.h b/mindspore/lite/src/runtime/kernel/npu/resize_npu.h index 6c15926932..698462dd87 100644 --- a/mindspore/lite/src/runtime/kernel/npu/resize_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/resize_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,17 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESIZE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_RESIZE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/resize_parameter.h" -#include "src/ops/resize.h" +#include "nnacl/arithmetic.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class ResizeNPUKernel : public NPUKernel { public: ResizeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { resize_parameter_ = reinterpret_cast<ResizeParameter *>(parameter); } ~ResizeNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc b/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc index 6ac66e4b43..cd4b981ce9 100644 --- a/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/scale_npu.cc @@ -19,7 +19,7 @@ using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { int ScaleNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, @@ -59,5 +59,5 @@ ScaleNPUKernel::~ScaleNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Scale, NPUKernelCreator<ScaleNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_ScaleFusion, NPUKernelCreator<ScaleNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/scale_npu.h b/mindspore/lite/src/runtime/kernel/npu/scale_npu.h index 4ecb9169fa..b592fc31d9 100644 --- a/mindspore/lite/src/runtime/kernel/npu/scale_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/scale_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,16 +17,15 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SCALE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SCALE_NPU_H_ #include <vector> -#include "include/graph/op/nn_defs.h" #include "nnacl/scale.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/nn_defs.h" namespace mindspore::kernel { class ScaleNPUKernel : public NPUKernel { public: ScaleNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { scale_parameter_ = reinterpret_cast<ScaleParameter *>(parameter); } ~ScaleNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/shape_npu.cc b/mindspore/lite/src/runtime/kernel/npu/shape_npu.cc index a1ed3544b8..150c157717 100644 --- a/mindspore/lite/src/runtime/kernel/npu/shape_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/shape_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/shape_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_Shape; diff --git a/mindspore/lite/src/runtime/kernel/npu/shape_npu.h b/mindspore/lite/src/runtime/kernel/npu/shape_npu.h index ea18bd363a..ac34225ffc 100644 --- a/mindspore/lite/src/runtime/kernel/npu/shape_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/shape_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,15 +17,14 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SHAPE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class ShapeNPUKernel : public NPUKernel { public: ShapeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) {} ~ShapeNPUKernel() override; int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, diff --git a/mindspore/lite/src/runtime/kernel/npu/slice_npu.cc b/mindspore/lite/src/runtime/kernel/npu/slice_npu.cc index 074e5c6eb0..3241135ce4 100644 --- a/mindspore/lite/src/runtime/kernel/npu/slice_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/slice_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,10 @@ #include "src/runtime/kernel/npu/slice_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Slice; +using mindspore::schema::PrimitiveType_SliceFusion; namespace mindspore::kernel { int SliceNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, @@ -49,5 +50,5 @@ SliceNPUKernel::~SliceNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Slice, NPUKernelCreator<SliceNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_SliceFusion, NPUKernelCreator<SliceNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/slice_npu.h b/mindspore/lite/src/runtime/kernel/npu/slice_npu.h index ce00d3dff8..f5e35aaa56 100644 --- a/mindspore/lite/src/runtime/kernel/npu/slice_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/slice_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,16 +17,14 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SLICE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" -#include "src/ops/slice.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class SliceNPUKernel : public NPUKernel { public: SliceNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) {} ~SliceNPUKernel() override; int IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, diff --git a/mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc b/mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc index 606429e9bb..129ef75388 100644 --- a/mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/softmax_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,7 @@ #include "src/kernel_registry.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::kernel { int SoftmaxNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, @@ -52,5 +52,5 @@ SoftmaxNPUKernel::~SoftmaxNPUKernel() { } } -REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_SoftMax, NPUKernelCreator<SoftmaxNPUKernel>) +REG_KERNEL(kNPU, kNumberTypeFloat32, PrimitiveType_Softmax, NPUKernelCreator<SoftmaxNPUKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/npu/softmax_npu.h b/mindspore/lite/src/runtime/kernel/npu/softmax_npu.h index 05c865419a..9066434bb0 100644 --- a/mindspore/lite/src/runtime/kernel/npu/softmax_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/softmax_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,16 +17,15 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SOFTMAX_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SOFTMAX_NPU_H_ #include <vector> -#include "include/graph/op/nn_defs.h" #include "src/runtime/kernel/npu/npu_kernel.h" #include "nnacl/softmax_parameter.h" +#include "include/graph/op/nn_defs.h" namespace mindspore::kernel { class SoftmaxNPUKernel : public NPUKernel { public: SoftmaxNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { softmax_parameter_ = reinterpret_cast<SoftmaxParameter *>(parameter); } ~SoftmaxNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/split_npu.cc b/mindspore/lite/src/runtime/kernel/npu/split_npu.cc index b63f3d5d11..c279bff7ff 100644 --- a/mindspore/lite/src/runtime/kernel/npu/split_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/split_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,25 +35,25 @@ int SplitNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, cons MS_LOG(ERROR) << name_ << " op is nullptr"; return RET_ERROR; } - int size = split_->size_splits().size(); + int size = param_->num_split_; ge::TensorDesc size_splits_tensor_desc(ge::Shape({size}), ge::FORMAT_NCHW, ge::DT_INT32); ge::TensorPtr size_splits_tensor = std::make_shared<hiai::Tensor>(size_splits_tensor_desc); - size_splits_tensor->SetData(reinterpret_cast<uint8_t *>(split_->size_splits().data()), size * sizeof(int)); + size_splits_tensor->SetData(reinterpret_cast<uint8_t *>(param_->split_sizes_), size * sizeof(int)); auto size_splits = new hiai::op::Const(name_ + "_size"); size_splits->set_attr_value(size_splits_tensor); ge::TensorDesc split_dim_tensor_desc(ge::Shape({1}), ge::FORMAT_NCHW, ge::DT_INT32); ge::TensorPtr split_dim_tensor = std::make_shared<hiai::Tensor>(split_dim_tensor_desc); - vector<int32_t> split_dim_data_value = {split_->GetSplitDim()}; + vector<int32_t> split_dim_data_value = {param_->split_dim_}; split_dim_tensor->SetData(reinterpret_cast<uint8_t *>(split_dim_data_value.data()), 1 * sizeof(int)); auto split_dim = new hiai::op::Const(name_ + "_dim"); split_dim->set_attr_value(split_dim_tensor); op_->set_input_x(*npu_inputs[0]); - op_->set_attr_num_split(split_->GetNumberSplit()); + op_->set_attr_num_split(param_->num_split_); op_->set_input_split_dim(*split_dim); op_->set_input_size_splits(*size_splits); - op_->create_dynamic_output_y(split_->GetNumberSplit()); + op_->create_dynamic_output_y(param_->num_split_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/split_npu.h b/mindspore/lite/src/runtime/kernel/npu/split_npu.h index 1ee47a4b05..f67700f4e9 100644 --- a/mindspore/lite/src/runtime/kernel/npu/split_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/split_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
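SplitV on HiAI takes size_splits and split_dim as graph inputs rather than attributes, which is why the hunk above wraps two small const operators around fields of SplitParameter. One contract worth keeping in mind when debugging that wiring, stated here as the general SplitV rule rather than anything this hunk enforces: size_splits must hold exactly num_split entries and they must sum to the length of the split dimension (ignoring the -1 wildcard some frameworks allow):

#include <numeric>
#include <vector>

// Host-side sanity check for SplitV inputs: entry count matches num_split and
// the entries partition the split dimension exactly.
bool ValidSplit(const std::vector<int> &size_splits, int num_split, int dim_len) {
  return static_cast<int>(size_splits.size()) == num_split &&
         std::accumulate(size_splits.begin(), size_splits.end(), 0) == dim_len;
}

int main() { return ValidSplit({2, 3, 3}, 3, 8) ? 0 : 1; }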
@@ -17,17 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" -#include "src/ops/split.h" +#include "nnacl/split_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class SplitNPUKernel : public NPUKernel { public: SplitNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - split_ = reinterpret_cast<const mindspore::lite::Split *>(primitive); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + param_ = reinterpret_cast<SplitParameter *>(parameter); } ~SplitNPUKernel() override; @@ -39,7 +38,7 @@ class SplitNPUKernel : public NPUKernel { private: hiai::op::SplitV *op_ = nullptr; - const mindspore::lite::Split *split_; + SplitParameter *param_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SPLIT_NPU_H_ diff --git a/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc b/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc index a70ed57b2e..189852af77 100644 --- a/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.cc @@ -34,12 +34,8 @@ int SqueezeNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &inputs, MS_LOG(ERROR) << "New squeeze npu operator for op " << name_ << " failed."; return RET_ERROR; } - std::vector<int64_t> axes; - for (int i = 0; i < axes_.size(); i++) { - axes.push_back(axes_[i]); - } op_->set_input_x(*npu_inputs[0]); - op_->set_attr_axis(axes); + op_->set_attr_axis(axes_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h b/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h index a0788959e5..948e466b4a 100644 --- a/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/squeeze_npu.h @@ -18,17 +18,19 @@ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SQUEEZE_NPU_H_ #include <vector> #include "include/graph/op/all_ops.h" -#include "src/ops/squeeze.h" +#include "nnacl/squeeze_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" namespace mindspore::kernel { class SqueezeNPUKernel : public NPUKernel { public: SqueezeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - auto squeeze = reinterpret_cast<const mindspore::lite::Squeeze *>(primitive); - axes_ = squeeze->GetAxis(); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + param_ = reinterpret_cast<SqueezeParameter *>(parameter); + axes_.resize(param_->axis_size_); + for (int i = 0; i < param_->axis_size_; i++) { + axes_[i] = param_->axis_[i]; + } } ~SqueezeNPUKernel() override; @@ -40,7 +42,8 @@ class SqueezeNPUKernel : public NPUKernel { private: hiai::op::Squeeze *op_ = nullptr; - vector<int> axes_; + vector<int64_t> axes_; + SqueezeParameter *param_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_SQUEEZE_NPU_H_ diff --git 
a/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc b/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc index 05fe9555cf..fe8ac09f76 100644 --- a/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ #include "src/runtime/kernel/npu/strided_slice_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; using mindspore::schema::PrimitiveType_StridedSlice; @@ -58,11 +59,11 @@ int StridedSliceNPUKernel::SetNPUInputs(const std::vector<lite::Tensor *> &input } else { op_->set_input_strides(*npu_inputs[3]); } - op_->set_attr_begin_mask(strided_slice_->GetBeginMask()); - op_->set_attr_ellipsis_mask(strided_slice_->GetEllipsisMask()); - op_->set_attr_end_mask(strided_slice_->GetEndMask()); - op_->set_attr_shrink_axis_mask(strided_slice_->GetShrinkAxisMask()); - op_->set_attr_new_axis_mask(strided_slice_->GetNewAxisMask()); + op_->set_attr_begin_mask(param_->begins_mask_); + op_->set_attr_ellipsis_mask(param_->ellipsisMask_); + op_->set_attr_end_mask(param_->ends_mask_); + op_->set_attr_shrink_axis_mask(param_->shrinkAxisMask_); + op_->set_attr_new_axis_mask(param_->newAxisMask_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h b/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h index 59d52d06db..d149ebb005 100644 --- a/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/strided_slice_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
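The five attributes forwarded above are bit fields in the usual strided-slice convention: bit i of begin_mask (or end_mask) means ignore begins[i] (or ends[i]) and take the widest possible range along dimension i, while shrink_axis_mask and new_axis_mask drop or insert dimensions the same way. An illustration of the begin-mask rule for positive strides (my own reference code, not taken from this patch):

#include <cstdio>

// Effective start index for dimension `dim`: when bit `dim` of begin_mask is
// set, begins[dim] is ignored and the slice starts at 0 (positive strides).
int EffectiveBegin(int begin_mask, const int *begins, int dim) {
  bool ignored = ((begin_mask >> dim) & 1) != 0;
  return ignored ? 0 : begins[dim];
}

int main() {
  int begins[] = {5, 7};
  // Mask 0b10: only dimension 1's begin is ignored.
  std::printf("%d %d\n", EffectiveBegin(0b10, begins, 0), EffectiveBegin(0b10, begins, 1));
  return 0;
}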
@@ -17,18 +17,16 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" -#include "src/ops/strided_slice.h" #include "nnacl/strided_slice_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class StridedSliceNPUKernel : public NPUKernel { public: StridedSliceNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - strided_slice_ = reinterpret_cast<const mindspore::lite::StridedSlice *>(primitive); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + param_ = reinterpret_cast<StridedSliceParameter *>(parameter); } ~StridedSliceNPUKernel() override; @@ -40,7 +38,7 @@ class StridedSliceNPUKernel : public NPUKernel { private: hiai::op::StridedSlice *op_ = nullptr; - const mindspore::lite::StridedSlice *strided_slice_; + StridedSliceParameter *param_; }; } // namespace mindspore::kernel #endif // MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_STRIDEDSLICE_NPU_H_ diff --git a/mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc b/mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc index a1c07d660d..c4ac33c6b3 100644 --- a/mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/transpose_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,10 +16,9 @@ #include "src/runtime/kernel/npu/transpose_npu.h" #include "src/kernel_registry.h" +#include "src/runtime/agent/npu/npu_converter_utils.h" using mindspore::kernel::KERNEL_ARCH::kNPU; using mindspore::lite::KernelRegistrar; -using mindspore::schema::PrimitiveType_Nchw2Nhwc; -using mindspore::schema::PrimitiveType_Nhwc2Nchw; using mindspore::schema::PrimitiveType_Transpose; namespace mindspore::kernel { @@ -29,6 +28,15 @@ int TransposeNPUKernel::IsSupport(const std::vector<lite::Tensor *> &inputs, con MS_LOG(ERROR) << "Unsupported conjugate transpose."; return RET_ERROR; } + if (inputs.size() >= 2 && inputs[1]->data_c() != nullptr) { + for (int i = 0; i < inputs[1]->ElementsNum(); i++) { + perm_.push_back(static_cast<int *>(inputs[1]->data_c())[i]); + } + } else { + MS_LOG(WARNING) << "NPU perm is attribute."; + return RET_ERROR; + } + return RET_ERROR; } diff --git a/mindspore/lite/src/runtime/kernel/npu/transpose_npu.h b/mindspore/lite/src/runtime/kernel/npu/transpose_npu.h index 3df2b34097..ea5ffff83e 100644 --- a/mindspore/lite/src/runtime/kernel/npu/transpose_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/transpose_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,26 +17,18 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_TRANSPOSE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" #include "nnacl/transpose.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class TransposeNPUKernel : public NPUKernel { public: TransposeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - if (primitive->Type() == schema::PrimitiveType_Transpose) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + if (parameter->type_ == schema::PrimitiveType_Transpose) { auto transpose_parameter = reinterpret_cast<TransposeParameter *>(parameter); conjugate_ = transpose_parameter->conjugate_; - for (int i = 0; i < transpose_parameter->num_axes_; i++) { - perm_.push_back(transpose_parameter->perm_[i]); - } - } else if (primitive->Type() == schema::PrimitiveType_Nchw2Nhwc) { - perm_ = {0, 2, 3, 1}; - } else if (primitive->Type() == schema::PrimitiveType_Nhwc2Nchw) { - perm_ = {0, 3, 1, 2}; } } ~TransposeNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc b/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc index cd9c016679..fdcdbdd3d1 100644 --- a/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc +++ b/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h b/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h index 28b4641f82..8078fdf2c7 100644 --- a/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h +++ b/mindspore/lite/src/runtime/kernel/npu/unsqueeze_npu.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
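The transpose constructor above also sheds its special cases: Nchw2Nhwc and Nhwc2Nchw no longer exist as primitive types, having become ordinary transposes whose perm tensors carry {0, 2, 3, 1} and {0, 3, 1, 2} respectively. Applying a perm to a shape makes those two constants easy to read; a tiny reference:

#include <cstdio>
#include <vector>

// out_shape[i] = in_shape[perm[i]]: {0, 2, 3, 1} sends NCHW to NHWC and
// {0, 3, 1, 2} sends NHWC back to NCHW.
std::vector<int> Permute(const std::vector<int> &shape, const std::vector<int> &perm) {
  std::vector<int> out(perm.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    out[i] = shape[perm[i]];
  }
  return out;
}

int main() {
  auto nhwc = Permute({1, 3, 224, 224}, {0, 2, 3, 1});  // NCHW -> NHWC
  std::printf("%d %d %d %d\n", nhwc[0], nhwc[1], nhwc[2], nhwc[3]);  // 1 224 224 3
  return 0;
}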
@@ -17,18 +17,17 @@ #ifndef MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_ #define MINDSPORE_LITE_SRC_RUNTIME_KERNEL_NPU_UNSQUEEZE_NPU_H_ #include <vector> -#include "include/graph/op/all_ops.h" -#include "src/ops/unsqueeze.h" +#include "nnacl/unsqueeze_parameter.h" #include "src/runtime/kernel/npu/npu_kernel.h" +#include "include/graph/op/all_ops.h" namespace mindspore::kernel { class UnsqueezeNPUKernel : public NPUKernel { public: UnsqueezeNPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : NPUKernel(parameter, inputs, outputs, ctx, primitive) { - auto unsqueeze = reinterpret_cast<const mindspore::lite::Unsqueeze *>(primitive); - axis_ = unsqueeze->GetAxis(); + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : NPUKernel(parameter, inputs, outputs, ctx) { + auto *param = reinterpret_cast<UnSqueezeParameter *>(parameter); + axis_.insert(axis_.begin(), param->dims_, param->dims_ + param->num_dim_); } ~UnsqueezeNPUKernel() override; diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl index 2a688df136..63f042c8a4 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/activation.cl @@ -93,14 +93,14 @@ __kernel void HSwish(__read_only image2d_t input, __write_only image2d_t output, } __kernel void HSigmoid(__read_only image2d_t input, __write_only image2d_t output, const int2 img_shape) { - int X = get_global_id(0); // w*c - int Y = get_global_id(1); // n*h - if (X >= img_shape.x || Y >= img_shape.y) return; - FLT4 temp = READ_IMAGE(input, smp_zero, (int2)(X, Y)); - FLT4 result = (FLT4)(0.0f, 0.0f, 0.0f, 0.0f); - result.x = temp.x <= -3 ? 0 : (temp.x >= 3 ? 1 : temp.x / 6 + 0.5f); - result.y = temp.y <= -3 ? 0 : (temp.y >= 3 ? 1 : temp.y / 6 + 0.5f); - result.z = temp.z <= -3 ? 0 : (temp.z >= 3 ? 1 : temp.z / 6 + 0.5f); - result.w = temp.w <= -3 ? 0 : (temp.w >= 3 ? 1 : temp.w / 6 + 0.5f); - WRITE_IMAGE(output, (int2)(X, Y), result); + int X = get_global_id(0); // w*c + int Y = get_global_id(1); // n*h + if (X >= img_shape.x || Y >= img_shape.y) return; + FLT4 temp = READ_IMAGE(input, smp_zero, (int2)(X, Y)); + FLT4 result = (FLT4)(0.0f, 0.0f, 0.0f, 0.0f); + result.x = temp.x <= -3 ? 0 : (temp.x >= 3 ? 1 : temp.x / 6 + 0.5f); + result.y = temp.y <= -3 ? 0 : (temp.y >= 3 ? 1 : temp.y / 6 + 0.5f); + result.z = temp.z <= -3 ? 0 : (temp.z >= 3 ? 1 : temp.z / 6 + 0.5f); + result.w = temp.w <= -3 ? 0 : (temp.w >= 3 ? 
1 : temp.w / 6 + 0.5f); + WRITE_IMAGE(output, (int2)(X, Y), result); } diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/arithmeticself.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/arithmeticself.cl index 4971f9f9fe..6a167fab32 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/arithmeticself.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/arithmeticself.cl @@ -208,4 +208,3 @@ __kernel void ArithmeticSelf_ElementRound_NHWC4(__read_only image2d_t input0, __ result.w = round(result.w); WRITE_IMAGE(output, (int2)((Y)*output_shape.w + Z, (X)), result); } - diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/cast.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/cast.cl index ff3a3971bb..76965ad43e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/cast.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/cast.cl @@ -1,46 +1,43 @@ #pragma OPENCL EXTENSION cl_khr_fp16 : enable + __constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST; -__kernel void Cast_Fp32ToFp16_NHWC4(__read_only image2d_t input0, __write_only image2d_t output, int4 output_shape) { - int X = get_global_id(0); // N*H - int Y = get_global_id(1); // W - int Z = get_global_id(2); // c/4 - if (X >= output_shape.x * output_shape.y || Y >= output_shape.z || Z >= output_shape.w) { +__kernel void Cast_fp32_to_fp16(__read_only image2d_t input, __write_only image2d_t output, int2 XY) { + int x = get_global_id(0); + int y = get_global_id(1); + if (x >= XY.x || y >= XY.y) { return; } - half4 result = convert_half4(READ_IMAGE(input0, smp_none, (int2)((Y)*output_shape.w + Z, (X)))); - write_imageh(output, (int2)((Y)*output_shape.w + Z, (X)), result); + half4 result = convert_half4(READ_IMAGE(input, smp_none, (int2)(x, y))); + write_imageh(output, (int2)(x, y), result); } -__kernel void Cast_Fp32ToFp16_NC4HW4(__read_only image2d_t input0, __write_only image2d_t output, int4 output_shape) { - int X = get_global_id(0); // N*H - int Y = get_global_id(1); // W - int Z = get_global_id(2); // c/4 - if (X >= output_shape.x * output_shape.y || Y >= output_shape.z || Z >= output_shape.w) { +__kernel void Cast_fp32_to_fp32(__read_only image2d_t input, __write_only image2d_t output, int2 XY) { + int x = get_global_id(0); + int y = get_global_id(1); + if (x >= XY.x || y >= XY.y) { return; } - half4 result = convert_half4(READ_IMAGE(input0, smp_none, (int2)((Y), (Z * output_shape.y + X)))); - write_imageh(output, (int2)((Y), (Z * output_shape.y + X)), result); + float4 result = READ_IMAGE(input, smp_none, (int2)(x, y)); + write_imagef(output, (int2)(x, y), result); } -__kernel void Cast_Fp16ToFp32_NHWC4(__read_only image2d_t input0, __write_only image2d_t output, int4 output_shape) { - int X = get_global_id(0); // N*H - int Y = get_global_id(1); // W - int Z = get_global_id(2); // c/4 - if (X >= output_shape.x * output_shape.y || Y >= output_shape.z || Z >= output_shape.w) { +__kernel void Cast_fp16_to_fp16(__read_only image2d_t input, __write_only image2d_t output, int2 XY) { + int x = get_global_id(0); + int y = get_global_id(1); + if (x >= XY.x || y >= XY.y) { return; } - float4 result = convert_float4(READ_IMAGE(input0, smp_none, (int2)((Y)*output_shape.w + Z, (X)))); - WRITE_IMAGE(output, (int2)((Y)*output_shape.w + Z, (X)), result); + half4 result = READ_IMAGE(input, smp_none, (int2)(x, y)); + write_imageh(output, (int2)(x, y), result); } -__kernel void Cast_Fp16ToFp32_NC4HW4(__read_only image2d_t input0, __write_only image2d_t output, int4 output_shape) { - int X = 
get_global_id(0); // N*H - int Y = get_global_id(1); // W - int Z = get_global_id(2); // c/4 - if (X >= output_shape.x * output_shape.y || Y >= output_shape.z || Z >= output_shape.w) { +__kernel void Cast_fp16_to_fp32(__read_only image2d_t input, __write_only image2d_t output, int2 XY) { + int x = get_global_id(0); + int y = get_global_id(1); + if (x >= XY.x || y >= XY.y) { return; } - float4 result = convert_float4(READ_IMAGE(input0, smp_none, (int2)((Y), (Z * output_shape.y + X)))); - WRITE_IMAGE(output, (int2)((Y), (Z * output_shape.y + X)), result); + float4 result = convert_float4(READ_IMAGE(input, smp_none, (int2)(x, y))); + write_imageh(output, (int2)(x, y), result); } diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/conv2d_transpose.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/conv2d_transpose.cl index 2c2afd7fc7..b67e19383a 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/conv2d_transpose.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/conv2d_transpose.cl @@ -1,8 +1,10 @@ #pragma OPENCL EXTENSION cl_khr_fp16 : enable + __constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; -__kernel void conv2d_transpose_NHWC4(__read_only image2d_t src_data, __write_only image2d_t dst_data, - __global FLT16 *weight, __read_only image2d_t biases, int2 kernel_size, - int2 stride, int2 padding, int4 src_size, int4 dst_size, int act_type) { + +__kernel void conv2d_transpose(__read_only image2d_t src_data, __write_only image2d_t dst_data, __global FLT16 *weight, + __read_only image2d_t biases, int2 kernel_size, int2 stride, int2 padding, int4 src_size, + int4 dst_size, int act_type) { int dst_h = get_global_id(0); int rem_h = dst_h % stride.x; int ceil_h = dst_h / stride.x; diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/reduce.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/reduce.cl index cecd0ec859..c013d80930 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/reduce.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/reduce.cl @@ -296,28 +296,16 @@ __kernel void LocalWCSumSquare(__read_only image2d_t src_data, __write_only imag #define DoSum(A, B) A += B #define InitSum 0.f -GlobalHW(Sum) -GlobalWC(Sum) -LocalHW(Sum) -LocalWC(Sum) +GlobalHW(Sum) GlobalWC(Sum) LocalHW(Sum) LocalWC(Sum) #define DoMin(A, B) A = min(A, B) #define InitMin 10000.f -GlobalHW(Min) -GlobalWC(Min) -LocalHW(Min) -LocalWC(Min) + GlobalHW(Min) GlobalWC(Min) LocalHW(Min) LocalWC(Min) #define DoMax(A, B) A = max(A, B) #define InitMax -10000.f -GlobalHW(Max) -GlobalWC(Max) -LocalHW(Max) -LocalWC(Max) + GlobalHW(Max) GlobalWC(Max) LocalHW(Max) LocalWC(Max) #define DoProd(A, B) A *= B #define InitProd 1.f -GlobalHW(Prod) -GlobalWC(Prod) -LocalHW(Prod) -LocalWC(Prod) + GlobalHW(Prod) GlobalWC(Prod) LocalHW(Prod) LocalWC(Prod) diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl index 0fa9c06a72..8d2b572141 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl +++ b/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl @@ -4,7 +4,7 @@ #define divide_no_check(a, b) (a / b) __constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST; __constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST; -__kernel void SoftMaxAxis3_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, +__kernel void SoftmaxAxis3_NHWC4(__read_only image2d_t input, __write_only 
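The rewritten cast kernels above drop the NHWC4/NC4HW4-specific addressing: Cast is elementwise, so the packed tensor can be treated as one flat 2D image and a single (x, y) index covers every layout. A minimal host-side sketch of that flattening, assuming the usual 4-channel slice packing; the names here are illustrative, not the library's API:

#include <cstddef>

struct ImageSize2D {
  std::size_t width;   // image x-extent in texels
  std::size_t height;  // image y-extent in texels
};

// An NHWC tensor packed into 4-channel slices occupies a
// (W * ceil(C/4)) x (N * H) image, so one elementwise kernel indexed by
// (x, y) reaches every element regardless of the original layout.
inline ImageSize2D FlatImageSize(std::size_t n, std::size_t h, std::size_t w, std::size_t c) {
  std::size_t c4 = (c + 3) / 4;  // number of 4-channel slices
  return {w * c4, n * h};
}

The resulting pair is what the kernel receives as its int2 XY bound.

diff --git a/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl b/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl
index 0fa9c06a72..8d2b572141 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl
+++ b/mindspore/lite/src/runtime/kernel/opencl/cl/softmax.cl
@@ -4,7 +4,7 @@
#define divide_no_check(a, b) (a / b)
__constant sampler_t smp_none = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;
__constant sampler_t smp_zero = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
-__kernel void SoftMaxAxis3_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask,
+__kernel void SoftmaxAxis3_NHWC4(__read_only image2d_t input, __write_only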
image2d_t output, const float4 mask, const int4 input_shape) { int X = get_global_id(1); // H int Y = get_global_id(0); // W @@ -47,7 +47,7 @@ __kernel void SoftMaxAxis3_NHWC4(__read_only image2d_t input, __write_only image WRITE_IMAGEOUT(output, (int2)(Y * C4 + C4 - 1, X), OUT_FLT4(result)); } -__kernel void SoftMaxAxis1_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, +__kernel void SoftmaxAxis1_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, const int4 input_shape) { int X = get_global_id(1); // W int Y = get_global_id(0); // C4 @@ -69,7 +69,7 @@ __kernel void SoftMaxAxis1_NHWC4(__read_only image2d_t input, __write_only image } } -__kernel void SoftMaxAxis2_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, +__kernel void SoftmaxAxis2_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, const int4 input_shape) { int X = get_global_id(1); // H int Y = get_global_id(0); // C4 @@ -91,7 +91,7 @@ __kernel void SoftMaxAxis2_NHWC4(__read_only image2d_t input, __write_only image } } -__kernel void SoftMax1x1_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, +__kernel void Softmax1x1_NHWC4(__read_only image2d_t input, __write_only image2d_t output, const float4 mask, const int4 input_shape) { int tid = get_local_id(0); int C4 = input_shape.w; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h index f50e4bbf03..b3edda57b6 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/activation.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class ActivationOpenCLKernel : public OpenCLKernel { public: ActivationOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OpenCLKernel(parameter, inputs, outputs, ctx, primitive), + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : OpenCLKernel(parameter, inputs, outputs, ctx), type_(reinterpret_cast<ActivationParameter *>(parameter)->type_), alpha_(reinterpret_cast<ActivationParameter *>(parameter)->alpha_) {} ~ActivationOpenCLKernel() override = default; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc index 8614f9b2b9..e8282bebd2 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.cc @@ -28,8 +28,8 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_ArgMax; -using mindspore::schema::PrimitiveType_ArgMin; +using mindspore::schema::PrimitiveType_ArgMaxFusion; +using mindspore::schema::PrimitiveType_ArgMinFusion; namespace mindspore::kernel { @@ -152,7 +152,7 @@ int ArgMinMaxOpenCLKernel::Prepare() { param->dims_size_ = in_tensors_[0]->shape().size(); param->axis_ = (param->axis_ + param->dims_size_) % param->dims_size_; param->axis_ = GetBroadcastGpuAxis(param->dims_size_, param->axis_); - param->get_max_ = (Type() == PrimitiveType_ArgMax); + param->get_max_ = (Type() == PrimitiveType_ArgMaxFusion); param->keep_dims_ = param->keep_dims_ || 
param->topk_ > 1 || in_tensors_[0]->shape().size() == out_tensors_[0]->shape().size();
@@ -171,8 +171,8 @@ int ArgMinMaxOpenCLKernel::Run() {
return RET_OK;
}
-REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMin, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
-REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMin, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
-REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMax, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
-REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMax, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
+REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMinFusion, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
+REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMinFusion, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
+REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ArgMaxFusion, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
+REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ArgMaxFusion, OpenCLKernelCreator<ArgMinMaxOpenCLKernel>);
} // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.h
index feb5782f97..1a8fbc827c 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/argminmax.h
@@ -41,8 +41,8 @@ class ArgMinMaxOpenCLKernel : public OpenCLKernel {
private:
void *buff_{nullptr};
void *ids_{nullptr};
- GpuTensorInfo im_in_{GpuTensorInfo(nullptr)};
- GpuTensorInfo im_out_{GpuTensorInfo(nullptr)};
+ GpuTensorInfo im_in_;
+ GpuTensorInfo im_out_;
cl_int4 src_size_;
cl_int4 cus_size_;
cl_int4 strides_;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
index 34d258ca16..3e22ce36d1 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic.cc
@@ -35,6 +35,9 @@ using mindspore::lite::opencl::MemType;
using mindspore::schema::ActivationType_NO_ACTIVATION;
using mindspore::schema::ActivationType_RELU;
using mindspore::schema::ActivationType_RELU6;
+using mindspore::schema::EltwiseMode_MAXIMUM;
+using mindspore::schema::EltwiseMode_PROD;
+using mindspore::schema::EltwiseMode_SUM;
using mindspore::schema::PrimitiveType_BiasAdd;
using mindspore::schema::PrimitiveType_Eltwise;
@@ -50,6 +53,13 @@ int ArithmeticOpenCLKernel::CheckSpecs() {
MS_LOG(ERROR) << "UnSupported Operator: " << schema::EnumNamePrimitiveType(Type());
return RET_ERROR;
}
+ if (Type() == schema::PrimitiveType_Eltwise) {
+ auto mode = param->eltwise_mode_;
+ if (mode != EltwiseMode_PROD && mode != EltwiseMode_SUM && mode != EltwiseMode_MAXIMUM) {
+ MS_LOG(ERROR) << "Unsupported eltwise mode: " << mode;
+ return RET_ERROR;
+ }
+ }
if (!(param->activation_type_ == ActivationType_NO_ACTIVATION || param->activation_type_ == ActivationType_RELU ||
param->activation_type_ == ActivationType_RELU6)) {
MS_LOG(ERROR) << "Unsupported activation type " << param->activation_type_;
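The Prepare() hunk that follows maps each fused primitive, and each Eltwise mode, to a kernel-name suffix instead of relying on EnumNamePrimitiveType. A standalone sketch of the mode dispatch, using an illustrative enum in place of the generated schema constants:

#include <optional>
#include <string>

enum class EltwiseMode { kProd, kSum, kMaximum, kOther };  // illustrative only

// PROD and SUM reuse the existing Mul/Add kernels, MAXIMUM gets its own;
// anything else was already rejected by CheckSpecs().
std::optional<std::string> EltwiseKernelSuffix(EltwiseMode mode) {
  switch (mode) {
    case EltwiseMode::kProd: return "Mul";
    case EltwiseMode::kSum: return "Add";
    case EltwiseMode::kMaximum: return "Maximum";
    default: return std::nullopt;
  }
}

@@ -130,7 +140,34 @@ int ArithmeticOpenCLKernel::Prepare() {
}
element_flag_ = !param->broadcasting_;
kernel_name_ = param->broadcasting_ ?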
"BroadcastNHWC4" : "Element"; - kernel_name_ += schema::EnumNamePrimitiveType(Type()); + switch (Type()) { + case PrimitiveType_MulFusion: + kernel_name_ += "Mul"; + break; + case PrimitiveType_AddFusion: + kernel_name_ += "Add"; + break; + case PrimitiveType_SubFusion: + kernel_name_ += "Sub"; + break; + case PrimitiveType_DivFusion: + kernel_name_ += "Div"; + break; + case PrimitiveType_Eltwise: { + auto mode = param->eltwise_mode_; + if (mode == EltwiseMode_PROD) { + kernel_name_ += "Mul"; + } else if (mode == EltwiseMode_SUM) { + kernel_name_ += "Add"; + } else if (mode == EltwiseMode_MAXIMUM) { + kernel_name_ += "Maximum"; + } + break; + } + default: + kernel_name_ += schema::EnumNamePrimitiveType(Type()); + } + if (param->activation_type_ == ActivationType_RELU) { activation_min_ = 0.f; } else if (param->activation_type_ == ActivationType_RELU6) { @@ -169,10 +206,10 @@ int ArithmeticOpenCLKernel::Run() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Mul, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Add, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Sub, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Div, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_MulFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_AddFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SubFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_DivFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LogicalAnd, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LogicalOr, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Maximum, OpenCLKernelCreator<ArithmeticOpenCLKernel>) @@ -188,10 +225,10 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Greater, OpenCLKernelCreator< REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_GreaterEqual, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Eltwise, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_BiasAdd, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Mul, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Add, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Sub, OpenCLKernelCreator<ArithmeticOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Div, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_MulFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_AddFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SubFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_DivFusion, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LogicalAnd, OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LogicalOr, 
OpenCLKernelCreator<ArithmeticOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Maximum, OpenCLKernelCreator<ArithmeticOpenCLKernel>) diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc index 642ac24bc2..496c11e298 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.cc @@ -80,7 +80,12 @@ void ArithmeticSelfOpenCLKernel::SetGlobalLocal() { } int ArithmeticSelfOpenCLKernel::Prepare() { - std::string kernel_name = "ArithmeticSelf_Element" + std::string(schema::EnumNamePrimitiveType(Type())) + "_NHWC4"; + std::string kernel_name = "ArithmeticSelf_Element"; + if (Type() == schema::PrimitiveType_ExpFusion) { + kernel_name += "Exp_NHWC4"; + } else { + kernel_name += std::string(schema::EnumNamePrimitiveType(Type())) + "_NHWC4"; + } MS_LOG(DEBUG) << "execute kernel name : " << kernel_name; std::string program_name = "ArithmeticSelf"; ocl_runtime_->LoadSource(program_name, arithmeticself_source); @@ -101,7 +106,7 @@ int ArithmeticSelfOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Abs, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Ceil, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Cos, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Exp, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ExpFusion, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Floor, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Log, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LogicalNot, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) @@ -114,7 +119,7 @@ REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Square, OpenCLKernelCreator<A REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Abs, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Ceil, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Cos, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Exp, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ExpFusion, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Floor, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Log, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LogicalNot, OpenCLKernelCreator<ArithmeticSelfOpenCLKernel>) diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h index 6e2988b94c..daaa3e3eb0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/arithmetic_self.h @@ -26,7 +26,7 @@ using mindspore::schema::PrimitiveType_Abs; using mindspore::schema::PrimitiveType_Ceil; using mindspore::schema::PrimitiveType_Cos; using mindspore::schema::PrimitiveType_Eltwise; -using 
mindspore::schema::PrimitiveType_Exp;
+using mindspore::schema::PrimitiveType_ExpFusion;
using mindspore::schema::PrimitiveType_Floor;
using mindspore::schema::PrimitiveType_Log;
using mindspore::schema::PrimitiveType_LogicalNot;
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
index 669c9f0c1c..8ae12f75c8 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.cc
@@ -17,6 +17,7 @@
#include <cstring>
#include <algorithm>
#include <set>
+#include <map>
#include <string>
#include "src/kernel_registry.h"
#include "src/runtime/kernel/opencl/kernel/cast.h"
@@ -31,74 +32,50 @@ using mindspore::schema::PrimitiveType_Cast;
namespace mindspore::kernel {
-int CastOpenCLKernel::GetKernelName(std::string *kernel_name, CastParameter *param) {
- if (param->src_type_ == kNumberTypeFloat32 && param->dst_type_ == kNumberTypeFloat16) {
- kernel_name[0] += "_Fp32ToFp16";
- } else if (param->src_type_ == kNumberTypeFloat16 && param->dst_type_ == kNumberTypeFloat32) {
- kernel_name[0] += "_Fp16ToFp32";
- } else {
- MS_LOG(ERROR) << "unsupported convert format from : " << param->src_type_ << "to " << param->dst_type_;
- return RET_ERROR;
- }
- return RET_OK;
-}
-
int CastOpenCLKernel::CheckSpecs() {
- if (in_tensors_.size() != 1 || out_tensors_.size() != 1) {
+ // the 2nd tensor is DstType
+ if (in_tensors_.size() != 2 || out_tensors_.size() != 1) {
MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size();
return RET_ERROR;
}
- if (in_tensors_.at(0)->shape().size() == 4) {
- MS_LOG(ERROR) << "The dim of in_tensors->shape must be 4 but your dim is : " << in_tensors_.at(0)->shape().size();
+ if (in_tensors_.front()->shape() != out_tensors_.front()->shape()) {
+ MS_LOG(ERROR) << "input shape must be equal to output shape";
return RET_ERROR;
}
+ auto input_dtype = in_tensors_.front()->data_type();
+ if (input_dtype != kNumberTypeFloat32 && input_dtype != kNumberTypeFloat16) {
+ MS_LOG(ERROR) << "input dtype must be float32/float16";
+ return RET_ERROR;
+ }
+ auto output_dtype = out_tensors_.front()->data_type();
+ if (output_dtype != kNumberTypeFloat32 && output_dtype != kNumberTypeFloat16) {
+ MS_LOG(ERROR) << "output dtype must be float32/float16";
+ return RET_ERROR;
+ }
return RET_OK;
}
void CastOpenCLKernel::SetConstArgs() {
- auto input_shape = in_tensors_[0]->shape();
- cl_int4 input_shape_ = {input_shape[0], input_shape[1], input_shape[2], UP_DIV(input_shape[3], C4NUM)};
- int arg_cn = 2;
- ocl_runtime_->SetKernelArg(kernel_, arg_cn++, input_shape_);
-}
-
-void CastGetWorkGroup(const std::vector<size_t> &global, std::vector<size_t> *local, int max_size) {
- const int max_divider = 8;
- const int max_x = 4, max_y = 8;
- int x = std::min(GetMaxDivisorStrategy1(global[0], max_divider), max_x);
- int yz = max_size / x;
- int y = std::min(std::min(GetMaxDivisorStrategy1(global[1], max_divider), yz), max_y);
- int z = std::min(yz / y, static_cast<int>(UP_DIV(global[2], 2)));
-
- local->clear();
- local->push_back(x);
- local->push_back(y);
- local->push_back(z);
+ cl_int2 shape = {static_cast<int>(shape_.width), static_cast<int>(shape_.height)};
+ ocl_runtime_->SetKernelArg(kernel_, 2, shape);
}
void CastOpenCLKernel::SetGlobalLocal() {
- auto input_shape = in_tensors_[0]->shape();
- uint32_t OH = input_shape[1];
- uint32_t OW = input_shape[2];
- uint32_t OC = UP_DIV(input_shape[3], C4NUM);
-
- const std::vector<size_t> &max_global = ocl_runtime_->GetWorkItemSize();
-
local_size_ = {1, 1, 1}; // init local - global_size_ = {OH, OW, OC}; - CastGetWorkGroup(global_size_, &local_size_, max_global[0]); - OpenCLKernel::AlignGlobalLocal(global_size_, local_size_); + global_size_ = {shape_.width, shape_.height}; + OpenCLKernel::AlignGlobalLocal(global_size_, {}); } int CastOpenCLKernel::Prepare() { - auto param = reinterpret_cast<CastParameter *>(this->op_parameter_); - std::string kernel_name = "Cast"; - GetKernelName(&kernel_name, param); - kernel_name += "_NHWC4"; - std::string source = cast_source; - std::string program_name = "cast"; - ocl_runtime_->LoadSource(program_name, source); + shape_ = GpuTensorInfo(in_tensors_.front()); + std::map<int, std::string> dtype_names = { + {kNumberTypeFloat32, "fp32"}, + {kNumberTypeFloat16, "fp16"}, + }; + std::string program_name = "Cast"; + std::string kernel_name = + "Cast_" + dtype_names[in_tensors_.front()->data_type()] + "_to_" + dtype_names[out_tensors_.front()->data_type()]; + ocl_runtime_->LoadSource(program_name, cast_source); ocl_runtime_->BuildKernel(kernel_, program_name, kernel_name); - MS_LOG(DEBUG) << kernel_name << " Init Done!"; SetConstArgs(); SetGlobalLocal(); return RET_OK; @@ -106,9 +81,8 @@ int CastOpenCLKernel::Prepare() { int CastOpenCLKernel::Run() { MS_LOG(DEBUG) << this->name() << " Running! "; - int arg_cn = 0; - ocl_runtime_->SetKernelArg(kernel_, arg_cn++, in_tensors_[0]->data_c()); // input tensor - ocl_runtime_->SetKernelArg(kernel_, arg_cn++, out_tensors_[0]->data_c()); // out tensor + ocl_runtime_->SetKernelArg(kernel_, 0, in_tensors_.front()->data_c()); + ocl_runtime_->SetKernelArg(kernel_, 1, out_tensors_.front()->data_c()); ocl_runtime_->RunKernel(kernel_, global_range_, local_range_, nullptr, &event_); return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.h index 44e50ed06d..d50e81b4ea 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/cast.h @@ -38,7 +38,7 @@ class CastOpenCLKernel : public OpenCLKernel { int Run() override; private: - int GetKernelName(std::string *kernel_name, CastParameter *param); + GpuTensorInfo shape_; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc index 540cebc7f9..95aa5a87ae 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.cc @@ -23,6 +23,7 @@ #include "schema/ops_generated.h" #include "src/common/utils.h" #include "src/runtime/kernel/opencl/utils.h" +#include "src/runtime/kernel/opencl/kernel/depthwise_conv2d.h" #include "src/runtime/kernel/opencl/kernel/fullconnection.h" #include "src/runtime/kernel/opencl/kernel/winograd.h" #include "src/runtime/kernel/opencl/cl/conv2d.cl.inc" @@ -36,7 +37,7 @@ using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; using mindspore::schema::ActivationType_SIGMOID; using mindspore::schema::ActivationType_TANH; -using mindspore::schema::PrimitiveType_Conv2D; +using mindspore::schema::PrimitiveType_Conv2DFusion; using mindspore::schema::PrimitiveType_FullConnection; namespace mindspore::kernel { @@ -418,16 +419,18 @@ bool UseFcReplaceConv(const std::vector<lite::Tensor *> &inputs, const std::vect return hw_is_1 && attr_valid; } -OpParameter *CreateFcParam(const ConvParameter *conv_param) { +OpParameter *CreateFcParam(const 
ConvParameter *conv_param, const std::vector<lite::Tensor *> &inputs) { auto fc_param = static_cast<MatMulParameter *>(malloc(sizeof(MatMulParameter))); if (fc_param == nullptr) { MS_LOG(ERROR) << "Create FullConnection kernel param failed."; return nullptr; } fc_param->op_parameter_.type_ = PrimitiveType_FullConnection; + fc_param->op_parameter_.infer_flag_ = true; fc_param->a_transpose_ = false; fc_param->b_transpose_ = true; fc_param->act_type_ = conv_param->act_type_; + fc_param->has_bias_ = inputs.size() == 3; return reinterpret_cast<OpParameter *>(fc_param); } @@ -476,29 +479,38 @@ bool UseWinograd4x4To6x6(const ConvParameter *param, const std::vector<lite::Ten return attr_valid && shape_valid && channel_good && hw_good; } -kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - kernel::OpenCLKernel *kernel; - OpParameter *real_param; +kernel::LiteKernel *OpenCLConv2DCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(!inputs.empty()); + MS_ASSERT(!outputs.empty()); + MS_ASSERT(opParameter); + MS_ASSERT(inputs.front()->shape().size() == 4); + MS_ASSERT(outputs.front()->shape().size() == 4); auto *conv_param = reinterpret_cast<ConvParameter *>(opParameter); - bool infer_shape_done; - if (primitive != nullptr) { - infer_shape_done = primitive->infer_flag(); - } else { - bool output_shape_setted = true; - for (auto output : outputs) { - if (output->shape().empty() || output->ElementsNum() < 0) { - output_shape_setted = false; - break; - } - } - infer_shape_done = output_shape_setted; + int input_channel = inputs.front()->shape().at(3); + int output_channel = outputs.front()->shape().at(3); + int group = conv_param->group_; + + // case 1: depthwise conv2d + if (group == input_channel && group == output_channel) { + return OpenCLKernelCreator<DepthwiseConv2dOpenCLKernel>(inputs, outputs, opParameter, ctx, desc); + } + + // case 2: group conv2d + if (group != 1) { + MS_LOG(ERROR) << "OpenCL doesn't support group conv2d."; + free(conv_param); + return nullptr; } + + // case 3: common conv2d + kernel::OpenCLKernel *kernel; + OpParameter *real_param; + bool infer_shape_done = opParameter->infer_flag_; if (infer_shape_done && UseFcReplaceConv(inputs, outputs, conv_param)) { - auto *fc_param = CreateFcParam(conv_param); - kernel = new (std::nothrow) FullConnectionOpenCLKernel(fc_param, inputs, outputs, ctx, primitive); + auto *fc_param = CreateFcParam(conv_param, inputs); + kernel = new (std::nothrow) FullConnectionOpenCLKernel(fc_param, inputs, outputs, ctx); real_param = fc_param; if (kernel == nullptr) { MS_LOG(ERROR) << "Create FullConnection kernel failed."; @@ -512,11 +524,10 @@ kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector<lite::Tenso } else { if (infer_shape_done && UseWinograd4x4To6x6(conv_param, inputs, outputs)) { MS_LOG(DEBUG) << "use Winograd algorithm."; - kernel = new (std::nothrow) - WinogradOpenCLKernel(reinterpret_cast<OpParameter *>(conv_param), inputs, outputs, ctx, primitive); + kernel = + new (std::nothrow) WinogradOpenCLKernel(reinterpret_cast<OpParameter *>(conv_param), inputs, outputs, ctx); } else { - kernel = new (std::nothrow) - Conv2DOpenCLKernel(reinterpret_cast<OpParameter 
*>(conv_param), inputs, outputs, ctx, primitive); + kernel = new (std::nothrow) Conv2DOpenCLKernel(reinterpret_cast<OpParameter *>(conv_param), inputs, outputs, ctx); } real_param = reinterpret_cast<OpParameter *>(conv_param); if (kernel == nullptr) { @@ -539,6 +550,6 @@ kernel::LiteKernel *OpenCLConvolutionKernelCreator(const std::vector<lite::Tenso return kernel; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Conv2D, OpenCLConvolutionKernelCreator) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Conv2D, OpenCLConvolutionKernelCreator) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Conv2DFusion, OpenCLConv2DCreator) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Conv2DFusion, OpenCLConv2DCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.h index 266fb6c023..7f20d14932 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d.h @@ -44,9 +44,8 @@ void ConvertFilter(void *src, void *dst, TypeId src_dtype, TypeId dst_dtype, Fil class Conv2DOpenCLKernel : public OpenCLKernel { public: Conv2DOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OpenCLKernel(parameter, inputs, outputs, ctx, primitive), param_(reinterpret_cast<ConvParameter *>(parameter)) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : OpenCLKernel(parameter, inputs, outputs, ctx), param_(reinterpret_cast<ConvParameter *>(parameter)) { bool is_adreno = ocl_runtime_->GetGpuInfo().type == lite::opencl::GpuType::ADRENO; filter_type_ = is_adreno ? 
MemType::IMG : MemType::BUF; } @@ -72,6 +71,9 @@ class Conv2DOpenCLKernel : public OpenCLKernel { // for opencl fusion: Conv2D + PReLU(weight is scalar) -> param_.act_type=ActivationType_LEAKY_RELU float alpha_{0.0f}; + // for opencl fusion + bool use_winograd_ = false; + protected: void InitAttrs(); virtual void BuildKernel(); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc index 38f2fe375c..2146f2080e 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/conv2d_transpose.cc @@ -19,7 +19,6 @@ #include <set> #include "nnacl/fp32/common_func_fp32.h" #include "src/kernel_registry.h" -#include "src/ops/conv2d.h" #ifndef PROGRAM_WITH_IL #include "src/runtime/kernel/opencl/cl/conv2d_transpose.cl.inc" #endif @@ -32,7 +31,7 @@ using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::schema::ActivationType_RELU; using mindspore::schema::ActivationType_RELU6; -using mindspore::schema::PrimitiveType_DeConv2D; +using mindspore::schema::PrimitiveType_Conv2dTransposeFusion; namespace mindspore::kernel { @@ -41,24 +40,24 @@ int Conv2dTransposeOpenCLKernel::CheckSpecs() { MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } - ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_); + auto *param = reinterpret_cast<ConvParameter *>(op_parameter_); if (param->act_type_ != ActType_No && param->act_type_ != ActType_Relu && param->act_type_ != ActType_Relu6) { MS_LOG(ERROR) << "Unsupported activation type " << param->act_type_; return RET_ERROR; } if (!in_tensors_.at(1)->IsConst()) { - MS_LOG(ERROR) << "Conv2dTranspose don't support non-constant filter yet."; + MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant filter yet."; return RET_ERROR; } if (in_tensors_.size() == 3 && !in_tensors_.at(2)->IsConst()) { - MS_LOG(ERROR) << "Conv2dTranspose don't support non-constant bias yet."; + MS_LOG(ERROR) << "Conv2dTranspose doesn't support non-constant bias yet."; return RET_ERROR; } return RET_OK; } int Conv2dTransposeOpenCLKernel::Prepare() { - std::string kernel_name = "conv2d_transpose_NHWC4"; + std::string kernel_name = "conv2d_transpose"; enable_fp16_ = ocl_runtime_->GetFp16Enable(); #ifdef PROGRAM_WITH_IL kernel_ = ocl_runtime_->GetKernelFromBinary(kernel_name); @@ -79,7 +78,7 @@ int Conv2dTransposeOpenCLKernel::Prepare() { } void Conv2dTransposeOpenCLKernel::SetGlobalLocal() { - ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_); + auto *param = reinterpret_cast<ConvParameter *>(op_parameter_); int co = out_tensors_[0]->shape()[3]; int co4 = UP_DIV(co, C4NUM); int stride_h = param->stride_h_; @@ -93,7 +92,7 @@ void Conv2dTransposeOpenCLKernel::SetGlobalLocal() { void Conv2dTransposeOpenCLKernel::SetConstArgs() { int arg_cnt = 2; - ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_); + auto *param = reinterpret_cast<ConvParameter *>(op_parameter_); int ci = in_tensors_[0]->shape()[3]; int co = out_tensors_[0]->shape()[3]; int kh = param->kernel_h_; @@ -118,7 +117,7 @@ void Conv2dTransposeOpenCLKernel::SetConstArgs() { ocl_runtime_->SetKernelArg(kernel_, arg_cnt++, padding); ocl_runtime_->SetKernelArg(kernel_, arg_cnt++, src_size); ocl_runtime_->SetKernelArg(kernel_, arg_cnt++, dst_size); - ocl_runtime_->SetKernelArg(kernel_, arg_cnt++, 
static_cast<cl_int>(param->act_type_)); + ocl_runtime_->SetKernelArg(kernel_, arg_cnt, static_cast<cl_int>(param->act_type_)); } int Conv2dTransposeOpenCLKernel::InitWeights() { @@ -134,7 +133,7 @@ int Conv2dTransposeOpenCLKernel::InitFilter() { if (ret != RET_OK) { return ret; } - ConvParameter *param = reinterpret_cast<ConvParameter *>(op_parameter_); + auto *param = reinterpret_cast<ConvParameter *>(op_parameter_); int ci = in_tensors_[0]->shape()[3]; int co = out_tensors_[0]->shape()[3]; int kh = param->kernel_h_; @@ -242,15 +241,40 @@ int Conv2dTransposeOpenCLKernel::InferShape() { if (ret != RET_OK) { return ret; } - auto param = reinterpret_cast<ConvParameter *>(op_parameter_); - auto conv2d_lite_primitive = (lite::Conv2D *)primitive_; - param->pad_u_ = conv2d_lite_primitive->PadUp(); - param->pad_d_ = conv2d_lite_primitive->PadDown(); - param->pad_l_ = conv2d_lite_primitive->PadLeft(); - param->pad_r_ = conv2d_lite_primitive->PadRight(); return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_DeConv2D, OpenCLKernelCreator<Conv2dTransposeOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_DeConv2D, OpenCLKernelCreator<Conv2dTransposeOpenCLKernel>) +kernel::LiteKernel *OpenCLConv2dTransposeCreator(const std::vector<lite::Tensor *> &inputs, + const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + MS_ASSERT(!inputs.empty()); + MS_ASSERT(!outputs.empty()); + MS_ASSERT(opParameter); + MS_ASSERT(inputs.front()->shape().size() == 4); + MS_ASSERT(outputs.front()->shape().size() == 4); + auto *conv_param = reinterpret_cast<ConvParameter *>(opParameter); + int input_channel = inputs.front()->shape().at(3); + int output_channel = outputs.front()->shape().at(3); + int group = conv_param->group_; + + // case 1: depthwise Conv2dTranspose + if (group == input_channel && group == output_channel) { + MS_LOG(ERROR) << "OpenCL doesn't support depthwise Conv2dTranspose."; + free(conv_param); + return nullptr; + } + + // case 2: group Conv2dTranspose + if (group != 1) { + MS_LOG(ERROR) << "OpenCL doesn't support group Conv2dTranspose."; + free(conv_param); + return nullptr; + } + + // case 3: common Conv2dTranspose + return OpenCLKernelCreator<Conv2dTransposeOpenCLKernel>(inputs, outputs, opParameter, ctx, desc); +} + +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Conv2dTransposeFusion, OpenCLConv2dTransposeCreator) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Conv2dTransposeFusion, OpenCLConv2dTransposeCreator) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc index 131fc79706..f15de87d94 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.cc @@ -39,7 +39,6 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::lite::opencl::MemType; -using mindspore::schema::PrimitiveType_DepthwiseConv2D; namespace mindspore::kernel { @@ -92,6 +91,10 @@ int DepthwiseConv2dOpenCLKernel::Prepare() { if (ret != RET_OK) { return ret; } + ret = InitBias(); + if (ret != RET_OK) { + return ret; + } SetGlobalLocal(); SetConstArgs(); MS_LOG(DEBUG) << kernel_name << " Init Done! 
mem type=" << static_cast<int>(out_mem_type_); @@ -163,6 +166,18 @@ int DepthwiseConv2dOpenCLKernel::InitWeights() { if (packed_weight_ == nullptr) { return RET_ERROR; } + return mindspore::lite::RET_OK; +} + +int DepthwiseConv2dOpenCLKernel::InitBias() { + auto allocator = ocl_runtime_->GetAllocator(); + bool is_fp16 = ocl_runtime_->GetFp16Enable(); + + size_t dtype_size = is_fp16 ? sizeof(int16_t) : sizeof(float); + auto out_info = GpuTensorInfo(out_tensors_[0]); + int CO4 = UP_DIV(out_info.C, C4NUM * block_size_.C); + auto src_type = in_tensors_.at(kWeightIndex)->data_type(); + auto dst_type = is_fp16 ? kNumberTypeFloat16 : kNumberTypeFloat32; auto ConvertBias = [](void *src, void *dst, size_t size, size_t dtype_size, TypeId src_type, TypeId dst_type) { if (dst_type == kNumberTypeFloat16 && src_type == kNumberTypeFloat32) { @@ -247,6 +262,4 @@ int DepthwiseConv2dOpenCLKernel::Run() { return mindspore::lite::RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_DepthwiseConv2D, OpenCLKernelCreator<DepthwiseConv2dOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_DepthwiseConv2D, OpenCLKernelCreator<DepthwiseConv2dOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h index a09e04f50b..706f755329 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/depthwise_conv2d.h @@ -28,9 +28,8 @@ namespace mindspore::kernel { class DepthwiseConv2dOpenCLKernel : public OpenCLKernel { public: DepthwiseConv2dOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OpenCLKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : OpenCLKernel(parameter, inputs, outputs, ctx) { bool is_adreno = ocl_runtime_->GetGpuInfo().type == lite::opencl::GpuType::ADRENO; filter_type_ = is_adreno ? 
MemType::IMG : MemType::BUF; } @@ -42,6 +41,7 @@ class DepthwiseConv2dOpenCLKernel : public OpenCLKernel { int CheckSpecs() override; int InitWeights() override; + int InitBias(); void SetConstArgs() override; void SetGlobalLocal() override; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.cc index 5304690109..57ce27f397 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.cc @@ -19,6 +19,7 @@ #include "include/errorcode.h" #include "nnacl/fp32/activation_fp32.h" #include "nnacl/scale.h" +#include "src/common/prim_inner.h" using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; @@ -61,14 +62,14 @@ std::pair<bool, FusionEltwiseParameter *> CheckSupportOrCreateParam( param = reinterpret_cast<FusionEltwiseParameter *>(eltwise->GetParameter()); eltwise->ClearParameter(); } - } else if (IsArithmetic(node_type) || node_type == schema::PrimitiveType_Scale) { + } else if (IsArithmetic(node_type) || node_type == schema::PrimitiveType_ScaleFusion) { auto *arith_param = reinterpret_cast<ArithmeticParameter *>(op_parameter); auto *scale_param = reinterpret_cast<ScaleParameter *>(op_parameter); auto act_type = static_cast<ActivationType>( - node_type == schema::PrimitiveType_Scale ? scale_param->activation_type_ : arith_param->activation_type_); + node_type == schema::PrimitiveType_ScaleFusion ? scale_param->activation_type_ : arith_param->activation_type_); EltwiseOperator act_operator = Activation2Operator(act_type); support = SupportedOperators.count(operator_) && SupportedOperators.count(act_operator); - if (node_type == schema::PrimitiveType_Scale) { + if (node_type == schema::PrimitiveType_ScaleFusion) { support = support && node->in_tensors().size() == 3 && scale_param->axis_ == -1; } else { support = support && (node->in_tensors().size() == 2); @@ -415,7 +416,7 @@ int FusionEltwiseOpenCLKernel::GetTensorIdx(lite::Tensor *in_tensor) { MS_ASSERT(in_kernel); MS_ASSERT(in_kernel->in_tensors().size()); MS_ASSERT(in_kernel->out_tensors().size()); - if (in_kernel->Type() == schema::PrimitiveType_ToFormat) { + if (static_cast<int>(in_kernel->Type()) == lite::PRIM_TO_FORMAT) { if (in_tensor == in_kernel->in_tensors().front()) { return std::find(in_tensors_.begin(), in_tensors_.end(), in_kernel->out_tensors().front()) - in_tensors_.begin(); diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.h index 6dc6bf44e8..177efbff83 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/fusion_eltwise.h @@ -39,10 +39,10 @@ constexpr schema::PrimitiveType PrimitiveType_FusionEltwise = static_cast<schema enum EltwiseOperator { // Arithmetic Primitive - Operator_Mul = PrimitiveType_Mul, - Operator_Add = PrimitiveType_Add, - Operator_Sub = PrimitiveType_Sub, - Operator_Div = PrimitiveType_Div, + Operator_Mul = PrimitiveType_MulFusion, + Operator_Add = PrimitiveType_AddFusion, + Operator_Sub = PrimitiveType_SubFusion, + Operator_Div = PrimitiveType_DivFusion, Operator_LogicalAnd = PrimitiveType_LogicalAnd, Operator_LogicalOr = PrimitiveType_LogicalOr, Operator_Maximum = PrimitiveType_Maximum, @@ -62,7 +62,7 @@ enum EltwiseOperator { Operator_Abs = PrimitiveType_Abs, Operator_Ceil = PrimitiveType_Ceil, Operator_Cos = PrimitiveType_Cos, - Operator_Exp = 
PrimitiveType_Exp, + Operator_Exp = PrimitiveType_ExpFusion, Operator_Floor = PrimitiveType_Floor, Operator_Log = PrimitiveType_Log, Operator_LogicalNot = PrimitiveType_LogicalNot, @@ -74,7 +74,7 @@ enum EltwiseOperator { Operator_Square = PrimitiveType_Square, // Other Primitive - Operator_Scale = schema::PrimitiveType_Scale, + Operator_Scale = schema::PrimitiveType_ScaleFusion, // Activation Operator_Act_NO_ACTIVATION = schema::ActivationType_NO_ACTIVATION + PrimitiveType_MAX, @@ -106,7 +106,7 @@ struct FusionEltwiseParameter { Node_(bool is_leaf, FusionEltwiseParameter *value, std::string value_name) : is_leaf_(is_leaf), value_(value), name_(std::move(value_name)) {} }; - OpParameter op_parameter_{"FusionEltwiseParameter", PrimitiveType_FusionEltwise, 1}; + OpParameter op_parameter_{"FusionEltwiseParameter", true, PrimitiveType_FusionEltwise, 1}; EltwiseOperator operator_; std::string name_; std::vector<Node_> inputs_; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc index 51c52b9900..db95826d8b 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/gather.cc @@ -21,6 +21,7 @@ #include "src/kernel_registry.h" #include "src/runtime/kernel/opencl/kernel/gather.h" #include "src/runtime/kernel/opencl/cl/gather.cl.inc" +#include "src/runtime/kernel/opencl/utils.h" using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; @@ -62,8 +63,10 @@ int GatherOpenCLKernel::CheckSpecs() { return RET_ERROR; } - auto *param = reinterpret_cast<GatherParameter *>(this->op_parameter_); - axis_ = param->axis_; + if (CheckParamLikeTensor("Gather", "axis", in_tensors_.at(2), kNumberTypeInt32, {1}) != RET_OK) { + return RET_ERROR; + } + axis_ = *reinterpret_cast<int32_t *>(in_tensors_.at(2)->data_c()); if (axis_ < 0) { axis_ += input_ndim; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc index 115161c871..982a6ada0f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/layer_norm.cc @@ -27,7 +27,7 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_LayerNorm; +using mindspore::schema::PrimitiveType_LayerNormFusion; namespace mindspore::kernel { @@ -200,6 +200,6 @@ int LayerNormOpenCLKernel::Run() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LayerNorm, OpenCLKernelCreator<LayerNormOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LayerNorm, OpenCLKernelCreator<LayerNormOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_LayerNormFusion, OpenCLKernelCreator<LayerNormOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_LayerNormFusion, OpenCLKernelCreator<LayerNormOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc index 47e19636c7..1aaf62fff2 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/matmul.cc @@ -225,27 +225,14 @@ int MatMulOpenCLKernel::Run() { kernel::LiteKernel *OpenCLMatMulKernelCreator(const std::vector<lite::Tensor *> &inputs, const 
std::vector<lite::Tensor *> &outputs, OpParameter *opParameter,
- const lite::InnerContext *ctx, const kernel::KernelKey &desc,
- const mindspore::lite::PrimitiveC *primitive) {
+ const lite::InnerContext *ctx, const kernel::KernelKey &desc) {
kernel::OpenCLKernel *kernel;
- bool infer_shape_done;
- if (primitive != nullptr) {
- infer_shape_done = primitive->infer_flag();
- } else {
- bool output_shape_setted = true;
- for (auto output : outputs) {
- if (output->shape().empty() || output->ElementsNum() < 0) {
- output_shape_setted = false;
- break;
- }
- }
- infer_shape_done = output_shape_setted;
- }
+ bool infer_shape_done = opParameter->infer_flag_;
if (infer_shape_done && IsUseStrassenMatmul(inputs)) {
MS_LOG(DEBUG) << "use_matmul_strassen";
- kernel = new (std::nothrow) StrassenOpenCLKernel(opParameter, inputs, outputs, ctx, primitive);
+ kernel = new (std::nothrow) StrassenOpenCLKernel(opParameter, inputs, outputs, ctx);
} else {
- kernel = new (std::nothrow) MatMulOpenCLKernel(opParameter, inputs, outputs, ctx, primitive);
+ kernel = new (std::nothrow) MatMulOpenCLKernel(opParameter, inputs, outputs, ctx);
}
if (kernel == nullptr) {
MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr.";
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc
index 25cbbce922..49b5ca0d7c 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.cc
@@ -29,14 +29,14 @@ using mindspore::lite::KernelRegistrar;
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_OK;
using mindspore::schema::PaddingMode_CONSTANT;
-using mindspore::schema::PrimitiveType_Pad;
+using mindspore::schema::PrimitiveType_PadFusion;
namespace mindspore::kernel {
int PadOpenCLKernel::CheckSpecs() {
auto param = reinterpret_cast<PadParameter *>(op_parameter_);
MS_ASSERT(param);
- if (in_tensors_.size() != 1) {
- MS_LOG(ERROR) << "Pad only support 1 input Tensor.";
+ if (in_tensors_.size() != 2) {
+ MS_LOG(ERROR) << "Pad only supports 2 input tensors (input and paddings).";
return RET_ERROR;
}
@@ -110,6 +110,6 @@ int PadOpenCLKernel::Run() {
return RET_OK;
}
-REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Pad, OpenCLKernelCreator<PadOpenCLKernel>)
-REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Pad, OpenCLKernelCreator<PadOpenCLKernel>)
+REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PadFusion, OpenCLKernelCreator<PadOpenCLKernel>)
+REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PadFusion, OpenCLKernelCreator<PadOpenCLKernel>)
} // namespace mindspore::kernel
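Pad follows the same IR-unification pattern: the paddings that used to ride on the primitive now arrive as a second input tensor, which is why CheckSpecs() above expects two inputs. A small sketch of the shape contract, with hypothetical names:

#include <cstdint>
#include <vector>

// The paddings tensor carries a (before, after) pair for every dimension of
// the data tensor, so its element count must be exactly 2 * rank.
bool PaddingsMatchRank(const std::vector<int> &data_shape,
                       const std::vector<std::int32_t> &paddings) {
  return paddings.size() == 2 * data_shape.size();
}
// e.g. a 4-D NHWC input needs an 8-element paddings tensor.

diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.h
index 33e05cf89d..a3ef183955 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pad.h
@@ -29,10 +29,8 @@ namespace mindspore::kernel {
class PadOpenCLKernel : public OpenCLKernel {
public:
PadOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
- const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx,
- const mindspore::lite::PrimitiveC *primitive)
- : OpenCLKernel(parameter, inputs, outputs, ctx, primitive),
- param_(reinterpret_cast<PadParameter *>(op_parameter_)) {}
+ const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx)
+ : OpenCLKernel(parameter, inputs, outputs, ctx), param_(reinterpret_cast<PadParameter *>(op_parameter_)) {}
~PadOpenCLKernel() override = default;
int CheckSpecs() override;
diff --git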
a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc index 044b8d0b98..416f5405e4 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.cc @@ -31,7 +31,8 @@ using mindspore::lite::RET_INVALID_OP_NAME; using mindspore::lite::RET_MEMORY_FAILED; using mindspore::lite::RET_OK; using mindspore::lite::opencl::MemType; -using mindspore::schema::PrimitiveType_Pooling; +using mindspore::schema::PrimitiveType_AvgPoolFusion; +using mindspore::schema::PrimitiveType_MaxPoolFusion; namespace mindspore { namespace kernel { @@ -120,7 +121,9 @@ int PoolingOpenCLKernel::Run() { return mindspore::lite::RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Pooling, OpenCLKernelCreator<PoolingOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Pooling, OpenCLKernelCreator<PoolingOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_AvgPoolFusion, OpenCLKernelCreator<PoolingOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_MaxPoolFusion, OpenCLKernelCreator<PoolingOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_AvgPoolFusion, OpenCLKernelCreator<PoolingOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_MaxPoolFusion, OpenCLKernelCreator<PoolingOpenCLKernel>) } // namespace kernel } // namespace mindspore diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h index 58641347de..b0063beb09 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/pooling2d.h @@ -27,10 +27,8 @@ namespace mindspore::kernel { class PoolingOpenCLKernel : public OpenCLKernel { public: PoolingOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OpenCLKernel(parameter, inputs, outputs, ctx, primitive), - parameter_(reinterpret_cast<PoolingParameter *>(parameter)) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : OpenCLKernel(parameter, inputs, outputs, ctx), parameter_(reinterpret_cast<PoolingParameter *>(parameter)) {} ~PoolingOpenCLKernel() override = default; int Run() override; diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc index a8e800682b..a17dbea353 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/power.cc @@ -27,7 +27,7 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Power; +using mindspore::schema::PrimitiveType_PowFusion; namespace mindspore::kernel { @@ -139,6 +139,6 @@ int PowerOpenCLKernel::Run() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Power, OpenCLKernelCreator<PowerOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Power, OpenCLKernelCreator<PowerOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PowFusion, OpenCLKernelCreator<PowerOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PowFusion, OpenCLKernelCreator<PowerOpenCLKernel>) } // namespace mindspore::kernel diff --git 
a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc index 1a500babbf..e34f05d8d0 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/prelu.cc @@ -29,7 +29,7 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_PReLU; +using mindspore::schema::PrimitiveType_PReLUFusion; namespace mindspore::kernel { @@ -157,6 +157,6 @@ int PReluOpenCLKernel::Run() { return mindspore::lite::RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PReLU, OpenCLKernelCreator<PReluOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PReLU, OpenCLKernelCreator<PReluOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_PReLUFusion, OpenCLKernelCreator<PReluOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_PReLUFusion, OpenCLKernelCreator<PReluOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc index e9ae98baae..5c9433d4e7 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.cc @@ -15,11 +15,13 @@ */ #include <set> +#include <algorithm> #include <string> #include <map> #include "include/errorcode.h" #include "src/kernel_registry.h" #include "src/runtime/kernel/opencl/kernel/reduce.h" +#include "src/runtime/kernel/opencl/utils.h" #include "src/runtime/kernel/opencl/cl/reduce.cl.inc" using mindspore::kernel::KERNEL_ARCH::kGPU; @@ -28,7 +30,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_NULL_PTR; using mindspore::lite::RET_OK; using mindspore::lite::RET_PARAM_INVALID; -using mindspore::schema::PrimitiveType_Reduce; +using mindspore::schema::PrimitiveType_ReduceFusion; using mindspore::schema::ReduceMode; using mindspore::schema::ReduceMode_ReduceMax; using mindspore::schema::ReduceMode_ReduceMean; @@ -65,8 +67,10 @@ cl_float4 ReduceOpenCLKernel::GenC4Mask() { return mask; } +bool hw_reduce(const int *axes_) { return (axes_[0] == 1 && axes_[1] == 2) || (axes_[0] == 2 && axes_[1] == 1); } + int ReduceOpenCLKernel::CheckSpecs() { - if (in_tensors_.size() != 1 || out_tensors_.size() != 1) { + if (in_tensors_.size() != 2 || out_tensors_.size() != 1) { MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } @@ -79,19 +83,36 @@ int ReduceOpenCLKernel::CheckSpecs() { MS_LOG(ERROR) << "not supported reduce type:" << reduce_param->mode_; return RET_PARAM_INVALID; } - if (reduce_param->num_axes_ == 1 && reduce_param->axes_[0] == 3 && in_tensors_[0]->shape()[2] == 1) { - reduce_param->num_axes_ = 2; - reduce_param->axes_[1] = 2; + + // axes is input tensor + // get num_axes + int num_axes = 0; + auto *axes_tensor = in_tensors_.at(1); + if (axes_tensor->shape().size() != 1) { + MS_LOG(ERROR) << "in Reduce: axes tensor's ndim should be 1."; + return RET_ERROR; + } else { + num_axes = axes_tensor->shape().front(); } - if (reduce_param->num_axes_ != 2) { - MS_LOG(ERROR) << "reduce op only support axes=2"; + // check axes tensor + if (CheckParamLikeTensor("Reduce", "axes", axes_tensor, kNumberTypeInt32, {num_axes}) != RET_OK) { + return RET_ERROR; + } + // copy axes from tensor to private var + for (int i = 0; i < std::min(num_axes, 
MAX_SHAPE_SIZE); ++i) {
+ axes_[i] = reinterpret_cast<int *>(axes_tensor->data_c())[i];
+ }
+ if (num_axes == 1 && axes_[0] == 3 && in_tensors_[0]->shape()[2] == 1) {
+ num_axes = 2;
+ axes_[1] = 2;
+ }
+ if (num_axes != 2) {
+ MS_LOG(ERROR) << "reduce op only supports num_axes=2";
return RET_PARAM_INVALID;
}
- bool hw_reduce = (reduce_param->axes_[0] == 1 && reduce_param->axes_[1] == 2) ||
- (reduce_param->axes_[0] == 2 && reduce_param->axes_[1] == 1);
- wc_reduce_ = (reduce_param->axes_[0] == 2 && reduce_param->axes_[1] == 3) ||
- (reduce_param->axes_[0] == 3 && reduce_param->axes_[1] == 2);
- if (!hw_reduce && !wc_reduce_) {
+
+ wc_reduce_ = (axes_[0] == 2 && axes_[1] == 3) || (axes_[0] == 3 && axes_[1] == 2);
+ if (!hw_reduce(axes_) && !wc_reduce_) {
MS_LOG(ERROR) << "reduce op only support axis (1,2) or (2,3)";
return RET_PARAM_INVALID;
}
@@ -103,15 +124,14 @@
}
int ReduceOpenCLKernel::Prepare() {
- outShape = GpuTensorInfo(out_tensors_[0]);
auto reduce_param = reinterpret_cast<ReduceParameter *>(op_parameter_);
if (reduce_param == nullptr) {
return RET_NULL_PTR;
}
std::string kernel_name;
- if (in_tensors_[0]->shape()[reduce_param->axes_[0]] >= LOCAL_CACHE_THREAD ||
- in_tensors_[0]->shape()[reduce_param->axes_[1]] >= LOCAL_CACHE_THREAD) {
+ if (in_tensors_[0]->shape()[axes_[0]] >= LOCAL_CACHE_THREAD ||
+ in_tensors_[0]->shape()[axes_[1]] >= LOCAL_CACHE_THREAD) {
use_local_ = true;
kernel_name += "Local";
} else {
@@ -182,6 +202,6 @@ int ReduceOpenCLKernel::Run() {
return mindspore::lite::RET_OK;
}
-REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Reduce, OpenCLKernelCreator<ReduceOpenCLKernel>)
-REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Reduce, OpenCLKernelCreator<ReduceOpenCLKernel>)
+REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ReduceFusion, OpenCLKernelCreator<ReduceOpenCLKernel>)
+REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ReduceFusion, OpenCLKernelCreator<ReduceOpenCLKernel>)
} // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.h
index ebcafe4d5f..3a1e97d1e7 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.h
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reduce.h
@@ -43,6 +43,7 @@ class ReduceOpenCLKernel : public OpenCLKernel {
bool use_local_{false};
bool wc_reduce_{false};
static const size_t LOCAL_CACHE_THREAD{16};
+ int axes_[MAX_SHAPE_SIZE];
};
} // namespace mindspore::kernel
diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
index b37cda1d00..7e2911dbf9 100644
--- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
+++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.cc
@@ -43,11 +43,11 @@ int ReshapeOpenCLKernel::CheckSpecs() {
return RET_ERROR;
}
if (in_tensors_[0]->shape().size() > 4) {
- MS_LOG(ERROR) << "Reshape input size should in 0-4, actual: " << in_tensors_[0]->shape();
+ MS_LOG(ERROR) << "Reshape input size should be in 0-4, actual: " << in_tensors_[0]->shape().size();
return RET_ERROR;
}
if (out_tensors_[0]->shape().size() > 4) {
- MS_LOG(ERROR) << "Reshape output size should in 1-4, actual: " << out_tensors_[0]->shape();
+ MS_LOG(ERROR) << "Reshape output size should be in 0-4, actual: " << out_tensors_[0]->shape().size();
return RET_ERROR;
}
return RET_OK;
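The Reduce change above illustrates the general recipe for these unified ops: a former attribute arrives as a small const input tensor that CheckSpecs() validates (via CheckParamLikeTensor) and copies into the kernel. A minimal standalone sketch of the axes normalization it performs, assuming int32 axes and the single-axis {3} with W == 1 special case:

#include <algorithm>
#include <vector>

// Returns true when the requested reduction collapses exactly two of
// {H = 1, W = 2, C = 3}; mirrors the axes handling in ReduceOpenCLKernel.
bool NormalizeReduceAxes(std::vector<int> *axes, int input_w) {
  if (axes->size() == 1 && axes->front() == 3 && input_w == 1) {
    *axes = {3, 2};  // reducing C while W == 1 behaves like a (W, C) reduce
  }
  if (axes->size() != 2) {
    return false;
  }
  std::sort(axes->begin(), axes->end());
  return *axes == std::vector<int>{1, 2} || *axes == std::vector<int>{2, 3};
}

diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h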
b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h index 527d08ca7d..020a514d95 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/reshape.h @@ -34,8 +34,6 @@ class ReshapeOpenCLKernel : public OpenCLKernel { void SetConstArgs() override; void SetGlobalLocal() override; int PreProcess() override; - - private: }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc index 67cdd3552a..71f0006b5f 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/scale.cc @@ -32,7 +32,7 @@ using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::ImageSize; using mindspore::lite::opencl::MemType; -using mindspore::schema::PrimitiveType_Scale; +using mindspore::schema::PrimitiveType_ScaleFusion; namespace mindspore::kernel { @@ -219,6 +219,6 @@ int ScaleOpenCLKernel::Run() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Scale, OpenCLKernelCreator<ScaleOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Scale, OpenCLKernelCreator<ScaleOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ScaleFusion, OpenCLKernelCreator<ScaleOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ScaleFusion, OpenCLKernelCreator<ScaleOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc index b3d7b8a817..62e50637ba 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.cc @@ -29,7 +29,7 @@ using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_SoftMax; +using mindspore::schema::PrimitiveType_Softmax; namespace mindspore::kernel { @@ -47,14 +47,15 @@ int SoftmaxOpenCLKernel::CheckSpecs() { MS_LOG(ERROR) << "in size: " << in_tensors_.size() << ", out size: " << out_tensors_.size(); return RET_ERROR; } - axis_ = parameter_->axis_; + SoftmaxParameter *parameter = reinterpret_cast<SoftmaxParameter *>(op_parameter_); + axis_ = parameter->axis_; auto in_shape = in_tensors_[0]->shape(); if (in_shape.size() > 4) { - MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported shape size: " << in_shape.size(); + MS_LOG(ERROR) << "Init Softmax kernel failed: Unsupported shape size: " << in_shape.size(); return RET_ERROR; } if (in_shape[0] > 1) { - MS_LOG(ERROR) << "Init `Softmax` kernel failed: Unsupported multi-batch."; + MS_LOG(ERROR) << "Init Softmax kernel failed: Unsupported multi-batch."; return RET_ERROR; } if (axis_ < 0) { @@ -62,18 +63,18 @@ int SoftmaxOpenCLKernel::CheckSpecs() { } axis_ += 4 - in_shape.size(); if (axis_ != 1 && axis_ != 2 && axis_ != 3) { - MS_LOG(ERROR) << "Init `Softmax` kernel failed: softmax axis should be H W or C"; + MS_LOG(ERROR) << "Init Softmax kernel failed: softmax axis should be H W or C"; return RET_ERROR; } return RET_OK; } int SoftmaxOpenCLKernel::Prepare() { - std::string kernel_name = "SoftMax"; + std::string kernel_name = "Softmax"; - out_shape = GpuTensorInfo(out_tensors_[0]); + out_shape_ = GpuTensorInfo(out_tensors_[0]); std::string source = softmax_source; - if (out_shape.H == 1 && out_shape.W == 1 
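
In the Softmax CheckSpecs hunk above, the parameter is now recovered from the generic op_parameter_ via reinterpret_cast, and a negative axis is wrapped before being shifted into padded 4-D coordinates with `axis_ += 4 - in_shape.size()`. A small sketch of that normalization, under the patch's assumption that only H, W, or C (axes 1/2/3 after padding) are supported:

```cpp
#include <cstdio>

// Normalize a softmax axis the way the OpenCL kernel does: wrap negatives,
// then shift into the padded 4-D (N,H,W,C) layout. Returns -1 if unsupported.
int NormalizeSoftmaxAxis(int axis, int in_ndim) {
  if (axis < 0) {
    axis += in_ndim;   // wrap a negative axis into [0, ndim)
  }
  axis += 4 - in_ndim; // align to 4-D NHWC coordinates
  if (axis != 1 && axis != 2 && axis != 3) {
    return -1;         // the kernel only supports softmax over H, W or C
  }
  return axis;
}

int main() {
  std::printf("%d\n", NormalizeSoftmaxAxis(-1, 2));  // 2-D tensor, last axis -> C (3)
  std::printf("%d\n", NormalizeSoftmaxAxis(0, 2));   // 2-D tensor, axis 0 -> W (2)
}
```
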
&& axis_ == 3) { + if (out_shape_.H == 1 && out_shape_.W == 1 && axis_ == 3) { // support 4d tensor onexone_flag_ = true; kernel_name += "1x1"; @@ -85,7 +86,7 @@ int SoftmaxOpenCLKernel::Prepare() { #ifdef PROGRAM_WITH_IL kernel_ = ocl_runtime->GetKernelFromBinary(kernel_name); #else - std::string program_name = "SoftMax"; + std::string program_name = "Softmax"; ocl_runtime_->LoadSource(program_name, source); std::vector<std::string> ext_build_opt; if (out_tensors_[0]->data_type() == kNumberTypeFloat32) { @@ -108,14 +109,14 @@ void SoftmaxOpenCLKernel::SetGlobalLocal() { } else { size_t global_x, global_y; if (axis_ == 1) { - global_x = out_shape.Slice; - global_y = out_shape.W; + global_x = out_shape_.Slice; + global_y = out_shape_.W; } else if (axis_ == 2) { - global_x = out_shape.Slice; - global_y = out_shape.H; + global_x = out_shape_.Slice; + global_y = out_shape_.H; } else if (axis_ == 3) { - global_x = out_shape.W; - global_y = out_shape.H; + global_x = out_shape_.W; + global_y = out_shape_.H; } else { global_x = 1; global_y = 1; @@ -135,12 +136,12 @@ int SoftmaxOpenCLKernel::Tune() { void SoftmaxOpenCLKernel::SetConstArgs() { int arg_idx = 2; - int channel = out_shape.C; - int c4 = out_shape.Slice; + int channel = out_shape_.C; + int c4 = out_shape_.Slice; auto mask_ = GetMaskForLastChannel(channel); cl_float4 mask = {mask_[0], mask_[1], mask_[2], mask_[3]}; ocl_runtime_->SetKernelArg(kernel_, arg_idx++, mask); - cl_int4 input_shape = {static_cast<int>(out_shape.N), static_cast<int>(out_shape.H), static_cast<int>(out_shape.W), + cl_int4 input_shape = {static_cast<int>(out_shape_.N), static_cast<int>(out_shape_.H), static_cast<int>(out_shape_.W), c4}; ocl_runtime_->SetKernelArg(kernel_, arg_idx, input_shape); } @@ -154,6 +155,6 @@ int SoftmaxOpenCLKernel::Run() { return lite::RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SoftMax, OpenCLKernelCreator<SoftmaxOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SoftMax, OpenCLKernelCreator<SoftmaxOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Softmax, OpenCLKernelCreator<SoftmaxOpenCLKernel>) +REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Softmax, OpenCLKernelCreator<SoftmaxOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h index 8e629d8de0..a66e4abb3c 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/softmax.h @@ -26,14 +26,9 @@ namespace mindspore::kernel { class SoftmaxOpenCLKernel : public OpenCLKernel { public: - SoftmaxOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : OpenCLKernel(parameter, inputs, outputs, ctx, primitive) { - parameter_ = reinterpret_cast<SoftmaxParameter *>(parameter); - } - + using OpenCLKernel::OpenCLKernel; ~SoftmaxOpenCLKernel() override = default; + int Run() override; int Prepare() override; int CheckSpecs() override; @@ -42,17 +37,13 @@ class SoftmaxOpenCLKernel : public OpenCLKernel { int Tune() override; private: - int InitGlobalSize(); - int SetWorkGroupSize1x1(); - int SetWorkGroupSize(); std::vector<float> GetMaskForLastChannel(int channels); - SoftmaxParameter *parameter_; bool onexone_flag_{false}; std::vector<size_t> local_size_; std::vector<size_t> global_size_; int axis_{0}; - 
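
The SetGlobalLocal hunk above selects the 2-D global work size from the normalized axis: (Slice, W) when reducing over H, (Slice, H) over W, and (W, H) over C. The mapping in isolation (ShapeInfo is a hypothetical subset of the patch's GpuTensorInfo):

```cpp
#include <cstdio>
#include <utility>

// Shape info as the patch's GpuTensorInfo exposes it (hypothetical subset).
struct ShapeInfo {
  size_t H, W, Slice;  // Slice = number of channel groups of 4 in the real type
};

// Choose the 2-D global work size for the softmax kernel from the axis,
// matching the (1 -> Slice x W, 2 -> Slice x H, 3 -> W x H) table above.
std::pair<size_t, size_t> GlobalSizeForAxis(int axis, const ShapeInfo &s) {
  switch (axis) {
    case 1: return {s.Slice, s.W};  // softmax over H
    case 2: return {s.Slice, s.H};  // softmax over W
    case 3: return {s.W, s.H};      // softmax over C
    default: return {1, 1};
  }
}

int main() {
  ShapeInfo s{4, 8, 2};
  auto g = GlobalSizeForAxis(3, s);
  std::printf("global = (%zu, %zu)\n", g.first, g.second);
}
```
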
GpuTensorInfo out_shape; + GpuTensorInfo out_shape_; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc index 556558b6cd..c8d772570c 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/strided_slice.cc @@ -22,33 +22,49 @@ #include "src/runtime/kernel/opencl/kernel/strided_slice.h" #include "src/runtime/kernel/opencl/utils.h" #include "src/runtime/kernel/opencl/cl/strided_slice.cl.inc" -#include "nnacl/strided_slice_parameter.h" using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Slice; +using mindspore::schema::PrimitiveType_SliceFusion; using mindspore::schema::PrimitiveType_StridedSlice; namespace mindspore::kernel { int StridedSliceOpenCLKernel::CheckSpecs() { - if (Type() == PrimitiveType_Slice) { + if (Type() == PrimitiveType_SliceFusion) { if (in_tensors_.size() != 3) { MS_LOG(ERROR) << "Slice only supports 3 input Tensor."; return RET_ERROR; } + int in_ndim = in_tensors_.front()->shape().size(); + if (CheckParamLikeTensor("Slice", "begin", in_tensors_.at(1), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } + if (CheckParamLikeTensor("Slice", "size", in_tensors_.at(2), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } } else if (Type() == PrimitiveType_StridedSlice) { if (in_tensors_.size() != 4) { MS_LOG(ERROR) << "StridedSlice only supports 4 input Tensor."; return RET_ERROR; } + int in_ndim = in_tensors_.front()->shape().size(); + if (CheckParamLikeTensor("StridedSlice", "begin", in_tensors_.at(1), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } + if (CheckParamLikeTensor("StridedSlice", "end", in_tensors_.at(2), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } + if (CheckParamLikeTensor("StridedSlice", "stride", in_tensors_.at(3), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } } else { MS_LOG(ERROR) << "Type error."; return RET_ERROR; } - const std::string kernel_name = Type() == PrimitiveType_Slice ? "Slice" : "StridedSlice"; + const std::string kernel_name = Type() == PrimitiveType_SliceFusion ? 
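
The StridedSlice hunk above shows the validation pattern the new CheckParamLikeTensor helper enables: every "parameter-like" input (begin/size for Slice, begin/end/stride for StridedSlice) must be a const int32 tensor of shape {ndim}. A sketch of the calling pattern, with the helper stubbed out since its real implementation lands later in utils.cc (the toy Tensor type and the stub's reduced checks are assumptions):

```cpp
#include <cstdio>
#include <string>
#include <vector>

enum { RET_OK = 0, RET_ERROR = 1 };

// Hypothetical stand-in for lite::Tensor; only what the check needs.
struct Tensor {
  bool is_const;
  std::vector<int> shape;
};

// Stub of the helper added in utils.cc: const-ness and shape checks only
// (the real one also verifies data_type against kNumberTypeInt32).
int CheckParamLikeTensor(const std::string &kernel, const std::string &name,
                         const Tensor &t, const std::vector<int> &expect_shape) {
  if (!t.is_const || t.shape != expect_shape) {
    std::printf("in %s: tensor %s must be const with the expected shape\n",
                kernel.c_str(), name.c_str());
    return RET_ERROR;
  }
  return RET_OK;
}

// StridedSlice-style validation: begin/end/stride all 1-D tensors of length ndim.
int CheckStridedSliceInputs(const std::vector<Tensor> &inputs) {
  int ndim = static_cast<int>(inputs.front().shape.size());
  const char *names[] = {"begin", "end", "stride"};
  for (int i = 1; i <= 3; ++i) {
    if (CheckParamLikeTensor("StridedSlice", names[i - 1], inputs[i], {ndim}) != RET_OK) {
      return RET_ERROR;
    }
  }
  return RET_OK;
}

int main() {
  std::vector<Tensor> in = {{false, {1, 4, 4, 3}}, {true, {4}}, {true, {4}}, {true, {4}}};
  std::printf("check = %d\n", CheckStridedSliceInputs(in));
}
```
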
"Slice" : "StridedSlice"; if (out_tensors_.size() != 1) { MS_LOG(ERROR) << kernel_name + " only supports 1 output Tensor."; return RET_ERROR; @@ -88,11 +104,11 @@ int StridedSliceOpenCLKernel::InitConstArgs() { static_cast<cl_int>(output_info.W), static_cast<cl_int>(output_info.C)}; io_slices_ = {static_cast<cl_int>(input_info.Slice), static_cast<cl_int>(output_info.Slice)}; - if (Type() == PrimitiveType_Slice) { - auto param = reinterpret_cast<SliceParameter *>(op_parameter_); - MS_ASSERT(param); - Broadcast2GpuShape(begin_.s, param->begin_, param->param_length_, 0); - Broadcast2GpuShape(size_.s, param->size_, param->param_length_, -1); + if (Type() == PrimitiveType_SliceFusion) { + auto *begin = reinterpret_cast<int32_t *>(in_tensors_.at(1)->data_c()); + auto *size = reinterpret_cast<int32_t *>(in_tensors_.at(2)->data_c()); + Broadcast2GpuShape(begin_.s, begin, input_info.NDim, 0); + Broadcast2GpuShape(size_.s, size, input_info.NDim, -1); for (int i = 0; i < 4; ++i) { if (begin_.s[i] < 0) { begin_.s[i] += input_shape_.s[i]; @@ -111,12 +127,13 @@ int StridedSliceOpenCLKernel::InitConstArgs() { } } } else { - auto param = reinterpret_cast<StridedSliceParameter *>(op_parameter_); - MS_ASSERT(param); - cl_int4 end = input_shape_; - Broadcast2GpuShape(begin_.s, param->begins_, param->num_axes_, 0); - Broadcast2GpuShape(stride_.s, param->strides_, param->num_axes_, 1); - Broadcast2GpuShape(end.s, param->ends_, param->num_axes_); + auto *begin = reinterpret_cast<int32_t *>(in_tensors_.at(1)->data_c()); + auto *end = reinterpret_cast<int32_t *>(in_tensors_.at(2)->data_c()); + auto *stride = reinterpret_cast<int32_t *>(in_tensors_.at(3)->data_c()); + cl_int4 end_ = input_shape_; + Broadcast2GpuShape(begin_.s, begin, input_info.NDim, 0); + Broadcast2GpuShape(end_.s, end, input_info.NDim); + Broadcast2GpuShape(stride_.s, stride, input_info.NDim, 1); for (int i = 0; i < 4; ++i) { // begin is negative @@ -126,20 +143,20 @@ int StridedSliceOpenCLKernel::InitConstArgs() { // avoid begin is out of range begin_.s[i] = std::clamp(begin_.s[i], 0, input_shape_.s[i] - 1); // end is negative - if (end.s[i] < 0) { - end.s[i] += input_shape_.s[i]; + if (end_.s[i] < 0) { + end_.s[i] += input_shape_.s[i]; } // avoid end is out of range - end.s[i] = std::clamp(end.s[i], -1, input_shape_.s[i]); + end_.s[i] = std::clamp(end_.s[i], -1, input_shape_.s[i]); // check stride begin end if (stride_.s[i] > 0) { - if (begin_.s[i] >= end.s[i]) { + if (begin_.s[i] >= end_.s[i]) { MS_LOG(ERROR) << "StridedSlice kernel only supports begin_<end when stride>0"; return RET_ERROR; } } else if (stride_.s[i] < 0) { - if (begin_.s[i] <= end.s[i]) { + if (begin_.s[i] <= end_.s[i]) { MS_LOG(ERROR) << "StridedSlice kernel only supports begin_>end when stride<0"; return RET_ERROR; } @@ -147,7 +164,7 @@ int StridedSliceOpenCLKernel::InitConstArgs() { MS_LOG(ERROR) << "StridedSlice kernel only supports stride!=0"; return RET_ERROR; } - size_.s[i] = std::ceil(static_cast<float>(end.s[i] - begin_.s[i]) / static_cast<float>(stride_.s[i])); + size_.s[i] = std::ceil(static_cast<float>(end_.s[i] - begin_.s[i]) / static_cast<float>(stride_.s[i])); } } @@ -197,8 +214,8 @@ int StridedSliceOpenCLKernel::Run() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Slice, OpenCLKernelCreator<StridedSliceOpenCLKernel>); -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Slice, OpenCLKernelCreator<StridedSliceOpenCLKernel>); +REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_SliceFusion, OpenCLKernelCreator<StridedSliceOpenCLKernel>); 
+REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_SliceFusion, OpenCLKernelCreator<StridedSliceOpenCLKernel>); REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_StridedSlice, OpenCLKernelCreator<StridedSliceOpenCLKernel>); REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_StridedSlice, OpenCLKernelCreator<StridedSliceOpenCLKernel>); } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc index e0512be8e2..2935580a05 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/to_format.cc @@ -28,7 +28,6 @@ using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::MemType; -using mindspore::schema::PrimitiveType_ToFormat; namespace mindspore::kernel { @@ -125,6 +124,4 @@ int ToFormatOpenCLKernel::InferShape() { return RET_OK; } -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_ToFormat, OpenCLKernelCreator<ToFormatOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_ToFormat, OpenCLKernelCreator<ToFormatOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc index f8996d896d..ea39694db7 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.cc @@ -22,53 +22,62 @@ #ifndef PROGRAM_WITH_IL #include "src/runtime/kernel/opencl/cl/transpose.cl.inc" #endif +#include "src/runtime/kernel/opencl/utils.h" using mindspore::kernel::KERNEL_ARCH::kGPU; using mindspore::lite::KernelRegistrar; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; -using mindspore::schema::PrimitiveType_Nchw2Nhwc; -using mindspore::schema::PrimitiveType_Nhwc2Nchw; using mindspore::schema::PrimitiveType_Transpose; namespace mindspore::kernel { int TransposeOpenCLKernel::CheckSpecs() { - if ((in_tensors_.size() != 1 && in_tensors_.size() != 2) || out_tensors_.size() != 1) { + if (in_tensors_.size() != 2 || out_tensors_.size() != 1) { MS_LOG(ERROR) << "Transpose input output size unsupported."; return RET_ERROR; } - tensor_size_ = GpuTensorInfo(out_tensors_[0]); - if (tensor_size_.NDim > 4) { + int in_ndim = in_tensors_.at(0)->shape().size(); + int out_ndim = out_tensors_.at(0)->shape().size(); + if (in_ndim != out_ndim) { + MS_LOG(ERROR) << "Transpose only support in_ndim equal to out_ndim."; + return RET_ERROR; + } + if (in_ndim > 4) { MS_LOG(ERROR) << "Transpose don't support 5d tensor or higher."; return RET_ERROR; } + if (CheckParamLikeTensor("Transpose", "perm", in_tensors_.at(1), kNumberTypeInt32, {in_ndim}) != RET_OK) { + return RET_ERROR; + } return RET_OK; } int TransposeOpenCLKernel::Prepare() { - auto param = reinterpret_cast<TransposeParameter *>(op_parameter_); + tensor_size_ = GpuTensorInfo(out_tensors_.front()); + auto *perm = reinterpret_cast<int32_t *>(in_tensors_.at(1)->data_c()); + int num_axes = in_tensors_.at(1)->shape().at(0); if (tensor_size_.NDim == 2) { - perm_4d_[0] = tensor_size_.AlignAxis(param->perm_[0]); + perm_4d_[0] = tensor_size_.AlignAxis(perm[0]); perm_4d_[1] = 1; perm_4d_[2] = 2; - perm_4d_[3] = tensor_size_.AlignAxis(param->perm_[1]); - if (param->num_axes_ != tensor_size_.NDim) { + perm_4d_[3] = tensor_size_.AlignAxis(perm[1]); + if (num_axes != tensor_size_.NDim) { perm_4d_[0] = 0; perm_4d_[1] = 
1; perm_4d_[2] = 2; perm_4d_[3] = 3; } } else if (tensor_size_.NDim == 3) { - perm_4d_[0] = tensor_size_.AlignAxis(param->perm_[0]); + perm_4d_[0] = tensor_size_.AlignAxis(perm[0]); perm_4d_[1] = 1; - perm_4d_[2] = tensor_size_.AlignAxis(param->perm_[1]); - perm_4d_[3] = tensor_size_.AlignAxis(param->perm_[2]); + perm_4d_[2] = tensor_size_.AlignAxis(perm[1]); + perm_4d_[3] = tensor_size_.AlignAxis(perm[2]); } else if (tensor_size_.NDim == 4) { - perm_4d_[0] = tensor_size_.AlignAxis(param->perm_[0]); - perm_4d_[1] = tensor_size_.AlignAxis(param->perm_[1]); - perm_4d_[2] = tensor_size_.AlignAxis(param->perm_[2]); - perm_4d_[3] = tensor_size_.AlignAxis(param->perm_[3]); + perm_4d_[0] = tensor_size_.AlignAxis(perm[0]); + perm_4d_[1] = tensor_size_.AlignAxis(perm[1]); + perm_4d_[2] = tensor_size_.AlignAxis(perm[2]); + perm_4d_[3] = tensor_size_.AlignAxis(perm[3]); } else { perm_4d_[0] = 0; perm_4d_[1] = 1; @@ -161,8 +170,4 @@ int TransposeOpenCLKernel::Run() { REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Transpose, OpenCLKernelCreator<TransposeOpenCLKernel>) REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Transpose, OpenCLKernelCreator<TransposeOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Nhwc2Nchw, OpenCLKernelCreator<TransposeOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Nhwc2Nchw, OpenCLKernelCreator<TransposeOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat32, PrimitiveType_Nchw2Nhwc, OpenCLKernelCreator<TransposeOpenCLKernel>) -REG_KERNEL(kGPU, kNumberTypeFloat16, PrimitiveType_Nchw2Nhwc, OpenCLKernelCreator<TransposeOpenCLKernel>) } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h index b337a1218e..49e041c2f8 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/transpose.h @@ -40,7 +40,7 @@ class TransposeOpenCLKernel : public OpenCLKernel { private: TransposeType type_{TransposeType::AXIS0312}; - GpuTensorInfo tensor_size_{GpuTensorInfo(nullptr)}; + GpuTensorInfo tensor_size_; int perm_4d_[4]; }; } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/kernel/winograd.h b/mindspore/lite/src/runtime/kernel/opencl/kernel/winograd.h index e174c594ec..e2a48d4c63 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/kernel/winograd.h +++ b/mindspore/lite/src/runtime/kernel/opencl/kernel/winograd.h @@ -26,9 +26,9 @@ namespace mindspore::kernel { class WinogradOpenCLKernel : public Conv2DOpenCLKernel { public: WinogradOpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : Conv2DOpenCLKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : Conv2DOpenCLKernel(parameter, inputs, outputs, ctx) { + use_winograd_ = true; filter_type_ = MemType::BUF; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/opencl_fusion.cc b/mindspore/lite/src/runtime/kernel/opencl/opencl_fusion.cc index 7ae93c0390..8302a2b456 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_fusion.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_fusion.cc @@ -213,7 +213,7 @@ inline void MergeRemoveB(LiteKernel *a, LiteKernel *b, std::set<LiteKernel *> *r } } -// Pad + Conv2D +// Pad + Conv2D(no_winograd) // Pad + 
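
The Transpose hunks above replace the TransposeParameter perm with a perm tensor (second input) and map each logical axis into the padded 4-D layout through GpuTensorInfo::AlignAxis. A sketch of the per-rank mapping; the toy AlignAxis encodes an assumed NHWC padding (2-D as N,C; 3-D as N,W,C), which is a simplification of the real helper:

```cpp
#include <cstdio>

// Toy AlignAxis: map a logical axis into padded 4-D NHWC. Assumed behaviour:
// 2-D (N,C) -> axes {0,3}; 3-D (N,W,C) -> {0,2,3}; 4-D is the identity.
int AlignAxis(int ndim, int axis) {
  if (ndim == 2) return axis == 0 ? 0 : 3;
  if (ndim == 3) { static const int m[3] = {0, 2, 3}; return m[axis]; }
  return axis;  // ndim == 4
}

// Build the 4-D permutation the kernel actually runs with, mirroring the
// per-rank branches in TransposeOpenCLKernel::Prepare().
void BuildPerm4d(int ndim, const int *perm, int perm_len, int out[4]) {
  out[0] = 0; out[1] = 1; out[2] = 2; out[3] = 3;  // identity fallback
  if (ndim == 2 && perm_len == 2) {                // perm_len mismatch keeps identity
    out[0] = AlignAxis(2, perm[0]);
    out[3] = AlignAxis(2, perm[1]);
  } else if (ndim == 3) {
    out[0] = AlignAxis(3, perm[0]);
    out[2] = AlignAxis(3, perm[1]);
    out[3] = AlignAxis(3, perm[2]);
  } else if (ndim == 4) {
    for (int i = 0; i < 4; ++i) out[i] = AlignAxis(4, perm[i]);
  }
}

int main() {
  int perm[] = {0, 3, 1, 2};  // NHWC -> NCHW
  int p4[4];
  BuildPerm4d(4, perm, 4, p4);
  std::printf("perm_4d = (%d,%d,%d,%d)\n", p4[0], p4[1], p4[2], p4[3]);
}
```
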
DepthwiseConv2D // Pad + DeConv2D // Pad + Pooling @@ -221,7 +221,12 @@ template <typename ParamType> void TryMergePadXxx(LiteKernel *node, std::set<LiteKernel *> *removed_set, std::vector<LiteKernel *> *nodes) { MS_ASSERT(node); MS_ASSERT(removed_set); - if (!PredIs(node, schema::PrimitiveType_Pad, nodes)) { + if (node->Type() == schema::PrimitiveType_Conv2DFusion) { + if (reinterpret_cast<Conv2DOpenCLKernel *>(node)->use_winograd_) { + return; + } + } + if (!PredIs(node, schema::PrimitiveType_PadFusion, nodes)) { return; } LiteKernel *pad = node->in_kernels().front(); @@ -242,7 +247,7 @@ void TryMergePadXxx(LiteKernel *node, std::set<LiteKernel *> *removed_set, std:: conv_param->pad_d_ += pad_param->paddings_[3]; conv_param->pad_l_ += pad_param->paddings_[4]; conv_param->pad_r_ += pad_param->paddings_[5]; - + pad->set_in_tensors({pad->in_tensors().front()}); MergeRemoveA(pad, node, removed_set); MS_LOG(DEBUG) << "Merge Pad and " + GetTypeName(node) + " success"; } @@ -251,12 +256,20 @@ void TryMergePadXxx(LiteKernel *node, std::set<LiteKernel *> *removed_set, std:: void TryMergeConvReshape(LiteKernel *reshape, std::set<LiteKernel *> *removed_set, std::vector<LiteKernel *> *nodes) { MS_ASSERT(reshape); MS_ASSERT(removed_set); - if (!PredIs(reshape, schema::PrimitiveType_Conv2D, nodes)) { + if (!PredIs(reshape, schema::PrimitiveType_Conv2DFusion, nodes)) { + return; + } + + // group must be 1 + LiteKernel *conv = reshape->in_kernels().front(); + MS_ASSERT(conv); + auto *param = reinterpret_cast<ConvParameter *>(reinterpret_cast<OpenCLKernel *>(conv)->GetParameter()); + MS_ASSERT(param); + if (param->group_ != 1) { return; } + if (N11C_NC(reshape)) { - LiteKernel *conv = reshape->in_kernels().front(); - MS_ASSERT(conv); MergeRemoveB(conv, reshape, removed_set); MS_LOG(DEBUG) << "Merge Conv2D and Reshape(N11C->NC) success"; } @@ -327,6 +340,16 @@ void TryMergeXxxActivation(LiteKernel *act, std::set<LiteKernel *> *removed_set) LiteKernel *node = act->in_kernels().front(); auto *param = reinterpret_cast<ParamType *>(reinterpret_cast<OpenCLKernel *>(node)->GetParameter()); MS_ASSERT(param); + + // if xxx is conv, group must be 1 + if (node->Type() == schema::PrimitiveType_Conv2DFusion) { + auto *conv_param = reinterpret_cast<ConvParameter *>(param); + if (conv_param->group_ != 1) { + return; + } + } + + // conv/fc must not have act function if (param->act_type_ == ActType_No) { std::string act_name; if (act_param->type_ == ActivationType_RELU) { @@ -346,11 +369,16 @@ void TryMergeXxxActivation(LiteKernel *act, std::set<LiteKernel *> *removed_set) } } -// Conv2D(NO_ACTIVATION) + PReLU(weight is scalar) +// Conv2D(NO_ACTIVATION/no_winograd) + PReLU(weight is scalar) void TryMergeConvPReLU(LiteKernel *prelu, std::set<LiteKernel *> *removed_set, std::vector<LiteKernel *> *nodes) { MS_ASSERT(prelu); MS_ASSERT(removed_set); - if (!PredIs(prelu, schema::PrimitiveType_Conv2D, nodes)) { + if (!PredIs(prelu, schema::PrimitiveType_Conv2DFusion, nodes)) { + return; + } + LiteKernel *conv = prelu->in_kernels().front(); + MS_ASSERT(conv); + if (reinterpret_cast<Conv2DOpenCLKernel *>(conv)->use_winograd_) { return; } @@ -367,10 +395,10 @@ void TryMergeConvPReLU(LiteKernel *prelu, std::set<LiteKernel *> *removed_set, s return; } - LiteKernel *conv = prelu->in_kernels().front(); auto *param = reinterpret_cast<ConvParameter *>(reinterpret_cast<OpenCLKernel *>(conv)->GetParameter()); MS_ASSERT(param); - if (param->act_type_ == ActType_No) { + // group must be 1 & have not act function + if (param->group_ == 1 && 
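
The fusion hunks above all gate on the same conditions before folding a following activation or PReLU into a convolution: the conv must not use Winograd, its group count must be 1, and it must not already carry an activation. A compact sketch of that guard predicate (field names follow the patch; the structs are stand-ins for the runtime types):

```cpp
#include <cstdio>

enum ActType { ActType_No = 0, ActType_Relu = 1 };

// Stand-ins for the runtime types referenced by the fusion pass.
struct ConvParameter { int group_; ActType act_type_; };
struct Conv2DKernelInfo { bool use_winograd_; ConvParameter *param_; };

// A Conv2DFusion node may absorb a following activation/PReLU only when it is
// a plain group-1 convolution that does not already apply an activation.
bool CanFuseActivationIntoConv(const Conv2DKernelInfo &conv) {
  if (conv.use_winograd_) return false;        // Winograd kernels keep act separate
  if (conv.param_->group_ != 1) return false;  // grouped conv is not fused
  return conv.param_->act_type_ == ActType_No;
}

int main() {
  ConvParameter p{1, ActType_No};
  Conv2DKernelInfo k{false, &p};
  std::printf("fusable = %d\n", CanFuseActivationIntoConv(k));
}
```
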
param->act_type_ == ActType_No) { param->act_type_ = static_cast<ActType>(ActivationType_LEAKY_RELU); reinterpret_cast<Conv2DOpenCLKernel *>(conv)->alpha_ = *reinterpret_cast<float *>(prelu_weight->data_c()); MergeRemoveB(conv, prelu, removed_set); @@ -441,7 +469,7 @@ int TryFusionConvScaleWeight(LiteKernel *conv_kernel, LiteKernel *scale_kernel) void TryMergeDeconvScale(LiteKernel *scale, std::set<LiteKernel *> *removed_set, std::vector<LiteKernel *> *nodes) { MS_ASSERT(scale); MS_ASSERT(removed_set); - if (!PredIs(scale, schema::PrimitiveType_DeConv2D, nodes)) { + if (!PredIs(scale, schema::PrimitiveType_Conv2dTransposeFusion, nodes)) { return; } LiteKernel *deconv = scale->in_kernels().front(); @@ -486,8 +514,8 @@ void CreateEltwiseKernelReplaceOld(FusionEltwiseParameter *param, LiteKernel *ol MS_ASSERT(old); MS_ASSERT(nodes); MS_ASSERT(removed_set); - auto *eltwise = new (std::nothrow) FusionEltwiseOpenCLKernel(reinterpret_cast<OpParameter *>(param), - old->in_tensors(), old->out_tensors(), nullptr, nullptr); + auto *eltwise = new (std::nothrow) + FusionEltwiseOpenCLKernel(reinterpret_cast<OpParameter *>(param), old->in_tensors(), old->out_tensors(), nullptr); if (eltwise == nullptr) { MS_LOG(ERROR) << "create FusionEltwiseOpenCLKernel error."; return; @@ -561,13 +589,13 @@ int TryMergeEltwiseEltwise(LiteKernel *node, std::set<LiteKernel *> *removed_set void DoSpecificFusion(LiteKernel *node, std::set<LiteKernel *> *removed_set, std::vector<LiteKernel *> *nodes) { switch (node->Type()) { - case schema::PrimitiveType_Conv2D: - case schema::PrimitiveType_DepthwiseConv2D: - case schema::PrimitiveType_DeConv2D: { + case schema::PrimitiveType_Conv2DFusion: + case schema::PrimitiveType_Conv2dTransposeFusion: { TryMergePadXxx<ConvParameter>(node, removed_set, nodes); break; } - case schema::PrimitiveType_Pooling: { + case schema::PrimitiveType_AvgPoolFusion: + case schema::PrimitiveType_MaxPoolFusion: { TryMergePadXxx<PoolingParameter>(node, removed_set, nodes); break; } @@ -583,7 +611,7 @@ void DoSpecificFusion(LiteKernel *node, std::set<LiteKernel *> *removed_set, std case schema::PrimitiveType_Activation: { // try merge Conv2D/FC(without act) + RELU/RELU6/TANH // try merge Arithmetic(without act) + RELU/RELU6 - if (PredIs(node, schema::PrimitiveType_Conv2D, nodes)) { + if (PredIs(node, schema::PrimitiveType_Conv2DFusion, nodes)) { TryMergeXxxActivation<ConvParameter>(node, removed_set); } else if (PredIs(node, schema::PrimitiveType_FullConnection, nodes)) { TryMergeXxxActivation<MatMulParameter>(node, removed_set); @@ -593,11 +621,11 @@ void DoSpecificFusion(LiteKernel *node, std::set<LiteKernel *> *removed_set, std } break; } - case schema::PrimitiveType_PReLU: { + case schema::PrimitiveType_PReLUFusion: { TryMergeConvPReLU(node, removed_set, nodes); break; } - case schema::PrimitiveType_Scale: { + case schema::PrimitiveType_ScaleFusion: { TryMergeDeconvScale(node, removed_set, nodes); break; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.cc b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.cc index f923dc7541..ff26eb29c6 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.cc @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include <mindspore/lite/src/runtime/infer_manager.h> #include "src/runtime/kernel/opencl/opencl_kernel.h" #include "mindspore/lite/src/dequant.h" @@ -193,14 +194,12 @@ int OpenCLKernel::InferShape() { if (infer_shape_flag_) { return RET_OK; } - if (primitive_ == nullptr) { - return RET_ERROR; - } - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(true); - auto ret = (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->InferShape(in_tensors_, out_tensors_); + op_parameter_->infer_flag_ = true; + auto ret = lite::KernelInferShape(in_tensors_, &out_tensors_, op_parameter_); if (ret != RET_OK) { - (const_cast<mindspore::lite::PrimitiveC *>(primitive_))->set_infer_flag(false); - MS_LOG(ERROR) << "InferShape fail!"; + MS_LOG(ERROR) << "InferShape failed, type: " + << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(Type())); + op_parameter_->infer_flag_ = false; return ret; } infer_shape_flag_ = true; @@ -287,8 +286,8 @@ int OpenCLKernel::Tune() { if (mode == lite::opencl::TuningMode::DEFAULT) { return RET_OK; } - static const std::set<int> FAST_MODE_OPS = {schema::PrimitiveType_Conv2D, schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_DeConv2D}; + static const std::set<int> FAST_MODE_OPS = {schema::PrimitiveType_Conv2DFusion, + schema::PrimitiveType_Conv2dTransposeFusion}; if (mode == lite::opencl::TuningMode::FAST && FAST_MODE_OPS.find(op_parameter_->type_) == FAST_MODE_OPS.end()) { return RET_OK; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h index d0a6849303..8d10f28016 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_kernel.h @@ -164,22 +164,10 @@ struct BaseTuningParameter { class OpenCLKernel : public LiteKernel { public: OpenCLKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const mindspore::lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) { + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) { ocl_runtime_ = ocl_runtime_wrap_.GetInstance(); - if (primitive != nullptr) { - infer_shape_flag_ = primitive->infer_flag(); - } else { - bool output_shape_setted = true; - for (auto output : outputs) { - if (output->shape().empty() || output->ElementsNum() < 0) { - output_shape_setted = false; - break; - } - } - infer_shape_flag_ = output_shape_setted; - } + infer_shape_flag_ = parameter->infer_flag_; } ~OpenCLKernel() override = default; int AlignGlobalLocal(const std::vector<size_t> &global, const std::vector<size_t> &local); @@ -246,9 +234,8 @@ class OpenCLKernel : public LiteKernel { template <class T> kernel::LiteKernel *OpenCLKernelCreator(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, OpParameter *opParameter, - const lite::InnerContext *ctx, const kernel::KernelKey &desc, - const mindspore::lite::PrimitiveC *primitive) { - auto *kernel = new (std::nothrow) T(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, ctx, primitive); + const lite::InnerContext *ctx, const kernel::KernelKey &desc) { + auto *kernel = new (std::nothrow) T(reinterpret_cast<OpParameter *>(opParameter), inputs, outputs, ctx); if (kernel == nullptr) { MS_LOG(ERROR) << "kernel " << opParameter->name_ << "is nullptr."; free(opParameter); diff 
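
The opencl_kernel.cc hunks above are the heart of the IR unification for the GPU backend: shape inference no longer goes through PrimitiveC but through lite::KernelInferShape driven by an infer_flag_ carried on OpParameter, and the kernel constructor simply inherits that flag. A sketch of the resulting control flow, with KernelInferShape stubbed and return codes mirroring errorcode.h:

```cpp
#include <cstdio>

enum { RET_OK = 0, RET_ERROR = 1 };

struct OpParameter { int type_; bool infer_flag_; };

// Stub for lite::KernelInferShape: the real one dispatches to a per-op infer
// function registered by operator type.
int KernelInferShape(OpParameter *param) {
  return param->type_ >= 0 ? RET_OK : RET_ERROR;
}

// Mirror of OpenCLKernel::InferShape after the patch: set the flag
// optimistically, run registry-based inference, roll the flag back on failure.
int InferShape(OpParameter *param, bool *infer_shape_flag) {
  if (*infer_shape_flag) return RET_OK;  // already inferred at load time
  param->infer_flag_ = true;
  int ret = KernelInferShape(param);
  if (ret != RET_OK) {
    param->infer_flag_ = false;          // keep the flag consistent on failure
    return ret;
  }
  *infer_shape_flag = true;
  return RET_OK;
}

int main() {
  OpParameter p{42, false};
  bool flag = p.infer_flag_;             // the constructor now copies this flag
  int ret = InferShape(&p, &flag);
  std::printf("infer = %d, flag = %d\n", ret, flag);
}
```
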
--git a/mindspore/lite/src/runtime/kernel/opencl/opencl_subgraph.cc b/mindspore/lite/src/runtime/kernel/opencl/opencl_subgraph.cc index 778347c265..5095f1a1ce 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/opencl_subgraph.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/opencl_subgraph.cc @@ -20,10 +20,13 @@ #include <string> #include "src/runtime/gpu/opencl/opencl_executor.h" #include "src/runtime/kernel/opencl/utils.h" +#include "src/runtime/kernel/opencl/kernel/to_format.h" #include "include/errorcode.h" #include "src/common/utils.h" +#include "src/common/prim_inner.h" namespace mindspore::kernel { +using mindspore::lite::PRIM_TO_FORMAT; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::opencl::MemType; @@ -136,7 +139,7 @@ int OpenCLSubGraph::GenToFormatOp(const std::vector<lite::Tensor *> &in_tensors, } out_tensors->emplace_back(new_tensor); - KernelKey desc{kGPU, kNumberTypeFloat32, schema::PrimitiveType_ToFormat}; + KernelKey desc{kGPU, kNumberTypeFloat32, PRIM_TO_FORMAT}; if (mem_type == MemType::IMG && ocl_runtime_->GetFp16Enable()) { desc.data_type = kNumberTypeFloat16; new_tensor->set_data_type(kNumberTypeFloat16); @@ -149,18 +152,26 @@ int OpenCLSubGraph::GenToFormatOp(const std::vector<lite::Tensor *> &in_tensors, new_tensor = nullptr; return RET_ERROR; } - parameter->op_parameter.type_ = mindspore::schema::PrimitiveType_ToFormat; + parameter->op_parameter.type_ = PRIM_TO_FORMAT; + bool output_shape_setted = true; + for (auto output : *out_tensors) { + if (output->shape().empty() || output->ElementsNum() < 0) { + output_shape_setted = false; + break; + } + } + parameter->op_parameter.infer_flag_ = output_shape_setted; parameter->src_format = src_format; parameter->dst_format = dst_format; parameter->out_mem_type = mem_type; out_parameters->emplace_back(parameter); LiteKernel *in_convert_op = nullptr; if (mem_type == MemType::IMG) { - in_convert_op = - lite::GetOpenCLKernel({in_tensor}, {new_tensor}, reinterpret_cast<OpParameter *>(parameter), context_, desc); + in_convert_op = OpenCLKernelCreator<ToFormatOpenCLKernel>( + {in_tensor}, {new_tensor}, reinterpret_cast<OpParameter *>(parameter), context_, desc); } else { - in_convert_op = - lite::GetOpenCLKernel({new_tensor}, {in_tensor}, reinterpret_cast<OpParameter *>(parameter), context_, desc); + in_convert_op = OpenCLKernelCreator<ToFormatOpenCLKernel>( + {new_tensor}, {in_tensor}, reinterpret_cast<OpParameter *>(parameter), context_, desc); } MS_ASSERT(in_convert_op); if (in_convert_op == nullptr) { @@ -252,10 +263,10 @@ int OpenCLSubGraph::UpdateTensorDataTypePass() { MS_ASSERT(iv); auto cur_outs = iv->out_tensors(); // if softmax is last kernel, output fp32 tensor - if (iv->Type() == schema::PrimitiveType_SoftMax) { + if (iv->Type() == schema::PrimitiveType_Softmax) { bool last_kernel = true; for (auto k : iv->out_kernels()) { - if (k->Type() != schema::PrimitiveType_ToFormat) { + if (static_cast<int>(k->Type()) != lite::PRIM_TO_FORMAT) { last_kernel = false; break; } diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.cc b/mindspore/lite/src/runtime/kernel/opencl/utils.cc index 1be45d6357..a3fd9dfcdd 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/utils.cc +++ b/mindspore/lite/src/runtime/kernel/opencl/utils.cc @@ -18,6 +18,7 @@ #include <fstream> #include <algorithm> #include <vector> +#include <map> #include "src/kernel_registry.h" #include "src/common/file_utils.h" @@ -32,7 +33,7 @@ kernel::LiteKernel *GetOpenCLKernel(const std::vector<Tensor *> &in_tensors, con 
OpParameter *parameter, const InnerContext *ctx, const kernel::KernelKey &key) { auto creator = KernelRegistry::GetInstance()->GetCreator(key); if (creator != nullptr) { - auto kernel = creator(in_tensors, out_tensors, parameter, nullptr, key, nullptr); + auto kernel = creator(in_tensors, out_tensors, parameter, nullptr, key); return kernel; } return nullptr; @@ -41,10 +42,10 @@ kernel::LiteKernel *GetOpenCLKernel(const std::vector<Tensor *> &in_tensors, con namespace mindspore::kernel { -const std::set<schema::PrimitiveType> ArithmeticPrimitives = {schema::PrimitiveType_Mul, - schema::PrimitiveType_Add, - schema::PrimitiveType_Sub, - schema::PrimitiveType_Div, +const std::set<schema::PrimitiveType> ArithmeticPrimitives = {schema::PrimitiveType_MulFusion, + schema::PrimitiveType_AddFusion, + schema::PrimitiveType_SubFusion, + schema::PrimitiveType_DivFusion, schema::PrimitiveType_LogicalAnd, schema::PrimitiveType_LogicalOr, schema::PrimitiveType_Maximum, @@ -63,7 +64,7 @@ const std::set<schema::PrimitiveType> ArithmeticPrimitives = {schema::PrimitiveT const std::set<schema::PrimitiveType> ArithmeticSelfPrimitives = { schema::PrimitiveType_Abs, schema::PrimitiveType_Ceil, schema::PrimitiveType_Cos, - schema::PrimitiveType_Exp, schema::PrimitiveType_Floor, schema::PrimitiveType_Log, + schema::PrimitiveType_ExpFusion, schema::PrimitiveType_Floor, schema::PrimitiveType_Log, schema::PrimitiveType_LogicalNot, schema::PrimitiveType_Round, schema::PrimitiveType_Rsqrt, schema::PrimitiveType_Sin, schema::PrimitiveType_Neg, schema::PrimitiveType_Sqrt, schema::PrimitiveType_Square}; @@ -123,134 +124,76 @@ int GetMaxDivisorStrategy1(int x, int divisor) { } } +std::map<cl_int, std::string> error_infos = { + {CL_SUCCESS, "Success"}, + {CL_DEVICE_NOT_FOUND, "Device not found"}, + {CL_DEVICE_NOT_AVAILABLE, "Device not available"}, + {CL_COMPILER_NOT_AVAILABLE, "Compiler not available"}, + {CL_MEM_OBJECT_ALLOCATION_FAILURE, "Memory object allocation failure"}, + {CL_OUT_OF_RESOURCES, "Out of resources"}, + {CL_OUT_OF_HOST_MEMORY, "Out of host memory"}, + {CL_PROFILING_INFO_NOT_AVAILABLE, "Profiling information not available"}, + {CL_MEM_COPY_OVERLAP, "Memory copy overlap"}, + {CL_IMAGE_FORMAT_MISMATCH, "Image format mismatch"}, + {CL_IMAGE_FORMAT_NOT_SUPPORTED, "Image format not supported"}, + {CL_BUILD_PROGRAM_FAILURE, "Build program failure"}, + {CL_MAP_FAILURE, "Mapping failure"}, + {CL_MISALIGNED_SUB_BUFFER_OFFSET, "Misaligned sub-buffer offset"}, + {CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST, "Execution status error for events in wait list"}, + {CL_COMPILE_PROGRAM_FAILURE, "Compile program failure"}, + {CL_LINKER_NOT_AVAILABLE, "Linker not available"}, + {CL_LINK_PROGRAM_FAILURE, "Link program failure"}, + {CL_DEVICE_PARTITION_FAILED, "Device partition failed"}, + {CL_KERNEL_ARG_INFO_NOT_AVAILABLE, "Kernel argument information not available"}, + {CL_INVALID_VALUE, "Invalid value"}, + {CL_INVALID_DEVICE_TYPE, "Invalid device type"}, + {CL_INVALID_PLATFORM, "Invalid platform"}, + {CL_INVALID_DEVICE, "Invalid device"}, + {CL_INVALID_CONTEXT, "Invalid context"}, + {CL_INVALID_QUEUE_PROPERTIES, "Invalid queue properties"}, + {CL_INVALID_COMMAND_QUEUE, "Invalid command queue"}, + {CL_INVALID_HOST_PTR, "Invalid host pointer"}, + {CL_INVALID_MEM_OBJECT, "Invalid memory object"}, + {CL_INVALID_IMAGE_FORMAT_DESCRIPTOR, "Invalid image format descriptor"}, + {CL_INVALID_IMAGE_SIZE, "Invalid image size"}, + {CL_INVALID_SAMPLER, "Invalid sampler"}, + {CL_INVALID_BINARY, "Invalid binary"}, + 
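
The error-code table being introduced above replaces the long switch in CLErrorCode (deleted just below) with a single std::map lookup plus an explicit fallback string. The pattern in isolation, with only a few representative entries (the real CL_* constants shown have these standard values):

```cpp
#include <cstdio>
#include <map>
#include <string>

// A few representative entries; the patch lists every CL_* status code.
static const std::map<int, std::string> kErrorInfos = {
    {0, "Success"},            // CL_SUCCESS
    {-1, "Device not found"},  // CL_DEVICE_NOT_FOUND
    {-5, "Out of resources"},  // CL_OUT_OF_RESOURCES
};

// Table lookup with a default replaces roughly sixty switch cases.
std::string CLErrorCode(int error_code) {
  auto it = kErrorInfos.find(error_code);
  return it == kErrorInfos.end() ? "Unknown OpenCL error code" : it->second;
}

int main() {
  std::printf("%s\n", CLErrorCode(-5).c_str());
  std::printf("%s\n", CLErrorCode(12345).c_str());
}
```
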
{CL_INVALID_BUILD_OPTIONS, "Invalid build options"}, + {CL_INVALID_PROGRAM, "Invalid program"}, + {CL_INVALID_PROGRAM_EXECUTABLE, "Invalid program executable"}, + {CL_INVALID_KERNEL_NAME, "Invalid kernel name"}, + {CL_INVALID_KERNEL_DEFINITION, "Invalid kernel definition"}, + {CL_INVALID_KERNEL, "Invalid kernel"}, + {CL_INVALID_ARG_INDEX, "Invalid argument index"}, + {CL_INVALID_ARG_VALUE, "Invalid argument value"}, + {CL_INVALID_ARG_SIZE, "Invalid argument size"}, + {CL_INVALID_KERNEL_ARGS, "Invalid kernel arguments"}, + {CL_INVALID_WORK_DIMENSION, "Invalid work dimension"}, + {CL_INVALID_WORK_GROUP_SIZE, "Invalid work group size"}, + {CL_INVALID_WORK_ITEM_SIZE, "Invalid work item size"}, + {CL_INVALID_GLOBAL_OFFSET, "Invalid global offset"}, + {CL_INVALID_EVENT_WAIT_LIST, "Invalid event wait list"}, + {CL_INVALID_EVENT, "Invalid event"}, + {CL_INVALID_OPERATION, "Invalid operation"}, + {CL_INVALID_GL_OBJECT, "Invalid GL object"}, + {CL_INVALID_BUFFER_SIZE, "Invalid buffer size"}, + {CL_INVALID_MIP_LEVEL, "Invalid mip-level"}, + {CL_INVALID_GLOBAL_WORK_SIZE, "Invalid global work size"}, + {CL_INVALID_PROPERTY, "Invalid property"}, + {CL_INVALID_IMAGE_DESCRIPTOR, "Invalid image descriptor"}, + {CL_INVALID_COMPILER_OPTIONS, "Invalid compiler options"}, + {CL_INVALID_LINKER_OPTIONS, "Invalid linker options"}, + {CL_INVALID_DEVICE_PARTITION_COUNT, "Invalid device partition count"}, + {CL_INVALID_PIPE_SIZE, "Invalid pipe size"}, + {CL_INVALID_DEVICE_QUEUE, "Invalid device queue"}, + {CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR, "Invalid GL share group reference KHR"}}; + std::string CLErrorCode(cl_int error_code) { - switch (error_code) { - case CL_SUCCESS: - return "Success"; - case CL_DEVICE_NOT_FOUND: - return "Device not found"; - case CL_DEVICE_NOT_AVAILABLE: - return "Device not available"; - case CL_COMPILER_NOT_AVAILABLE: - return "Compiler not available"; - case CL_MEM_OBJECT_ALLOCATION_FAILURE: - return "Memory object allocation failure"; - case CL_OUT_OF_RESOURCES: - return "Out of resources"; - case CL_OUT_OF_HOST_MEMORY: - return "Out of host memory"; - case CL_PROFILING_INFO_NOT_AVAILABLE: - return "Profiling information not available"; - case CL_MEM_COPY_OVERLAP: - return "Memory copy overlap"; - case CL_IMAGE_FORMAT_MISMATCH: - return "Image format mismatch"; - case CL_IMAGE_FORMAT_NOT_SUPPORTED: - return "Image format not supported"; - case CL_BUILD_PROGRAM_FAILURE: - return "Build program failure"; - case CL_MAP_FAILURE: - return "Mapping failure"; - case CL_MISALIGNED_SUB_BUFFER_OFFSET: - return "Misaligned sub-buffer offset"; - case CL_EXEC_STATUS_ERROR_FOR_EVENTS_IN_WAIT_LIST: - return "Execution status error for events in wait list"; - case CL_COMPILE_PROGRAM_FAILURE: - return "Compile program failure"; - case CL_LINKER_NOT_AVAILABLE: - return "Linker not available"; - case CL_LINK_PROGRAM_FAILURE: - return "Link program failure"; - case CL_DEVICE_PARTITION_FAILED: - return "Device partition failed"; - case CL_KERNEL_ARG_INFO_NOT_AVAILABLE: - return "Kernel argument information not available"; - case CL_INVALID_VALUE: - return "Invalid value"; - case CL_INVALID_DEVICE_TYPE: - return "Invalid device type"; - case CL_INVALID_PLATFORM: - return "Invalid platform"; - case CL_INVALID_DEVICE: - return "Invalid device"; - case CL_INVALID_CONTEXT: - return "Invalid context"; - case CL_INVALID_QUEUE_PROPERTIES: - return "Invalid queue properties"; - case CL_INVALID_COMMAND_QUEUE: - return "Invalid command queue"; - case CL_INVALID_HOST_PTR: - return "Invalid host pointer"; - case 
CL_INVALID_MEM_OBJECT: - return "Invalid memory object"; - case CL_INVALID_IMAGE_FORMAT_DESCRIPTOR: - return "Invalid image format descriptor"; - case CL_INVALID_IMAGE_SIZE: - return "Invalid image size"; - case CL_INVALID_SAMPLER: - return "Invalid sampler"; - case CL_INVALID_BINARY: - return "Invalid binary"; - case CL_INVALID_BUILD_OPTIONS: - return "Invalid build options"; - case CL_INVALID_PROGRAM: - return "Invalid program"; - case CL_INVALID_PROGRAM_EXECUTABLE: - return "Invalid program executable"; - case CL_INVALID_KERNEL_NAME: - return "Invalid kernel name"; - case CL_INVALID_KERNEL_DEFINITION: - return "Invalid kernel definition"; - case CL_INVALID_KERNEL: - return "Invalid kernel"; - case CL_INVALID_ARG_INDEX: - return "Invalid argument index"; - case CL_INVALID_ARG_VALUE: - return "Invalid argument value"; - case CL_INVALID_ARG_SIZE: - return "Invalid argument size"; - case CL_INVALID_KERNEL_ARGS: - return "Invalid kernel arguments"; - case CL_INVALID_WORK_DIMENSION: - return "Invalid work dimension"; - case CL_INVALID_WORK_GROUP_SIZE: - return "Invalid work group size"; - case CL_INVALID_WORK_ITEM_SIZE: - return "Invalid work item size"; - case CL_INVALID_GLOBAL_OFFSET: - return "Invalid global offset"; - case CL_INVALID_EVENT_WAIT_LIST: - return "Invalid event wait list"; - case CL_INVALID_EVENT: - return "Invalid event"; - case CL_INVALID_OPERATION: - return "Invalid operation"; - case CL_INVALID_GL_OBJECT: - return "Invalid GL object"; - case CL_INVALID_BUFFER_SIZE: - return "Invalid buffer size"; - case CL_INVALID_MIP_LEVEL: - return "Invalid mip-level"; - case CL_INVALID_GLOBAL_WORK_SIZE: - return "Invalid global work size"; - case CL_INVALID_PROPERTY: - return "Invalid property"; - case CL_INVALID_IMAGE_DESCRIPTOR: - return "Invalid image descriptor"; - case CL_INVALID_COMPILER_OPTIONS: - return "Invalid compiler options"; - case CL_INVALID_LINKER_OPTIONS: - return "Invalid linker options"; - case CL_INVALID_DEVICE_PARTITION_COUNT: - return "Invalid device partition count"; - case CL_INVALID_PIPE_SIZE: - return "Invalid pipe size"; - case CL_INVALID_DEVICE_QUEUE: - return "Invalid device queue"; - case CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR: - return "Invalid GL share group reference KHR"; - default: - return "Unknown OpenCL error code"; + auto it = error_infos.find(error_code); + if (it == error_infos.end()) { + return "Unknown OpenCL error code"; + } else { + return it->second; } } @@ -322,4 +265,35 @@ void PackNHWCToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, c } } +int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tensor_name, lite::Tensor *tensor, + TypeId expect_data_type, const std::vector<int> &expect_shape) { + if (!tensor->IsConst()) { + MS_LOG(ERROR) << "in " << kernel_name << ": tensor " << tensor_name << " must be Const."; + return RET_ERROR; + } + if (tensor->data_type() != expect_data_type) { + MS_LOG(ERROR) << "in " << kernel_name << ": tensor's data_type must be " << expect_data_type; + return RET_ERROR; + } + if (tensor->shape() != expect_shape) { + std::string expect_shape_str = "("; + for (auto i : expect_shape) { + expect_shape_str += std::to_string(i) + ","; + } + expect_shape_str += ")"; + + std::string tensor_shape_str = "("; + for (auto i : tensor->shape()) { + tensor_shape_str += std::to_string(i) + ","; + } + tensor_shape_str += ")"; + + MS_LOG(ERROR) << "in " << kernel_name + << ": tensor's shape is error. 
expect_shape: " + expect_shape_str + + " tensor->shape(): " + tensor_shape_str; + return RET_ERROR; + } + return RET_OK; +} + } // namespace mindspore::kernel diff --git a/mindspore/lite/src/runtime/kernel/opencl/utils.h b/mindspore/lite/src/runtime/kernel/opencl/utils.h index 680355bc31..7e4ce5822c 100644 --- a/mindspore/lite/src/runtime/kernel/opencl/utils.h +++ b/mindspore/lite/src/runtime/kernel/opencl/utils.h @@ -60,6 +60,9 @@ int GetBroadcastGpuAxis(int ndim, int ori_axis); void PackNHWCToNHWC4(void *src, void *dst, bool src_is_fp16, bool dst_is_fp16, const GpuTensorInfo &tensor); +int CheckParamLikeTensor(const std::string &kernel_name, const std::string &tensor_name, lite::Tensor *tensor, + TypeId expect_data_type, const std::vector<int> &expect_shape); + template <class T1, class T2> void PackNCHWToNC4HW4(void *src, void *dst, int batch, int plane_in, int plane_out, int channel, const std::function<T2(T1)> &to_dtype) { diff --git a/mindspore/lite/src/scheduler.cc b/mindspore/lite/src/scheduler.cc index 9e022daa2d..810e96c54b 100644 --- a/mindspore/lite/src/scheduler.cc +++ b/mindspore/lite/src/scheduler.cc @@ -21,12 +21,15 @@ #include <vector> #include <algorithm> #include "src/tensorlist.h" -#include "src/ops/partial.h" #include "include/errorcode.h" #include "src/common/graph_util.h" #include "src/common/utils.h" #include "src/kernel_registry.h" #include "src/sub_graph_kernel.h" +#include "src/ops/populate/populate_register.h" +#include "src/common/version_manager.h" +#include "src/common/prim_util.h" +#include "src/runtime/infer_manager.h" #include "src/dequant.h" #if GPU_OPENCL #include "src/runtime/kernel/opencl/opencl_subgraph.h" @@ -44,7 +47,9 @@ namespace mindspore::lite { using kernel::KERNEL_ARCH::kCPU; using kernel::KERNEL_ARCH::kGPU; using kernel::KERNEL_ARCH::kNPU; +namespace { constexpr int kMainSubGraphIndex = 0; +} // namespace int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) { if (src_model_ == nullptr) { @@ -64,6 +69,7 @@ int Scheduler::Schedule(std::vector<kernel::LiteKernel *> *dst_kernels) { return ret; } ret = ScheduleSubGraphToKernels(kMainSubGraphIndex, dst_kernels, nullptr, nullptr); + op_parameters_.clear(); if (ret != RET_OK) { MS_LOG(ERROR) << "Schedule main subgraph to kernels failed."; return ret; @@ -104,7 +110,7 @@ int Scheduler::InferNodeShape(const lite::Model::Node *node, bool *infer_shape_i MS_ASSERT(infer_shape_interrupt != nullptr); auto primitive = node->primitive_; MS_ASSERT(primitive != nullptr); - if (primitive->Type() == schema::PrimitiveType_Partial) { + if (IsPartialNode(primitive)) { return InferPartialShape(node, infer_shape_interrupt); } std::vector<Tensor *> inputs; @@ -117,10 +123,23 @@ int Scheduler::InferNodeShape(const lite::Model::Node *node, bool *infer_shape_i if (!infer_valid) { *infer_shape_interrupt = true; } - primitive->set_infer_flag(!(*infer_shape_interrupt)); - auto ret = primitive->InferShape(inputs, outputs); + int schema_version = VersionManager::GetInstance()->GetSchemaVersion(); + auto parame_gen = + PopulateRegistry::GetInstance()->GetParameterCreator(GetPrimitiveType(node->primitive_), schema_version); + if (parame_gen == nullptr) { + MS_LOG(ERROR) << "parameter generator is nullptr."; + return RET_NULL_PTR; + } + auto parameter = parame_gen(primitive); + if (parameter == nullptr) { + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << PrimitiveTypeName(GetPrimitiveType(primitive)); + return RET_ERROR; + } + op_parameters_[node->output_indices_.at(0)] = parameter; + 
parameter->infer_flag_ = !(*infer_shape_interrupt); + auto ret = KernelInferShape(inputs, &outputs, parameter); if (ret == RET_INFER_INVALID) { - primitive->set_infer_flag(false); + parameter->infer_flag_ = false; *infer_shape_interrupt = true; } if (ret == RET_OK) { @@ -138,14 +157,11 @@ int Scheduler::InferPartialShape(const lite::Model::Node *node, bool *infer_shap MS_ASSERT(src_model_ != nullptr); MS_ASSERT(node != nullptr); MS_ASSERT(infer_shape_interrupt != nullptr); - auto primitive = node->primitive_; - MS_ASSERT(primitive != nullptr); - if (primitive->Type() != schema::PrimitiveType_Partial) { + if (!IsPartialNode(node->primitive_)) { MS_LOG(ERROR) << "Node is not a partial"; return RET_PARAM_INVALID; } - auto partial_primitive = reinterpret_cast<lite::Partial *>(node->primitive_); - return InferSubGraphShape(partial_primitive->GetSubGraphIndex(), infer_shape_interrupt); + return InferSubGraphShape(GetPartialGraphIndex(node->primitive_), infer_shape_interrupt); } int Scheduler::InferSubGraphShape(size_t subgraph_index, bool *infer_shape_interrupt) { @@ -162,16 +178,14 @@ int Scheduler::InferSubGraphShape(size_t subgraph_index, bool *infer_shape_inter MS_LOG(ERROR) << "Op " << node->name_ << " should exist in model!"; return RET_ERROR; } + auto type = GetPrimitiveType(primitive); auto ret = InferNodeShape(node, infer_shape_interrupt); if (ret == RET_INFER_INVALID) { - MS_LOG(INFO) << "InferShape interrupted, name: " << node->name_ - << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())) + MS_LOG(INFO) << "InferShape interrupted, name: " << node->name_ << ", type: " << PrimitiveTypeName(type) << ", set infer flag to false."; - primitive->set_infer_flag(false); *infer_shape_interrupt = true; } else if (ret != RET_OK) { - MS_LOG(ERROR) << "InferShape failed, name: " << node->name_ << ", type: " - << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())); + MS_LOG(ERROR) << "InferShape failed, name: " << node->name_ << ", type: " << PrimitiveTypeName(type); return RET_INFER_ERR; } } @@ -179,31 +193,45 @@ int Scheduler::InferSubGraphShape(size_t subgraph_index, bool *infer_shape_inter } kernel::LiteKernel *Scheduler::FindBackendKernel(const std::vector<Tensor *> &in_tensors, - const std::vector<Tensor *> &out_tensors, - const mindspore::lite::PrimitiveC *primitive, - const Model::Node *node) { - MS_ASSERT(primitive != nullptr); + const std::vector<Tensor *> &out_tensors, const Model::Node *node) { + kernel::LiteKernel *kernel = nullptr; TypeId data_type = GetFirstFp32Fp16OrInt8Type(in_tensors); + OpParameter *op_parameter = op_parameters_[node->output_indices_.at(0)]; + if (op_parameter == nullptr) { + MS_LOG(ERROR) << "Can not find OpParameter!type: " << PrimitiveTypeName(GetPrimitiveType(node->primitive_)); + return nullptr; + } + bool infer_shape_interrupt = !op_parameter->infer_flag_; bool need_restore = true; - if (primitive->quant_type() == schema::QuantType_WeightQuant) { + if (node->quant_type_ == schema::QuantType_WeightQuant) { data_type = kNumberTypeFloat32; } - if (!IsPackedOp((schema::PrimitiveType)primitive->Type())) { + if (!IsPackedOp(op_parameter->type_)) { need_restore = false; } - kernel::KernelKey desc{kCPU, data_type, static_cast<schema::PrimitiveType>(primitive->Type())}; + kernel::KernelKey desc{kCPU, data_type, static_cast<schema::PrimitiveType>(op_parameter->type_)}; #if SUPPORT_GPU if (context_->IsGpuEnabled()) { // support more data type like int32 kernel::KernelKey gpu_desc{kGPU, 
kNumberTypeFloat32, desc.type}; if (context_->IsGpuFloat16Enabled()) gpu_desc.data_type = kNumberTypeFloat16; - auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, gpu_desc); - if (kernel != nullptr) { - MS_LOG(DEBUG) << "Get gpu op success: " << schema::EnumNamePrimitiveType(gpu_desc.type) << " " << node->name_; + auto ret = + KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, context_, gpu_desc, op_parameter, &kernel); + if (ret == RET_OK) { + MS_LOG(DEBUG) << "Get gpu op success: " << PrimitiveCurVersionTypeName(gpu_desc.type) << " " << node->name_; return kernel; } else { - MS_LOG(DEBUG) << "Get gpu op failed, scheduler to cpu: " << schema::EnumNamePrimitiveType(gpu_desc.type) << " " + MS_LOG(DEBUG) << "Get gpu op failed, scheduler to cpu: " << PrimitiveCurVersionTypeName(gpu_desc.type) << " " << node->name_; + if (ret == RET_ERROR) { + ret = InferNodeShape(node, &infer_shape_interrupt); + if (ret == RET_INFER_INVALID || ret == RET_OK) { + op_parameter = op_parameters_[node->output_indices_.at(0)]; + } else { + MS_LOG(ERROR) << "Try repeat infer fail: " << node->name_; + return nullptr; + } + } } } #endif @@ -218,13 +246,23 @@ kernel::LiteKernel *Scheduler::FindBackendKernel(const std::vector<Tensor *> &in } } kernel::KernelKey npu_desc{kNPU, desc.data_type, desc.type}; - auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, npu_desc); - if (kernel != nullptr) { - MS_LOG(DEBUG) << "Get npu op success: " << schema::EnumNamePrimitiveType(npu_desc.type) << " " << node->name_; + auto ret = + KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, context_, npu_desc, op_parameter, &kernel); + if (ret == RET_OK) { + MS_LOG(DEBUG) << "Get npu op success: " << PrimitiveCurVersionTypeName(npu_desc.type) << " " << node->name_; return kernel; } else { - MS_LOG(DEBUG) << "Get npu op failed, scheduler to cpu: " << schema::EnumNamePrimitiveType(npu_desc.type) << " " + MS_LOG(DEBUG) << "Get npu op failed, scheduler to cpu: " << PrimitiveCurVersionTypeName(npu_desc.type) << " " << node->name_; + if (ret == RET_ERROR) { + ret = InferNodeShape(node, &infer_shape_interrupt); + if (ret == RET_INFER_INVALID || ret == RET_OK) { + op_parameter = op_parameters_[node->output_indices_.at(0)]; + } else { + MS_LOG(ERROR) << "Try repeat infer fail: " << node->name_; + return nullptr; + } + } } } #endif @@ -232,25 +270,41 @@ kernel::LiteKernel *Scheduler::FindBackendKernel(const std::vector<Tensor *> &in ((context_->IsCpuFloat16Enabled() && data_type == kNumberTypeFloat32) || data_type == kNumberTypeFloat16)) { kernel::KernelKey fp16_cpu_desc{desc.arch, kNumberTypeFloat16, desc.type}; auto tensor_origin_data_map = - DequantUtil::DequantTensor(primitive, in_tensors, fp16_cpu_desc.data_type, need_restore); - auto *kernel = - KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, fp16_cpu_desc); + DequantUtil::DequantTensor(op_parameter, in_tensors, fp16_cpu_desc.data_type, need_restore); + auto ret = + KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, context_, fp16_cpu_desc, op_parameter, &kernel); DequantUtil::RestoreTensorData(tensor_origin_data_map); - if (kernel != nullptr) { - MS_LOG(DEBUG) << "Get fp16 op success: " << schema::EnumNamePrimitiveType(fp16_cpu_desc.type) << " " - << node->name_; + if (ret == RET_OK) { + MS_LOG(DEBUG) << "Get fp16 op success: " << PrimitiveCurVersionTypeName(fp16_cpu_desc.type) << " " << node->name_; return 
kernel; + } else { + MS_LOG(DEBUG) << "Get fp16 op failed, scheduler to cpu: " << PrimitiveCurVersionTypeName(fp16_cpu_desc.type) + << " " << node->name_; + if (ret == RET_ERROR) { + ret = InferNodeShape(node, &infer_shape_interrupt); + if (ret == RET_INFER_INVALID || ret == RET_OK) { + op_parameter = op_parameters_[node->output_indices_.at(0)]; + } else { + MS_LOG(ERROR) << "Try repeat infer fail: " << node->name_; + return nullptr; + } + } } } if (data_type == kNumberTypeFloat16) { MS_LOG(DEBUG) << "Get fp16 op failed, back to fp32 op."; desc.data_type = kNumberTypeFloat32; } - auto tensor_origin_data_map = DequantUtil::DequantTensor(primitive, in_tensors, desc.data_type, need_restore); - auto *kernel = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, primitive, context_, desc); + auto tensor_origin_data_map = DequantUtil::DequantTensor(op_parameter, in_tensors, desc.data_type, need_restore); + auto ret = KernelRegistry::GetInstance()->GetKernel(in_tensors, out_tensors, context_, desc, op_parameter, &kernel); DequantUtil::RestoreTensorData(tensor_origin_data_map); - if (kernel != nullptr) { + if (ret == RET_OK) { return kernel; + } else if (ret == RET_ERROR) { + ret = InferNodeShape(node, &infer_shape_interrupt); + if (!(ret == RET_INFER_INVALID || ret == RET_OK)) { + MS_LOG(ERROR) << "Try repeat infer fail: " << node->name_; + } } return nullptr; } @@ -260,11 +314,10 @@ kernel::LiteKernel *Scheduler::SchedulePartialToKernel(const lite::Model::Node * MS_ASSERT(src_node != nullptr); auto *primitive = src_node->primitive_; MS_ASSERT(primitive != nullptr); - if (primitive->Type() != schema::PrimitiveType_Partial) { + if (!IsPartialNode(primitive)) { return nullptr; } - auto partial_primitive = reinterpret_cast<lite::Partial *>(primitive); - auto sub_graph_index = partial_primitive->GetSubGraphIndex(); + auto sub_graph_index = GetPartialGraphIndex(src_node->primitive_); std::vector<kernel::LiteKernel *> sub_kernels; std::vector<lite::Tensor *> in_tensors; std::vector<lite::Tensor *> out_tensors; @@ -280,15 +333,13 @@ kernel::LiteKernel *Scheduler::SchedulePartialToKernel(const lite::Model::Node * } kernel::LiteKernel *Scheduler::ScheduleNodeToKernel(const lite::Model::Node *src_node) { - auto *primitive = src_node->primitive_; - MS_ASSERT(primitive != nullptr); std::vector<Tensor *> inputs; std::vector<Tensor *> outputs; FindNodeInoutTensors(*src_node, &inputs, &outputs); - auto *kernel = this->FindBackendKernel(inputs, outputs, primitive, src_node); + auto *kernel = this->FindBackendKernel(inputs, outputs, src_node); if (kernel == nullptr) { MS_LOG(ERROR) << "FindBackendKernel return nullptr, name: " << src_node->name_ - << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type())); + << ", type: " << PrimitiveTypeName(GetPrimitiveType(src_node->primitive_)); return nullptr; } SetKernelTensorDataType(kernel); @@ -311,14 +362,15 @@ int Scheduler::ScheduleSubGraphToKernels(size_t subgraph_index, std::vector<kern auto *primitive = node->primitive_; MS_ASSERT(primitive != nullptr); kernel::LiteKernel *kernel = nullptr; - if (primitive->Type() == schema::PrimitiveType_Partial) { // sub_graph + auto prim_type = GetPrimitiveType(primitive); + if (IsPartialNode(primitive)) { // sub_graph kernel = SchedulePartialToKernel(node); } else { // kernel kernel = ScheduleNodeToKernel(node); } if (kernel == nullptr) { - MS_LOG(ERROR) << "FindBackendKernel return nullptr, name: " << node->name_ << ", type: " - << 
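
The FindBackendKernel hunks above all follow one pattern: try a backend (GPU, then NPU, then fp16 CPU, then fp32 CPU) through the registry, and when kernel creation fails with RET_ERROR, re-run shape inference before falling through to the next backend, since the failed attempt may have left shapes stale. A condensed sketch of that fallback loop with the registry stubbed:

```cpp
#include <cstdio>
#include <cstring>
#include <vector>

enum { RET_OK = 0, RET_ERROR = 1, RET_INFER_INVALID = 2 };

struct Kernel { const char *backend; };

// Stub registry: pretend only the fp32 CPU backend can build this kernel.
int GetKernel(const char *backend, Kernel **out) {
  if (std::strcmp(backend, "cpu_fp32") == 0) { *out = new Kernel{backend}; return RET_OK; }
  return RET_ERROR;
}

int ReInfer() { return RET_OK; }  // stands in for the InferNodeShape retry

// Walk the backend preference list; on failure retry inference, then try the
// next backend. A hard inference failure aborts scheduling for this node.
Kernel *FindBackendKernel(const std::vector<const char *> &backends) {
  for (const char *b : backends) {
    Kernel *k = nullptr;
    if (GetKernel(b, &k) == RET_OK) return k;
    int ret = ReInfer();
    if (ret != RET_OK && ret != RET_INFER_INVALID) return nullptr;
  }
  return nullptr;
}

int main() {
  Kernel *k = FindBackendKernel({"gpu", "npu", "cpu_fp16", "cpu_fp32"});
  std::printf("backend = %s\n", k ? k->backend : "none");
  delete k;
}
```
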
@@ -260,11 +314,10 @@ kernel::LiteKernel *Scheduler::SchedulePartialToKernel(const lite::Model::Node *
   MS_ASSERT(src_node != nullptr);
   auto *primitive = src_node->primitive_;
   MS_ASSERT(primitive != nullptr);
-  if (primitive->Type() != schema::PrimitiveType_Partial) {
+  if (!IsPartialNode(primitive)) {
     return nullptr;
   }
-  auto partial_primitive = reinterpret_cast<lite::Partial *>(primitive);
-  auto sub_graph_index = partial_primitive->GetSubGraphIndex();
+  auto sub_graph_index = GetPartialGraphIndex(src_node->primitive_);
   std::vector<kernel::LiteKernel *> sub_kernels;
   std::vector<lite::Tensor *> in_tensors;
   std::vector<lite::Tensor *> out_tensors;
@@ -280,15 +333,13 @@ kernel::LiteKernel *Scheduler::SchedulePartialToKernel(const lite::Model::Node *
 }

 kernel::LiteKernel *Scheduler::ScheduleNodeToKernel(const lite::Model::Node *src_node) {
-  auto *primitive = src_node->primitive_;
-  MS_ASSERT(primitive != nullptr);
   std::vector<Tensor *> inputs;
   std::vector<Tensor *> outputs;
   FindNodeInoutTensors(*src_node, &inputs, &outputs);
-  auto *kernel = this->FindBackendKernel(inputs, outputs, primitive, src_node);
+  auto *kernel = this->FindBackendKernel(inputs, outputs, src_node);
   if (kernel == nullptr) {
     MS_LOG(ERROR) << "FindBackendKernel return nullptr, name: " << src_node->name_
-                  << ", type: " << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
+                  << ", type: " << PrimitiveTypeName(GetPrimitiveType(src_node->primitive_));
     return nullptr;
   }
   SetKernelTensorDataType(kernel);
@@ -311,14 +362,15 @@ int Scheduler::ScheduleSubGraphToKernels(size_t subgraph_index, std::vector<kern
     auto *primitive = node->primitive_;
     MS_ASSERT(primitive != nullptr);
     kernel::LiteKernel *kernel = nullptr;
-    if (primitive->Type() == schema::PrimitiveType_Partial) {  // sub_graph
+    auto prim_type = GetPrimitiveType(primitive);
+    if (IsPartialNode(primitive)) {  // sub_graph
       kernel = SchedulePartialToKernel(node);
     } else {  // kernel
       kernel = ScheduleNodeToKernel(node);
     }
     if (kernel == nullptr) {
-      MS_LOG(ERROR) << "FindBackendKernel return nullptr, name: " << node->name_ << ", type: "
-                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
+      MS_LOG(ERROR) << "FindBackendKernel return nullptr, name: " << node->name_
+                    << ", type: " << PrimitiveTypeName(prim_type);
       return RET_ERROR;
     }
     kernel->set_is_model_output(IsContain(graph_output_node_indexes_, size_t(node_index)));
diff --git a/mindspore/lite/src/scheduler.h b/mindspore/lite/src/scheduler.h
index bd5c9fac17..ebf0fbe28b 100644
--- a/mindspore/lite/src/scheduler.h
+++ b/mindspore/lite/src/scheduler.h
@@ -23,7 +23,6 @@
 #include "src/sub_graph_kernel.h"
 #include "src/inner_context.h"
 #include "include/model.h"
-#include "src/ops/primitive_c.h"

 namespace mindspore::lite {
 class Scheduler {
@@ -46,8 +45,7 @@ class Scheduler {
   // schedule a node to kernel according to context and kernels registered
   kernel::LiteKernel *FindBackendKernel(const std::vector<Tensor *> &in_tensors,
-                                        const std::vector<Tensor *> &out_tensors,
-                                        const mindspore::lite::PrimitiveC *primitive, const Model::Node *node);
+                                        const std::vector<Tensor *> &out_tensors, const Model::Node *node);
   // schedule a partial node to a subgraph_kernel
   kernel::LiteKernel *SchedulePartialToKernel(const lite::Model::Node *src_node);
   // schedule a node to a kernel
@@ -87,6 +85,7 @@ class Scheduler {
   Model *src_model_ = nullptr;
   std::vector<Tensor *> *src_tensors_;
   std::vector<size_t> graph_output_node_indexes_;
+  std::map<int, OpParameter *> op_parameters_;
 };
 }  // namespace mindspore::lite
diff --git a/mindspore/lite/src/sub_graph_kernel.cc b/mindspore/lite/src/sub_graph_kernel.cc
index bb1cc7efe8..b1452faf73 100644
--- a/mindspore/lite/src/sub_graph_kernel.cc
+++ b/mindspore/lite/src/sub_graph_kernel.cc
@@ -20,6 +20,8 @@
 #if defined(ENABLE_ARM64) && defined(ENABLE_FP16)
 #include "src/runtime/kernel/arm/fp16/fp16_op_handler.h"
 #endif
+#include "src/common/version_manager.h"
+#include "src/runtime/infer_manager.h"

 namespace mindspore::kernel {
 using mindspore::lite::RET_ERROR;
@@ -107,9 +109,9 @@ int SubGraphKernel::ReSize(bool is_interrupt) {
       MS_LOG(ERROR) << "all nodes in should be kernel";
       return RET_ERROR;
     }
-    auto primitive = const_cast<mindspore::lite::PrimitiveC *>(kernel->GetPrimitive());
-    if (primitive == nullptr) {
-      MS_LOG(ERROR) << "kernel(" << kernel->name() << ")'s primitive is nullptr!";
+    auto parameter = kernel->op_parameter();
+    if (parameter == nullptr) {
+      MS_LOG(ERROR) << "kernel(" << kernel->name() << ")'s op_parameter is nullptr!";
       return RET_ERROR;
     }
     std::vector<lite::Tensor *> inputs = kernel->in_tensors();
@@ -117,17 +119,18 @@ int SubGraphKernel::ReSize(bool is_interrupt) {
     for (auto &output : outputs) {
       output->FreeData();
     }
-    primitive->set_infer_flag(!is_interrupt);
-    auto ret = primitive->InferShape(inputs, outputs);
+    parameter->infer_flag_ = !is_interrupt;
+
+    auto ret = lite::KernelInferShape(inputs, &outputs, parameter);
     if (ret == RET_INFER_INVALID) {
       MS_LOG(INFO) << "InferShape shouldn't be done before runtime, type:"
-                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()))
+                   << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(kernel->Type()))
                    << "flag set to false.";
-      primitive->set_infer_flag(false);
+      parameter->infer_flag_ = false;
       is_interrupt = true;
     } else if (ret != RET_OK) {
       MS_LOG(ERROR) << "InferShape failed, type: "
-                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(primitive->Type()));
+                    << schema::EnumNamePrimitiveType(static_cast<schema::PrimitiveType>(kernel->Type()));
       return RET_INFER_ERR;
     }
     if (!is_interrupt) {
diff
--git a/mindspore/lite/src/sub_graph_kernel.h b/mindspore/lite/src/sub_graph_kernel.h index a303c7ce37..e291d1e43d 100644 --- a/mindspore/lite/src/sub_graph_kernel.h +++ b/mindspore/lite/src/sub_graph_kernel.h @@ -57,7 +57,7 @@ class SubGraphKernel : public LiteKernel { SubGraphKernel(const std::vector<lite::Tensor *> &inputs, const std::vector<lite::Tensor *> &outputs, std::vector<LiteKernel *> in_kernels, std::vector<LiteKernel *> out_kernels, std::vector<LiteKernel *> nodes, const lite::InnerContext *ctx) - : LiteKernel(nullptr, inputs, outputs, ctx, nullptr), + : LiteKernel(nullptr, inputs, outputs, ctx), nodes_(std::move(nodes)), in_nodes_(std::move(in_kernels)), out_nodes_(std::move(out_kernels)) { diff --git a/mindspore/lite/src/train/loss_kernel.h b/mindspore/lite/src/train/loss_kernel.h index 0df3522a52..37f6f59949 100644 --- a/mindspore/lite/src/train/loss_kernel.h +++ b/mindspore/lite/src/train/loss_kernel.h @@ -23,9 +23,8 @@ class LossKernel : public LiteKernel { public: LossKernel() = default; LossKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const lite::PrimitiveC *primitive) - : LiteKernel(parameter, inputs, outputs, ctx, primitive) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx) + : LiteKernel(parameter, inputs, outputs, ctx) {} ~LossKernel() = default; }; diff --git a/mindspore/lite/src/train/optimizer_kernel.h b/mindspore/lite/src/train/optimizer_kernel.h index 308d12ee56..10e0842335 100644 --- a/mindspore/lite/src/train/optimizer_kernel.h +++ b/mindspore/lite/src/train/optimizer_kernel.h @@ -27,9 +27,8 @@ class OptimizerKernel : public LiteKernel { public: OptimizerKernel() = default; OptimizerKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs, - const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, - const lite::PrimitiveC *primitive, int lr_idx, int grad_idx) - : LiteKernel(parameter, inputs, outputs, ctx, primitive), lr_idx_(lr_idx), grad_idx_(grad_idx) {} + const std::vector<lite::Tensor *> &outputs, const lite::InnerContext *ctx, int lr_idx, int grad_idx) + : LiteKernel(parameter, inputs, outputs, ctx), lr_idx_(lr_idx), grad_idx_(grad_idx) {} ~OptimizerKernel() = default; enum class WeightUpdateMode { NORMAL, VIRTUAL_BATCH }; diff --git a/mindspore/lite/src/train/train_loop.h b/mindspore/lite/src/train/train_loop.h index 392452fdd9..b0c04a516b 100644 --- a/mindspore/lite/src/train/train_loop.h +++ b/mindspore/lite/src/train/train_loop.h @@ -20,13 +20,13 @@ #include <tuple> #include <memory> #include <unordered_map> -#include "src/ops/primitive_c.h" #include "include/train/train_loop.h" #include "include/train/metrics.h" #include "include/train_session.h" - +#include "include/errorcode.h" #include "include/datasets.h" #include "include/iterator.h" +#include "src/common/log_adapter.h" namespace mindspore { namespace lite { diff --git a/mindspore/lite/src/train/train_model.cc b/mindspore/lite/src/train/train_model.cc index 7ca498cf70..bab09e3206 100644 --- a/mindspore/lite/src/train/train_model.cc +++ b/mindspore/lite/src/train/train_model.cc @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include "src/ops/primitive_c.h" #include "src/train/train_model.h" #include "src/common/log_adapter.h" #include "include/errorcode.h" diff --git a/mindspore/lite/src/train/train_populate_parameter.cc b/mindspore/lite/src/train/train_populate_parameter.cc index 36216fd690..ebeef7d222 100644 --- a/mindspore/lite/src/train/train_populate_parameter.cc +++ b/mindspore/lite/src/train/train_populate_parameter.cc @@ -13,318 +13,233 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - #include "src/train/train_populate_parameter.h" #include <algorithm> #include "src/ops/populate/populate_register.h" -#include "src/ops/pooling_grad.h" +#include "src/ops/populate/default_populate.h" +#include "src/ops/populate/strided_slice_populate.h" #include "nnacl/pooling_parameter.h" -#include "src/ops/softmax_cross_entropy.h" -#include "src/ops/sparse_softmax_cross_entropy.h" #include "nnacl/fp32_grad/softmax_grad.h" -#include "src/ops/activation_grad.h" #include "nnacl/fp32/activation_fp32.h" -#include "src/ops/conv2d_grad_filter.h" -#include "src/ops/conv2d_grad_input.h" -#include "src/ops/group_conv2d_grad_input.h" #include "nnacl/conv_parameter.h" -#include "src/ops/power_grad.h" #include "nnacl/power_parameter.h" -#include "src/ops/bias_grad.h" #include "nnacl/arithmetic.h" #include "nnacl/fp32_grad/optimizer.h" -#include "src/ops/apply_momentum.h" -#include "src/ops/sgd.h" -#include "src/ops/bn_grad.h" #include "nnacl/fp32_grad/batch_norm.h" -#include "src/ops/adam.h" #include "nnacl/fp32_grad/dropout_parameter.h" -#include "src/ops/dropout.h" -#include "src/ops/dropout_grad.h" -#include "src/ops/arithmetic.h" -#include "src/ops/oneslike.h" -#include "src/ops/binary_cross_entropy.h" -#include "src/ops/binary_cross_entropy_grad.h" -#include "src/ops/smooth_l1_loss.h" -#include "src/ops/smooth_l1_loss_grad.h" #include "nnacl/fp32_grad/smooth_l1_loss.h" -#include "src/ops/arithmetic_grad.h" -#include "src/ops/populate/strided_slice_populate.h" -namespace mindspore::kernel { - -OpParameter *DefaultPopulateParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - - OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); - if (param == nullptr) { - MS_LOG(ERROR) << "malloc Param for primitive failed."; - return nullptr; - } - - param->type_ = primitive->Type(); - return param; -} -OpParameter *PopulateSmoothL1LossParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +namespace mindspore::kernel { +OpParameter *PopulateSmoothL1LossParameter(const void *prim) { SmoothL1LossParameter *p = reinterpret_cast<SmoothL1LossParameter *>(malloc(sizeof(SmoothL1LossParameter))); if (p == nullptr) { MS_LOG(ERROR) << "malloc SmoothL1LossParameter failed."; return nullptr; } - p->op_parameter_.type_ = primitive->Type(); - - auto smooth_l1_primitive = - reinterpret_cast<mindspore::lite::SmoothL1Loss *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - p->beta_ = smooth_l1_primitive->GetBeta(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SmoothL1Loss(); + p->op_parameter_.type_ = primitive->value_type(); + p->beta_ = value->beta(); return reinterpret_cast<OpParameter *>(p); } -OpParameter 
*PopulateSmoothL1LossGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateSmoothL1LossGradParameter(const void *prim) { SmoothL1LossParameter *p = reinterpret_cast<SmoothL1LossParameter *>(malloc(sizeof(SmoothL1LossParameter))); if (p == nullptr) { MS_LOG(ERROR) << "malloc SmoothL1LossParameter failed."; return nullptr; } - p->op_parameter_.type_ = primitive->Type(); - - auto smooth_l1_primitive = - reinterpret_cast<mindspore::lite::SmoothL1LossGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - p->beta_ = smooth_l1_primitive->GetBeta(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SmoothL1LossGrad(); + p->op_parameter_.type_ = primitive->value_type(); + p->beta_ = value->beta(); return reinterpret_cast<OpParameter *>(p); } -OpParameter *PopulateApplyMomentumParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateApplyMomentumParameter(const void *prim) { ApplyMomentumParameter *p = reinterpret_cast<ApplyMomentumParameter *>(malloc(sizeof(ApplyMomentumParameter))); if (p == nullptr) { MS_LOG(ERROR) << "malloc ApplyMomentumParameter failed."; return nullptr; } - p->op_parameter_.type_ = primitive->Type(); - - auto apply_momentum_primitive = - reinterpret_cast<mindspore::lite::ApplyMomentum *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - p->grad_scale_ = apply_momentum_primitive->GetGradientScale(); - p->use_nesterov_ = apply_momentum_primitive->GetUseNesterov(); - + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ApplyMomentum(); + p->op_parameter_.type_ = primitive->value_type(); + p->grad_scale_ = value->gradient_scale(); + p->use_nesterov_ = value->use_nesterov(); return reinterpret_cast<OpParameter *>(p); } -OpParameter *PopulateBCEParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateBCEParameter(const void *prim) { int32_t *reduction = reinterpret_cast<int32_t *>(malloc(sizeof(int32_t))); if (reduction == nullptr) { MS_LOG(ERROR) << "malloc reduction failed."; return nullptr; } - auto param = - reinterpret_cast<mindspore::lite::BinaryCrossEntropy *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - *reduction = param->GetReduction(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_BinaryCrossEntropy(); + *reduction = value->reduction(); return reinterpret_cast<OpParameter *>(reduction); } -OpParameter *PopulateBCEGradParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateBCEGradParameter(const void *prim) { int32_t *reduction = reinterpret_cast<int32_t *>(malloc(sizeof(int32_t))); if (reduction == nullptr) { MS_LOG(ERROR) << "malloc reduction failed."; return nullptr; } - auto param = - reinterpret_cast<mindspore::lite::BinaryCrossEntropyGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - *reduction = param->GetReduction(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_BinaryCrossEntropyGrad(); + *reduction = value->reduction(); return reinterpret_cast<OpParameter *>(reduction); } -OpParameter *PopulateAdamParameter(const mindspore::lite::PrimitiveC *primitive) { - if 
(primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateAdamParameter(const void *prim) { AdamParameter *p = reinterpret_cast<AdamParameter *>(malloc(sizeof(AdamParameter))); if (p == nullptr) { MS_LOG(ERROR) << "new AdamParameter failed."; return nullptr; } - p->op_parameter_.type_ = primitive->Type(); - - auto apply_momentum_primitive = - reinterpret_cast<mindspore::lite::Adam *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - p->use_nesterov_ = apply_momentum_primitive->GetUseNesterov(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Adam(); + p->op_parameter_.type_ = primitive->value_type(); + p->use_nesterov_ = value->use_nesterov(); return reinterpret_cast<OpParameter *>(p); } -OpParameter *PopulateSgdParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateSgdParameter(const void *prim) { SgdParameter *p = reinterpret_cast<SgdParameter *>(malloc(sizeof(SgdParameter))); if (p == nullptr) { MS_LOG(ERROR) << "malloc SgdParameter failed."; return nullptr; } - p->op_parameter_.type_ = primitive->Type(); - - auto sgd_primitive = reinterpret_cast<mindspore::lite::Sgd *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - p->weight_decay_ = sgd_primitive->GetWeightDecay(); - p->dampening_ = sgd_primitive->GetDampening(); - p->use_nesterov_ = sgd_primitive->GetUseNesterov(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SGD(); + p->op_parameter_.type_ = primitive->value_type(); + p->weight_decay_ = value->weight_decay(); + p->dampening_ = value->dampening(); + p->use_nesterov_ = value->nesterov(); return reinterpret_cast<OpParameter *>(p); } -OpParameter *PopulateSparseSoftmaxCrossEntropyParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateSparseSoftmaxCrossEntropyParameter(const void *prim) { SoftmaxCrossEntropyParameter *sce_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(malloc(sizeof(SoftmaxCrossEntropyParameter))); if (sce_param == nullptr) { MS_LOG(ERROR) << "malloc SoftmaxCrossEntropyParameter failed."; return nullptr; } - auto sce_primitive = reinterpret_cast<mindspore::lite::SparseSoftmaxCrossEntropy *>( - const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - sce_param->is_grad = sce_primitive->GetIsGrad(); - - sce_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_SparseSoftmaxCrossEntropy(); + sce_param->op_parameter_.type_ = primitive->value_type(); + sce_param->is_grad = value->grad(); return reinterpret_cast<OpParameter *>(sce_param); } -OpParameter *PopulateSoftmaxCrossEntropyParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateSoftmaxCrossEntropyParameter(const void *prim) { SoftmaxCrossEntropyParameter *sce_param = reinterpret_cast<SoftmaxCrossEntropyParameter *>(malloc(sizeof(SoftmaxCrossEntropyParameter))); if (sce_param == nullptr) { MS_LOG(ERROR) << "malloc SoftmaxCrossEntropyParameter failed."; 
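Every populate function converted in this hunk follows the same flatbuffer pattern: cast the opaque const void * to schema::Primitive, pull the op-specific table with value_as_X(), and copy the scalar fields into a malloc'd nnacl parameter struct. A minimal annotated instance, reusing the SmoothL1Loss accessors shown above; the function name is hypothetical, and the memset plus the null check on value are extra defensive steps that not every function in this patch performs:

  // Sketch of the converted populate shape (defensive variant, assumed name).
  OpParameter *PopulateSmoothL1LossParameterSafe(const void *prim) {
    auto *primitive = static_cast<const schema::Primitive *>(prim);
    auto *value = primitive->value_as_SmoothL1Loss();
    if (value == nullptr) {  // wrong union member or corrupt model buffer
      MS_LOG(ERROR) << "value_as_SmoothL1Loss returned nullptr.";
      return nullptr;
    }
    auto *p = reinterpret_cast<SmoothL1LossParameter *>(malloc(sizeof(SmoothL1LossParameter)));
    if (p == nullptr) {
      MS_LOG(ERROR) << "malloc SmoothL1LossParameter failed.";
      return nullptr;
    }
    memset(p, 0, sizeof(SmoothL1LossParameter));       // zero fields the op never sets
    p->op_parameter_.type_ = primitive->value_type();  // current-schema type tag
    p->beta_ = value->beta();
    return reinterpret_cast<OpParameter *>(p);
  }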
return nullptr; } + auto primitive = static_cast<const schema::Primitive *>(prim); + sce_param->op_parameter_.type_ = primitive->value_type(); sce_param->is_grad = 0; - sce_param->op_parameter_.type_ = primitive->Type(); return reinterpret_cast<OpParameter *>(sce_param); } -OpParameter *PopulatePoolingGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } +OpParameter *PopulateMaxPoolGradParameter(const void *prim) { PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter))); if (pooling_param == nullptr) { MS_LOG(ERROR) << "malloc PoolingParameter failed."; return nullptr; } - pooling_param->op_parameter_.type_ = primitive->Type(); - auto pooling_primitive = - reinterpret_cast<mindspore::lite::PoolingGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - - pooling_param->global_ = pooling_primitive->GetGlobal(); - pooling_param->window_w_ = pooling_primitive->GetWindowW(); - pooling_param->window_h_ = pooling_primitive->GetWindowH(); - - pooling_param->pad_u_ = pooling_primitive->GetPadUp(); - pooling_param->pad_d_ = pooling_primitive->GetPadDown(); - pooling_param->pad_l_ = pooling_primitive->GetPadLeft(); - pooling_param->pad_r_ = pooling_primitive->GetPadRight(); - pooling_param->stride_w_ = pooling_primitive->GetStrideW(); - pooling_param->stride_h_ = pooling_primitive->GetStrideH(); - - pooling_param->pool_mode_ = PoolMode_No; - pooling_param->round_mode_ = RoundMode_No; + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_MaxPoolGrad(); + pooling_param->op_parameter_.type_ = primitive->value_type(); - switch (pooling_primitive->GetPoolingMode()) { - case schema::PoolMode_MAX_POOLING: - pooling_param->pool_mode_ = PoolMode_MaxPool; - break; - case schema::PoolMode_MEAN_POOLING: - pooling_param->pool_mode_ = PoolMode_AvgPool; - break; - default: - break; - } + pooling_param->global_ = false; + pooling_param->window_w_ = static_cast<int>(value->kernel_size()->Get(1)); + pooling_param->window_h_ = static_cast<int>(value->kernel_size()->Get(0)); - switch (pooling_primitive->GetRoundMode()) { - case schema::RoundMode_FLOOR: - pooling_param->round_mode_ = RoundMode_Floor; - break; - case schema::RoundMode_CEIL: - pooling_param->round_mode_ = RoundMode_Ceil; - break; - default: - break; - } + pooling_param->pad_u_ = 0; + pooling_param->pad_d_ = 0; + pooling_param->pad_l_ = 0; + pooling_param->pad_r_ = 0; + pooling_param->stride_w_ = static_cast<int>(value->strides()->Get(1)); + pooling_param->stride_h_ = static_cast<int>(value->strides()->Get(0)); + + pooling_param->round_mode_ = RoundMode_No; + pooling_param->pool_mode_ = PoolMode_MaxPool; return reinterpret_cast<OpParameter *>(pooling_param); } -OpParameter *PopulateActivationGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; +OpParameter *PopulateAvgPoolGradParameter(const void *prim) { + PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter))); + if (pooling_param == nullptr) { + MS_LOG(ERROR) << "malloc PoolingParameter failed."; return nullptr; } + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_AvgPoolGrad(); + pooling_param->op_parameter_.type_ = primitive->value_type(); + + 
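+  // The flatbuffer kernel_size()/strides() vectors are (H, W) ordered:
+  // index 0 feeds window_h_/stride_h_ and index 1 feeds window_w_/stride_w_.
+  // Pads are zeroed and the pool/round modes hardcoded below, since
+  // MaxPoolGrad and AvgPoolGrad are now separate schema tables, each
+  // implying a fixed pooling mode.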
pooling_param->global_ = false; + pooling_param->window_w_ = static_cast<int>(value->kernel_size()->Get(1)); + pooling_param->window_h_ = static_cast<int>(value->kernel_size()->Get(0)); + + pooling_param->pad_u_ = 0; + pooling_param->pad_d_ = 0; + pooling_param->pad_l_ = 0; + pooling_param->pad_r_ = 0; + pooling_param->stride_w_ = static_cast<int>(value->strides()->Get(1)); + pooling_param->stride_h_ = static_cast<int>(value->strides()->Get(0)); + pooling_param->round_mode_ = RoundMode_No; + pooling_param->pool_mode_ = PoolMode_AvgPool; + return reinterpret_cast<OpParameter *>(pooling_param); +} + +OpParameter *PopulateActivationGradParameter(const void *prim) { ActivationParameter *act_param = reinterpret_cast<ActivationParameter *>(malloc(sizeof(ActivationParameter))); if (act_param == nullptr) { MS_LOG(ERROR) << "malloc ActivationParameter failed."; return nullptr; } - act_param->op_parameter_.type_ = primitive->Type(); - auto activation = - reinterpret_cast<mindspore::lite::ActivationGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - act_param->type_ = static_cast<int>(activation->GetType()); - act_param->alpha_ = activation->GetAlpha(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_ActivationGrad(); + act_param->op_parameter_.type_ = primitive->value_type(); + act_param->type_ = static_cast<int>(value->activation_type()); + act_param->alpha_ = value->alpha(); return reinterpret_cast<OpParameter *>(act_param); } -OpParameter *PopulateConvolutionGradFilterParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - +OpParameter *PopulateConvolutionGradFilterParameter(const void *prim) { ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc Param for conv grad filter failed."; return nullptr; } - param->op_parameter_.type_ = primitive->Type(); - - auto convg_primitive = - reinterpret_cast<mindspore::lite::Conv2DGradFilter *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->kernel_h_ = convg_primitive->GetKernelH(); - param->kernel_w_ = convg_primitive->GetKernelW(); - param->stride_h_ = convg_primitive->GetStrideH(); - param->stride_w_ = convg_primitive->GetStrideW(); - param->dilation_h_ = convg_primitive->GetDilateH(); - param->dilation_w_ = convg_primitive->GetDilateW(); - param->pad_u_ = convg_primitive->GetPadUp(); - param->pad_d_ = convg_primitive->GetPadDown(); - param->pad_l_ = convg_primitive->GetPadLeft(); - param->pad_r_ = convg_primitive->GetPadRight(); - param->group_ = convg_primitive->GetGroup(); + memset(param, 0, sizeof(ConvParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Conv2DBackpropFilterFusion(); + param->op_parameter_.type_ = primitive->value_type(); + + param->kernel_h_ = value->kernel_size()->Get(0); + param->kernel_w_ = value->kernel_size()->Get(1); + param->stride_h_ = value->stride()->Get(0); + param->stride_w_ = value->stride()->Get(1); + param->dilation_h_ = value->dilation()->Get(0); + param->dilation_w_ = value->dilation()->Get(1); + param->pad_u_ = value->pad_list()->Get(0); + param->pad_d_ = value->pad_list()->Get(1); + param->pad_l_ = value->pad_list()->Get(2); + param->pad_r_ = value->pad_list()->Get(3); + param->group_ = value->group(); param->act_type_ = ActType_No; - switch 
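Aside: the filter-gradient populate above is one of the few in this patch that zeroes its freshly malloc'd struct (the memset added above); its input-gradient counterpart below fills the same ConvParameter fields without one, leaving anything it does not assign uninitialized. A defensive variant would always pair the two:

  // Hedged sketch: the zero-init pattern some populate functions here adopt.
  ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
  if (param == nullptr) {
    MS_LOG(ERROR) << "malloc ConvParameter failed.";
    return nullptr;
  }
  memset(param, 0, sizeof(ConvParameter));  // no stale pads or flags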
(convg_primitive->GetActivationType()) { + switch (value->activation_type()) { case schema::ActivationType_RELU: param->act_type_ = ActType_Relu; break; @@ -338,75 +253,29 @@ OpParameter *PopulateConvolutionGradFilterParameter(const mindspore::lite::Primi return reinterpret_cast<OpParameter *>(param); } -OpParameter *PopulateConvolutionGradInputParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - +OpParameter *PopulateConvolutionGradInputParameter(const void *prim) { ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); if (param == nullptr) { MS_LOG(ERROR) << "malloc Param for conv grad filter failed."; return nullptr; } - param->op_parameter_.type_ = primitive->Type(); - - auto convg_primitive = - reinterpret_cast<mindspore::lite::Conv2DGradInput *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->kernel_h_ = convg_primitive->GetKernelH(); - param->kernel_w_ = convg_primitive->GetKernelW(); - param->stride_h_ = convg_primitive->GetStrideH(); - param->stride_w_ = convg_primitive->GetStrideW(); - param->dilation_h_ = convg_primitive->GetDilateH(); - param->dilation_w_ = convg_primitive->GetDilateW(); - param->pad_u_ = convg_primitive->GetPadUp(); - param->pad_d_ = convg_primitive->GetPadDown(); - param->pad_l_ = convg_primitive->GetPadLeft(); - param->pad_r_ = convg_primitive->GetPadRight(); - param->group_ = convg_primitive->GetGroup(); - param->act_type_ = ActType_No; - switch (convg_primitive->GetActivationType()) { - case schema::ActivationType_RELU: - param->act_type_ = ActType_Relu; - break; - case schema::ActivationType_RELU6: - param->act_type_ = ActType_Relu6; - break; - default: - break; - } - - return reinterpret_cast<OpParameter *>(param); -} - -OpParameter *PopulateGroupConvolutionGradInputParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - - ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); - if (param == nullptr) { - MS_LOG(ERROR) << "new Param for conv grad filter failed."; - return nullptr; - } - param->op_parameter_.type_ = primitive->Type(); - - auto convg_primitive = - reinterpret_cast<mindspore::lite::GroupConv2DGradInput *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - param->kernel_h_ = convg_primitive->GetKernelH(); - param->kernel_w_ = convg_primitive->GetKernelW(); - param->stride_h_ = convg_primitive->GetStrideH(); - param->stride_w_ = convg_primitive->GetStrideW(); - param->dilation_h_ = convg_primitive->GetDilateH(); - param->dilation_w_ = convg_primitive->GetDilateW(); - param->pad_u_ = convg_primitive->GetPadUp(); - param->pad_d_ = convg_primitive->GetPadDown(); - param->pad_l_ = convg_primitive->GetPadLeft(); - param->pad_r_ = convg_primitive->GetPadRight(); - param->group_ = convg_primitive->GetGroup(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Conv2DBackpropInputFusion(); + param->op_parameter_.type_ = primitive->value_type(); + + param->kernel_h_ = value->kernel_size()->Get(0); + param->kernel_w_ = value->kernel_size()->Get(1); + param->stride_h_ = value->stride()->Get(0); + param->stride_w_ = value->stride()->Get(1); + param->dilation_h_ = value->dilation()->Get(0); + param->dilation_w_ = value->dilation()->Get(1); + param->pad_u_ 
= value->pad_list()->Get(0); + param->pad_d_ = value->pad_list()->Get(1); + param->pad_l_ = value->pad_list()->Get(2); + param->pad_r_ = value->pad_list()->Get(3); + param->group_ = value->group(); param->act_type_ = ActType_No; - switch (convg_primitive->GetActivationType()) { + switch (value->activation_type()) { case schema::ActivationType_RELU: param->act_type_ = ActType_Relu; break; @@ -420,68 +289,56 @@ OpParameter *PopulateGroupConvolutionGradInputParameter(const mindspore::lite::P return reinterpret_cast<OpParameter *>(param); } -OpParameter *PopulatePowerGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - +OpParameter *PopulatePowerGradParameter(const void *prim) { PowerParameter *power_param = reinterpret_cast<PowerParameter *>(malloc(sizeof(PowerParameter))); if (power_param == nullptr) { MS_LOG(ERROR) << "malloc PowerParameter failed."; return nullptr; } - power_param->op_parameter_.type_ = primitive->Type(); - auto power = reinterpret_cast<mindspore::lite::PowerGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - power_param->power_ = power->GetPower(); - power_param->scale_ = power->GetScale(); - power_param->shift_ = power->GetShift(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_PowerGrad(); + power_param->op_parameter_.type_ = primitive->value_type(); + power_param->power_ = value->power(); + power_param->scale_ = value->scale(); + power_param->shift_ = value->shift(); return reinterpret_cast<OpParameter *>(power_param); } -OpParameter *PopulateBiasGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - +OpParameter *PopulateBiasGradParameter(const void *prim) { ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; return nullptr; } - arithmetic_param->op_parameter_.type_ = primitive->Type(); + auto primitive = static_cast<const schema::Primitive *>(prim); + arithmetic_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(arithmetic_param); } -OpParameter *PopulateBNGradParameter(const mindspore::lite::PrimitiveC *primitive) { - if (primitive == nullptr) { - MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; - return nullptr; - } - +OpParameter *PopulateBNGradParameter(const void *prim) { BNGradParameter *bnGrad_param = reinterpret_cast<BNGradParameter *>(malloc(sizeof(BNGradParameter))); if (bnGrad_param == nullptr) { MS_LOG(ERROR) << "malloc BNGradParameter failed."; return nullptr; } - bnGrad_param->op_parameter_.type_ = primitive->Type(); - auto bngrad = reinterpret_cast<mindspore::lite::BNGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - bnGrad_param->epsilon_ = bngrad->GetEps(); - bnGrad_param->momentum_ = bngrad->GetMomentum(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_BatchNormGrad(); + bnGrad_param->op_parameter_.type_ = primitive->value_type(); + bnGrad_param->epsilon_ = value->epsilon(); return reinterpret_cast<OpParameter *>(bnGrad_param); } -OpParameter *PopulateDropoutParameter(const mindspore::lite::PrimitiveC *primitive) { 
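One field mapping in the rewrite that follows deserves a flag: the legacy primitive exposed GetRatio(), while the new schema field is keep_prob(), and the patch stores it into DropoutParameter::ratio_ unchanged. Both pass the same [0, 1] range check, so nothing would catch a semantic flip. If ratio_ were ever interpreted as the drop probability rather than the keep probability, the mapping would need an explicit conversion; a purely hypothetical sketch, not what this patch does:

  // Hypothetical alternative mapping (NOT the patch's behavior):
  dropout_parameter->ratio_ = 1.0f - value->keep_prob();  // keep prob -> drop prob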
+OpParameter *PopulateDropoutParameter(const void *prim) { DropoutParameter *dropout_parameter = reinterpret_cast<DropoutParameter *>(malloc(sizeof(DropoutParameter))); if (dropout_parameter == nullptr) { MS_LOG(ERROR) << "malloc Dropout Parameter failed."; return nullptr; } memset(dropout_parameter, 0, sizeof(DropoutParameter)); - dropout_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::Dropout *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - dropout_parameter->ratio_ = param->GetRatio(); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_Dropout(); + dropout_parameter->op_parameter_.type_ = primitive->value_type(); + dropout_parameter->ratio_ = value->keep_prob(); if (dropout_parameter->ratio_ < 0.f || dropout_parameter->ratio_ > 1.f) { MS_LOG(ERROR) << "Dropout ratio must be between 0 to 1, got " << dropout_parameter->ratio_; free(dropout_parameter); @@ -490,89 +347,97 @@ OpParameter *PopulateDropoutParameter(const mindspore::lite::PrimitiveC *primiti return reinterpret_cast<OpParameter *>(dropout_parameter); } -OpParameter *PopulateDropoutGradParameter(const mindspore::lite::PrimitiveC *primitive) { - DropoutParameter *dropoutGrad_parameter = reinterpret_cast<DropoutParameter *>(malloc(sizeof(DropoutParameter))); - if (dropoutGrad_parameter == nullptr) { +OpParameter *PopulateDropoutGradParameter(const void *prim) { + DropoutParameter *dropoutgrad_parameter = reinterpret_cast<DropoutParameter *>(malloc(sizeof(DropoutParameter))); + if (dropoutgrad_parameter == nullptr) { MS_LOG(ERROR) << "malloc Dropout Grad Parameter failed."; return nullptr; } - memset(dropoutGrad_parameter, 0, sizeof(DropoutParameter)); - dropoutGrad_parameter->op_parameter_.type_ = primitive->Type(); - auto param = reinterpret_cast<mindspore::lite::DropoutGrad *>(const_cast<mindspore::lite::PrimitiveC *>(primitive)); - dropoutGrad_parameter->ratio_ = param->GetRatio(); - if (dropoutGrad_parameter->ratio_ < 0.f || dropoutGrad_parameter->ratio_ > 1.f) { - MS_LOG(ERROR) << "Dropout Grad ratio must be between 0 to 1, got " << dropoutGrad_parameter->ratio_; - free(dropoutGrad_parameter); + memset(dropoutgrad_parameter, 0, sizeof(DropoutParameter)); + auto primitive = static_cast<const schema::Primitive *>(prim); + auto value = primitive->value_as_DropoutGrad(); + dropoutgrad_parameter->op_parameter_.type_ = primitive->value_type(); + dropoutgrad_parameter->ratio_ = value->keep_prob(); + if (dropoutgrad_parameter->ratio_ < 0.f || dropoutgrad_parameter->ratio_ > 1.f) { + MS_LOG(ERROR) << "Dropout Grad ratio must be between 0 to 1, got " << dropoutgrad_parameter->ratio_; + free(dropoutgrad_parameter); return nullptr; } - return reinterpret_cast<OpParameter *>(dropoutGrad_parameter); + return reinterpret_cast<OpParameter *>(dropoutgrad_parameter); } -OpParameter *PopulateArithmeticGradParameter(const mindspore::lite::PrimitiveC *primitive) { +OpParameter *PopulateArithmeticGradParameter(const void *prim) { ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter))); if (arithmetic_param == nullptr) { MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; return nullptr; } memset(arithmetic_param, 0, sizeof(ArithmeticParameter)); - arithmetic_param->op_parameter_.type_ = primitive->Type(); - arithmetic_param->broadcasting_ = ((lite::ArithmeticGrad *)primitive)->Broadcasting(); - arithmetic_param->ndim_ = ((lite::ArithmeticGrad *)primitive)->NDims(); - - auto 
shape = ((lite::ArithmeticGrad *)primitive)->x1Shape(); - auto source = static_cast<int *>(shape.data()); - std::copy(source, source + shape.size(), arithmetic_param->in_shape0_); - shape = ((lite::ArithmeticGrad *)primitive)->x2Shape(); - source = static_cast<int *>(shape.data()); - std::copy(source, source + shape.size(), arithmetic_param->in_shape1_); - shape = ((lite::ArithmeticGrad *)primitive)->dyShape(); - source = static_cast<int *>(shape.data()); - std::copy(source, source + shape.size(), arithmetic_param->out_shape_); + auto primitive = static_cast<const schema::Primitive *>(prim); + arithmetic_param->op_parameter_.type_ = primitive->value_type(); return reinterpret_cast<OpParameter *>(arithmetic_param); } void PopulateTrainParameters() { - lite::Registry ApplyMomentumParameterRegistry(schema::PrimitiveType_ApplyMomentum, PopulateApplyMomentumParameter); - lite::Registry BiasGradParameterRegistry(schema::PrimitiveType_BiasGrad, PopulateBiasGradParameter); - lite::Registry SoftmaxCrossEntropyParameterRegistry(schema::PrimitiveType_SoftmaxCrossEntropy, - PopulateSoftmaxCrossEntropyParameter); - lite::Registry SparseSoftmaxCrossEntropyParameterRegistry(schema::PrimitiveType_SparseSoftmaxCrossEntropy, - PopulateSparseSoftmaxCrossEntropyParameter); - lite::Registry ActivationParameterRegistry(schema::PrimitiveType_ActivationGrad, PopulateActivationGradParameter); - lite::Registry TupleGetItemParameterRegistry(schema::PrimitiveType_TupleGetItem, DefaultPopulateParameter); - lite::Registry DependParameterRegistry(schema::PrimitiveType_Depend, DefaultPopulateParameter); - lite::Registry Conv2DGradFilterParameterRegistry(schema::PrimitiveType_Conv2DGradFilter, - PopulateConvolutionGradFilterParameter); - lite::Registry Conv2DGradInputParameterRegistry(schema::PrimitiveType_Conv2DGradInput, - PopulateConvolutionGradInputParameter); - lite::Registry GroupConv2DGradInputParameterRegistry(schema::PrimitiveType_GroupConv2DGradInput, - PopulateGroupConvolutionGradInputParameter); - lite::Registry PoolingParameterRegistry(schema::PrimitiveType_PoolingGrad, PopulatePoolingGradParameter); - lite::Registry PowerGradParameterRegistry(schema::PrimitiveType_PowerGrad, PopulatePowerGradParameter); - lite::Registry SgdParameterRegistry(schema::PrimitiveType_Sgd, PopulateSgdParameter); - lite::Registry BNGradParameterRegistry(schema::PrimitiveType_BNGrad, PopulateBNGradParameter); - lite::Registry AdamParameterRegistry(schema::PrimitiveType_Adam, PopulateAdamParameter); - lite::Registry AssignParameterRegistry(schema::PrimitiveType_Assign, DefaultPopulateParameter); - lite::Registry AssignAddParameterRegistry(schema::PrimitiveType_AssignAdd, DefaultPopulateParameter); - lite::Registry BinaryCrossEntropyParameterRegistry(schema::PrimitiveType_BinaryCrossEntropy, PopulateBCEParameter); + lite::Registry ApplyMomentumParameterRegistry(schema::PrimitiveType_ApplyMomentum, PopulateApplyMomentumParameter, + lite::SCHEMA_CUR); + lite::Registry BiasGradParameterRegistry(schema::PrimitiveType_BiasAddGrad, PopulateBiasGradParameter, + lite::SCHEMA_CUR); + lite::Registry SoftmaxCrossEntropyParameterRegistry(schema::PrimitiveType_SoftmaxCrossEntropyWithLogits, + PopulateSoftmaxCrossEntropyParameter, lite::SCHEMA_CUR); + lite::Registry SparseSoftmaxCrossEntropyParameterRegistry( + schema::PrimitiveType_SparseSoftmaxCrossEntropy, PopulateSparseSoftmaxCrossEntropyParameter, lite::SCHEMA_CUR); + lite::Registry ActivationParameterRegistry(schema::PrimitiveType_ActivationGrad, PopulateActivationGradParameter, + 
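Every registration in this function now carries a third argument, the schema-version tag, so the same primitive type can resolve to different populate functions for current models and for legacy v0 models (the train_populate_parameter_v0.cc file added further below presumably registers the v0 translators under the legacy tag). A sketch of how a lookup might consume the tag; GetParameterCreator is an assumed accessor on the populate registry, not shown in this patch:

  // Assumed lookup shape: the populate registry is keyed by (type, version).
  auto creator = lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim_type, lite::SCHEMA_CUR);
  OpParameter *parameter = (creator != nullptr) ? creator(primitive) : nullptr;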
lite::SCHEMA_CUR); + lite::Registry DependParameterRegistry(schema::PrimitiveType_Depend, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); + lite::Registry Conv2DGradFilterParameterRegistry(schema::PrimitiveType_Conv2DBackpropFilterFusion, + PopulateConvolutionGradFilterParameter, lite::SCHEMA_CUR); + lite::Registry Conv2DGradInputParameterRegistry(schema::PrimitiveType_Conv2DBackpropInputFusion, + PopulateConvolutionGradInputParameter, lite::SCHEMA_CUR); + lite::Registry avgPoolParameterRegistry(schema::PrimitiveType_AvgPoolGrad, PopulateAvgPoolGradParameter, + lite::SCHEMA_CUR); + lite::Registry maxPoolParameterRegistry(schema::PrimitiveType_MaxPoolGrad, PopulateMaxPoolGradParameter, + lite::SCHEMA_CUR); + lite::Registry PowerGradParameterRegistry(schema::PrimitiveType_PowerGrad, PopulatePowerGradParameter, + lite::SCHEMA_CUR); + lite::Registry SgdParameterRegistry(schema::PrimitiveType_SGD, PopulateSgdParameter, lite::SCHEMA_CUR); + lite::Registry BNGradParameterRegistry(schema::PrimitiveType_BatchNormGrad, PopulateBNGradParameter, + lite::SCHEMA_CUR); + lite::Registry AdamParameterRegistry(schema::PrimitiveType_Adam, PopulateAdamParameter, lite::SCHEMA_CUR); + lite::Registry AssignParameterRegistry(schema::PrimitiveType_Assign, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); + lite::Registry AssignAddParameterRegistry(schema::PrimitiveType_AssignAdd, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); + lite::Registry BinaryCrossEntropyParameterRegistry(schema::PrimitiveType_BinaryCrossEntropy, PopulateBCEParameter, + lite::SCHEMA_CUR); lite::Registry BinaryCrossEntropyGradParameterRegistry(schema::PrimitiveType_BinaryCrossEntropyGrad, - PopulateBCEGradParameter); - lite::Registry OnesLikeParameterRegistry(schema::PrimitiveType_OnesLike, DefaultPopulateParameter); + PopulateBCEGradParameter, lite::SCHEMA_CUR); + lite::Registry OnesLikeParameterRegistry(schema::PrimitiveType_OnesLike, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); lite::Registry UnsortedSegmentSumParameterRegistry(schema::PrimitiveType_UnsortedSegmentSum, - DefaultPopulateParameter); - lite::Registry DropoutParameterRegistry(schema::PrimitiveType_Dropout, PopulateDropoutParameter); - lite::Registry DropGradParameterRegistry(schema::PrimitiveType_DropoutGrad, PopulateDropoutGradParameter); - lite::Registry MaximumGradParameterRegistry(schema::PrimitiveType_MaximumGrad, PopulateArithmeticGradParameter); - lite::Registry MinimumGradParameterRegistry(schema::PrimitiveType_MinimumGrad, PopulateArithmeticGradParameter); - lite::Registry SmoothL1LossRegistry(schema::PrimitiveType_SmoothL1Loss, PopulateSmoothL1LossParameter); - lite::Registry SmoothL1LossGradRegistry(schema::PrimitiveType_SmoothL1LossGrad, PopulateSmoothL1LossGradParameter); + lite::DefaultPopulateParameter, lite::SCHEMA_CUR); + lite::Registry DropoutParameterRegistry(schema::PrimitiveType_Dropout, PopulateDropoutParameter, lite::SCHEMA_CUR); + lite::Registry DropGradParameterRegistry(schema::PrimitiveType_DropoutGrad, PopulateDropoutGradParameter, + lite::SCHEMA_CUR); + lite::Registry MaximumGradParameterRegistry(schema::PrimitiveType_MaximumGrad, PopulateArithmeticGradParameter, + lite::SCHEMA_CUR); + lite::Registry MinimumGradParameterRegistry(schema::PrimitiveType_MinimumGrad, PopulateArithmeticGradParameter, + lite::SCHEMA_CUR); + lite::Registry SmoothL1LossRegistry(schema::PrimitiveType_SmoothL1Loss, PopulateSmoothL1LossParameter, + lite::SCHEMA_CUR); + lite::Registry SmoothL1LossGradRegistry(schema::PrimitiveType_SmoothL1LossGrad, 
PopulateSmoothL1LossGradParameter, + lite::SCHEMA_CUR); lite::Registry SigmoidCrossEntropyWithLogitsRegistry(schema::PrimitiveType_SigmoidCrossEntropyWithLogits, - DefaultPopulateParameter); + lite::DefaultPopulateParameter, lite::SCHEMA_CUR); lite::Registry SigmoidCrossEntropyWithLogitsGradRegistry(schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, - DefaultPopulateParameter); - lite::Registry FlattenGradParameterRegistry(schema::PrimitiveType_FlattenGrad, DefaultPopulateParameter); + lite::DefaultPopulateParameter, lite::SCHEMA_CUR); + lite::Registry FlattenGradParameterRegistry(schema::PrimitiveType_FlattenGrad, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); lite::Registry StridedSliceGradParameterRegistry(schema::PrimitiveType_StridedSliceGrad, - mindspore::lite::PopulateStridedSliceParameter); + lite::PopulateStridedSliceParameter, lite::SCHEMA_CUR); + lite::Registry AbsGradParameterRegistry(schema::PrimitiveType_AbsGrad, lite::DefaultPopulateParameter, + lite::SCHEMA_CUR); } } // namespace mindspore::kernel diff --git a/mindspore/lite/src/train/train_populate_parameter.h b/mindspore/lite/src/train/train_populate_parameter.h index 0829efbe4f..12fa0dbd78 100644 --- a/mindspore/lite/src/train/train_populate_parameter.h +++ b/mindspore/lite/src/train/train_populate_parameter.h @@ -17,8 +17,6 @@ #ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ #define MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_H_ -#include "src/ops/primitive_c.h" - namespace mindspore::kernel { void PopulateTrainParameters(); diff --git a/mindspore/lite/src/train/train_populate_parameter_v0.cc b/mindspore/lite/src/train/train_populate_parameter_v0.cc new file mode 100644 index 0000000000..e9497472fa --- /dev/null +++ b/mindspore/lite/src/train/train_populate_parameter_v0.cc @@ -0,0 +1,661 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/train/train_populate_parameter_v0.h" +#include <vector> +#include "src/ops/populate/populate_register.h" +#include "schema/model_v0_generated.h" +#include "nnacl/pooling_parameter.h" +#include "nnacl/fp32_grad/softmax_grad.h" +#include "nnacl/fp32/activation_fp32.h" +#include "nnacl/conv_parameter.h" +#include "nnacl/power_parameter.h" +#include "nnacl/arithmetic.h" +#include "nnacl/fp32_grad/optimizer.h" +#include "nnacl/fp32_grad/batch_norm.h" +#include "nnacl/fp32_grad/dropout_parameter.h" +#include "nnacl/fp32_grad/smooth_l1_loss.h" +#include "nnacl/infer/conv2d_grad_filter_infer.h" +#include "nnacl/infer/conv2d_grad_input_infer.h" +#include "nnacl/infer/group_conv2d_grad_input_infer.h" + +namespace mindspore::kernel { +namespace { +OpParameter *DefaultPopulateParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + + OpParameter *param = reinterpret_cast<OpParameter *>(malloc(sizeof(OpParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc Param for primitive failed."; + return nullptr; + } + auto type = prim->value_type(); + switch (prim->value_type()) { + case schema::v0::PrimitiveType_Depend: + param->type_ = schema::PrimitiveType_Depend; + break; + case schema::v0::PrimitiveType_Assign: + param->type_ = schema::PrimitiveType_Assign; + break; + case schema::v0::PrimitiveType_AssignAdd: + param->type_ = schema::PrimitiveType_AssignAdd; + break; + case schema::v0::PrimitiveType_OnesLike: + param->type_ = schema::PrimitiveType_OnesLike; + break; + case schema::v0::PrimitiveType_UnsortedSegmentSum: + param->type_ = schema::PrimitiveType_UnsortedSegmentSum; + break; + case schema::v0::PrimitiveType_SigmoidCrossEntropyWithLogits: + param->type_ = schema::PrimitiveType_SigmoidCrossEntropyWithLogits; + break; + case schema::v0::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad: + param->type_ = schema::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad; + break; + case schema::v0::PrimitiveType_AddGrad: + param->type_ = schema::PrimitiveType_AddGrad; + break; + case schema::v0::PrimitiveType_SubGrad: + param->type_ = schema::PrimitiveType_SubGrad; + break; + case schema::v0::PrimitiveType_MulGrad: + param->type_ = schema::PrimitiveType_MulGrad; + break; + case schema::v0::PrimitiveType_DivGrad: + param->type_ = schema::PrimitiveType_DivGrad; + break; + default: + MS_LOG(ERROR) << "unsupported type: " << schema::v0::EnumNamePrimitiveType(type); + free(param); + return nullptr; + } + + return param; +} + +OpParameter *PopulateSmoothL1LossParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + SmoothL1LossParameter *p = reinterpret_cast<SmoothL1LossParameter *>(malloc(sizeof(SmoothL1LossParameter))); + if (p == nullptr) { + MS_LOG(ERROR) << "malloc SmoothL1LossParameter failed."; + return nullptr; + } + p->op_parameter_.type_ = schema::PrimitiveType_SmoothL1Loss; + + auto smoothL1Loss_prim = prim->value_as_SmoothL1Loss(); + + p->beta_ = smoothL1Loss_prim->beta(); + return reinterpret_cast<OpParameter *>(p); +} + +OpParameter *PopulateSmoothL1LossGradParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + 
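The v0 DefaultPopulateParameter above is the compatibility shim in miniature: it whitelists each legacy schema::v0 type and rewrites param->type_ to the current enum value, so nothing downstream of the populate step ever sees a v0 type id. Extending the whitelist means one explicit case per op; for example, assuming the v0 schema defines the type:

  // Sketch: mapping one more legacy type onto its current counterpart.
  case schema::v0::PrimitiveType_FlattenGrad:
    param->type_ = schema::PrimitiveType_FlattenGrad;
    break;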
auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + SmoothL1LossParameter *p = reinterpret_cast<SmoothL1LossParameter *>(malloc(sizeof(SmoothL1LossParameter))); + if (p == nullptr) { + MS_LOG(ERROR) << "malloc SmoothL1LossParameter failed."; + return nullptr; + } + p->op_parameter_.type_ = schema::PrimitiveType_SmoothL1LossGrad; + + auto smoothL1LossGrad_prim = prim->value_as_SmoothL1LossGrad(); + + p->beta_ = smoothL1LossGrad_prim->beta(); + return reinterpret_cast<OpParameter *>(p); +} + +OpParameter *PopulateApplyMomentumParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + ApplyMomentumParameter *p = reinterpret_cast<ApplyMomentumParameter *>(malloc(sizeof(ApplyMomentumParameter))); + if (p == nullptr) { + MS_LOG(ERROR) << "malloc ApplyMomentumParameter failed."; + return nullptr; + } + p->op_parameter_.type_ = schema::PrimitiveType_ApplyMomentum; + + auto applyMomentum_prim = prim->value_as_ApplyMomentum(); + + p->grad_scale_ = applyMomentum_prim->gradientScale(); + p->use_nesterov_ = applyMomentum_prim->useNesterov(); + + return reinterpret_cast<OpParameter *>(p); +} + +OpParameter *PopulateBCEParameter(const void *primitive) { + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + int32_t *reduction = reinterpret_cast<int32_t *>(malloc(sizeof(int32_t))); + if (reduction == nullptr) { + MS_LOG(ERROR) << "malloc reduction failed."; + return nullptr; + } + auto bCE_prim = prim->value_as_BinaryCrossEntropy(); + *reduction = bCE_prim->reduction(); + return reinterpret_cast<OpParameter *>(reduction); +} + +OpParameter *PopulateBCEGradParameter(const void *primitive) { + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + int32_t *reduction = reinterpret_cast<int32_t *>(malloc(sizeof(int32_t))); + if (reduction == nullptr) { + MS_LOG(ERROR) << "malloc reduction failed."; + return nullptr; + } + auto bCEGrad_prim = prim->value_as_BinaryCrossEntropyGrad(); + + *reduction = bCEGrad_prim->reduction(); + return reinterpret_cast<OpParameter *>(reduction); +} + +OpParameter *PopulateAdamParameter(const void *primitive) { + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + AdamParameter *p = reinterpret_cast<AdamParameter *>(malloc(sizeof(AdamParameter))); + if (p == nullptr) { + MS_LOG(ERROR) << "new AdamParameter failed."; + return nullptr; + } + p->op_parameter_.type_ = schema::PrimitiveType_Adam; + + auto adam_prim = prim->value_as_Adam(); + + p->use_nesterov_ = adam_prim->useNesterov(); + return reinterpret_cast<OpParameter *>(p); +} + +OpParameter *PopulateSgdParameter(const void *primitive) { + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + SgdParameter *p = reinterpret_cast<SgdParameter *>(malloc(sizeof(SgdParameter))); + if (p == nullptr) { + MS_LOG(ERROR) << "malloc SgdParameter failed."; + return nullptr; + } + p->op_parameter_.type_ = schema::PrimitiveType_SGD; + + auto sgd_prim = prim->value_as_Sgd(); + + p->weight_decay_ = sgd_prim->weightDecay(); + p->dampening_ = sgd_prim->dampening(); + p->use_nesterov_ = sgd_prim->useNesterov(); + + return 
reinterpret_cast<OpParameter *>(p); +} + +OpParameter *PopulateSparseSoftmaxCrossEntropyParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + SoftmaxCrossEntropyParameter *sce_param = + reinterpret_cast<SoftmaxCrossEntropyParameter *>(malloc(sizeof(SoftmaxCrossEntropyParameter))); + if (sce_param == nullptr) { + MS_LOG(ERROR) << "malloc SoftmaxCrossEntropyParameter failed."; + return nullptr; + } + auto sparseSoftmaxCrossEntropy_prim = prim->value_as_SparseSoftmaxCrossEntropy(); + + sce_param->is_grad = sparseSoftmaxCrossEntropy_prim->isGrad(); + + sce_param->op_parameter_.type_ = schema::PrimitiveType_SparseSoftmaxCrossEntropy; + return reinterpret_cast<OpParameter *>(sce_param); +} + +OpParameter *PopulateSoftmaxCrossEntropyParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + SoftmaxCrossEntropyParameter *sce_param = + reinterpret_cast<SoftmaxCrossEntropyParameter *>(malloc(sizeof(SoftmaxCrossEntropyParameter))); + if (sce_param == nullptr) { + MS_LOG(ERROR) << "malloc SoftmaxCrossEntropyParameter failed."; + return nullptr; + } + sce_param->is_grad = 0; + sce_param->op_parameter_.type_ = schema::PrimitiveType_SoftmaxCrossEntropyWithLogits; + return reinterpret_cast<OpParameter *>(sce_param); +} + +OpParameter *PopulatePoolingGradParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + PoolingParameter *pooling_param = reinterpret_cast<PoolingParameter *>(malloc(sizeof(PoolingParameter))); + if (pooling_param == nullptr) { + MS_LOG(ERROR) << "malloc PoolingParameter failed."; + return nullptr; + } + + auto poolingGrad_prim = prim->value_as_PoolingGrad(); + + pooling_param->global_ = poolingGrad_prim->global(); + pooling_param->window_w_ = poolingGrad_prim->windowW(); + pooling_param->window_h_ = poolingGrad_prim->windowH(); + + pooling_param->pad_u_ = poolingGrad_prim->padUp(); + pooling_param->pad_d_ = poolingGrad_prim->padDown(); + pooling_param->pad_l_ = poolingGrad_prim->padLeft(); + pooling_param->pad_r_ = poolingGrad_prim->padRight(); + pooling_param->stride_w_ = poolingGrad_prim->strideW(); + pooling_param->stride_h_ = poolingGrad_prim->strideH(); + + pooling_param->pool_mode_ = PoolMode_No; + pooling_param->round_mode_ = RoundMode_No; + + switch (poolingGrad_prim->poolingMode()) { + case schema::v0::PoolMode_MAX_POOLING: + pooling_param->pool_mode_ = PoolMode_MaxPool; + pooling_param->op_parameter_.type_ = schema::PrimitiveType_MaxPoolGrad; + break; + case schema::v0::PoolMode_MEAN_POOLING: + pooling_param->pool_mode_ = PoolMode_AvgPool; + pooling_param->op_parameter_.type_ = schema::PrimitiveType_AvgPoolGrad; + break; + default: + MS_LOG(ERROR) << "unknown pooling mode: " << poolingGrad_prim->poolingMode(); + return nullptr; + } + + switch (poolingGrad_prim->roundMode()) { + case schema::v0::RoundMode_FLOOR: + pooling_param->round_mode_ = RoundMode_Floor; + break; + case schema::v0::RoundMode_CEIL: + pooling_param->round_mode_ = RoundMode_Ceil; + break; + default: + break; + } + return reinterpret_cast<OpParameter *>(pooling_param); +} + +OpParameter *PopulateActivationGradParameter(const void *primitive) { + if 
(primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + + ActivationParameter *act_param = reinterpret_cast<ActivationParameter *>(malloc(sizeof(ActivationParameter))); + if (act_param == nullptr) { + MS_LOG(ERROR) << "malloc ActivationParameter failed."; + return nullptr; + } + act_param->op_parameter_.type_ = schema::PrimitiveType_ActivationGrad; + auto activationGrad_prim = prim->value_as_ActivationGrad(); + + act_param->type_ = static_cast<int>(activationGrad_prim->type()); + act_param->alpha_ = activationGrad_prim->alpha(); + return reinterpret_cast<OpParameter *>(act_param); +} + +OpParameter *PopulateConvolutionGradFilterParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + + ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc Param for conv grad filter failed."; + return nullptr; + } + param->op_parameter_.type_ = schema::PrimitiveType_Conv2DBackpropFilterFusion; + + auto convolutionGradFilter_prim = prim->value_as_Conv2DGradFilter(); + auto fb_vector = convolutionGradFilter_prim->filter_shape(); + auto filter_shape = std::vector<int>(fb_vector->begin(), fb_vector->end()); + if (filter_shape.size() > MAX_SHAPE_SIZE) { + free(param); + MS_LOG(ERROR) << "ConvolutionGradFilter filter shape too big."; + return nullptr; + } + param->kernel_h_ = convolutionGradFilter_prim->kernelH(); + param->kernel_w_ = convolutionGradFilter_prim->kernelW(); + param->stride_h_ = convolutionGradFilter_prim->strideH(); + param->stride_w_ = convolutionGradFilter_prim->strideW(); + param->dilation_h_ = convolutionGradFilter_prim->dilateH(); + param->dilation_w_ = convolutionGradFilter_prim->dilateW(); + param->pad_u_ = convolutionGradFilter_prim->padUp(); + param->pad_d_ = convolutionGradFilter_prim->padDown(); + param->pad_l_ = convolutionGradFilter_prim->padLeft(); + param->pad_r_ = convolutionGradFilter_prim->padRight(); + param->group_ = convolutionGradFilter_prim->group(); + param->act_type_ = ActType_No; + switch (convolutionGradFilter_prim->activationType()) { + case schema::v0::ActivationType_RELU: + param->act_type_ = ActType_Relu; + break; + case schema::v0::ActivationType_RELU6: + param->act_type_ = ActType_Relu6; + break; + default: + break; + } + + return reinterpret_cast<OpParameter *>(param); +} + +OpParameter *PopulateConvolutionGradInputParameter(const void *primitive) { + if (primitive == nullptr) { + MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op."; + return nullptr; + } + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + + ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter))); + if (param == nullptr) { + MS_LOG(ERROR) << "malloc Param for conv grad filter failed."; + return nullptr; + } + param->op_parameter_.type_ = schema::PrimitiveType_Conv2DBackpropInputFusion; + + auto convolutionGradInput_prim = prim->value_as_Conv2DGradInput(); + auto fb_vector = convolutionGradInput_prim->input_shape(); + auto filter_shape = std::vector<int>(fb_vector->begin(), fb_vector->end()); + if (filter_shape.size() > MAX_SHAPE_SIZE) { + free(param); + MS_LOG(ERROR) << "ConvolutionGradInput input shape too big."; + return 
+
+OpParameter *PopulateConvolutionGradInputParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+
+  ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc Param for conv grad input failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_Conv2DBackpropInputFusion;
+
+  auto convolutionGradInput_prim = prim->value_as_Conv2DGradInput();
+  auto fb_vector = convolutionGradInput_prim->input_shape();
+  auto input_shape = std::vector<int>(fb_vector->begin(), fb_vector->end());
+  if (input_shape.size() > MAX_SHAPE_SIZE) {
+    free(param);
+    MS_LOG(ERROR) << "ConvolutionGradInput input shape too big.";
+    return nullptr;
+  }
+  param->kernel_h_ = convolutionGradInput_prim->kernelH();
+  param->kernel_w_ = convolutionGradInput_prim->kernelW();
+  param->stride_h_ = convolutionGradInput_prim->strideH();
+  param->stride_w_ = convolutionGradInput_prim->strideW();
+  param->dilation_h_ = convolutionGradInput_prim->dilateH();
+  param->dilation_w_ = convolutionGradInput_prim->dilateW();
+  param->pad_u_ = convolutionGradInput_prim->padUp();
+  param->pad_d_ = convolutionGradInput_prim->padDown();
+  param->pad_l_ = convolutionGradInput_prim->padLeft();
+  param->pad_r_ = convolutionGradInput_prim->padRight();
+  param->group_ = convolutionGradInput_prim->group();
+  param->act_type_ = ActType_No;
+  switch (convolutionGradInput_prim->activationType()) {
+    case schema::v0::ActivationType_RELU:
+      param->act_type_ = ActType_Relu;
+      break;
+    case schema::v0::ActivationType_RELU6:
+      param->act_type_ = ActType_Relu6;
+      break;
+    default:
+      break;
+  }
+
+  return reinterpret_cast<OpParameter *>(param);
+}
+
+OpParameter *PopulateGroupConvolutionGradInputParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+
+  ConvParameter *param = reinterpret_cast<ConvParameter *>(malloc(sizeof(ConvParameter)));
+  if (param == nullptr) {
+    MS_LOG(ERROR) << "malloc Param for group conv grad input failed.";
+    return nullptr;
+  }
+  param->op_parameter_.type_ = schema::PrimitiveType_Conv2DBackpropInputFusion;
+
+  auto groupConvolutionGradInput_prim = prim->value_as_GroupConv2DGradInput();
+  auto fb_vector = groupConvolutionGradInput_prim->input_shape();
+  auto input_shape = std::vector<int>(fb_vector->begin(), fb_vector->end());
+  if (input_shape.size() > MAX_SHAPE_SIZE) {
+    free(param);
+    MS_LOG(ERROR) << "GroupConvolutionGradInput input shape too big.";
+    return nullptr;
+  }
+  param->kernel_h_ = groupConvolutionGradInput_prim->kernelH();
+  param->kernel_w_ = groupConvolutionGradInput_prim->kernelW();
+  param->stride_h_ = groupConvolutionGradInput_prim->strideH();
+  param->stride_w_ = groupConvolutionGradInput_prim->strideW();
+  param->dilation_h_ = groupConvolutionGradInput_prim->dilateH();
+  param->dilation_w_ = groupConvolutionGradInput_prim->dilateW();
+  param->pad_u_ = groupConvolutionGradInput_prim->padUp();
+  param->pad_d_ = groupConvolutionGradInput_prim->padDown();
+  param->pad_l_ = groupConvolutionGradInput_prim->padLeft();
+  param->pad_r_ = groupConvolutionGradInput_prim->padRight();
+  param->group_ = groupConvolutionGradInput_prim->group();
+  param->act_type_ = ActType_No;
+  switch (groupConvolutionGradInput_prim->activationType()) {
+    case schema::v0::ActivationType_RELU:
+      param->act_type_ = ActType_Relu;
+      break;
+    case schema::v0::ActivationType_RELU6:
+      param->act_type_ = ActType_Relu6;
+      break;
+    default:
+      break;
+  }
+
+  return reinterpret_cast<OpParameter *>(param);
+}
+
+OpParameter *PopulatePowerGradParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+
+  PowerParameter *power_param = reinterpret_cast<PowerParameter *>(malloc(sizeof(PowerParameter)));
+  if (power_param == nullptr) {
+    MS_LOG(ERROR) << "malloc PowerParameter failed.";
+    return nullptr;
+  }
+  power_param->op_parameter_.type_ = schema::PrimitiveType_PowerGrad;
+  auto powerGrad_prim = prim->value_as_PowerGrad();
+
+  power_param->power_ = powerGrad_prim->power();
+  power_param->scale_ = powerGrad_prim->scale();
+  power_param->shift_ = powerGrad_prim->shift();
+  return reinterpret_cast<OpParameter *>(power_param);
+}
+
+OpParameter *PopulateBiasGradParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+
+  ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter)));
+  if (arithmetic_param == nullptr) {
+    MS_LOG(ERROR) << "malloc ArithmeticParameter failed.";
+    return nullptr;
+  }
+  arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_BiasAddGrad;
+  return reinterpret_cast<OpParameter *>(arithmetic_param);
+}
+
+OpParameter *PopulateBNGradParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+
+  BNGradParameter *bnGrad_param = reinterpret_cast<BNGradParameter *>(malloc(sizeof(BNGradParameter)));
+  if (bnGrad_param == nullptr) {
+    MS_LOG(ERROR) << "malloc BNGradParameter failed.";
+    return nullptr;
+  }
+  bnGrad_param->op_parameter_.type_ = schema::PrimitiveType_BatchNormGrad;
+  auto bNGrad_prim = prim->value_as_BNGrad();
+
+  bnGrad_param->epsilon_ = bNGrad_prim->eps();
+  return reinterpret_cast<OpParameter *>(bnGrad_param);
+}
+
+OpParameter *PopulateDropoutParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+  DropoutParameter *dropout_parameter = reinterpret_cast<DropoutParameter *>(malloc(sizeof(DropoutParameter)));
+  if (dropout_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc Dropout Parameter failed.";
+    return nullptr;
+  }
+  memset(dropout_parameter, 0, sizeof(DropoutParameter));
+  dropout_parameter->op_parameter_.type_ = schema::PrimitiveType_Dropout;
+  auto dropout_prim = prim->value_as_Dropout();
+
+  dropout_parameter->ratio_ = dropout_prim->ratio();
+  if (dropout_parameter->ratio_ < 0.f || dropout_parameter->ratio_ > 1.f) {
+    MS_LOG(ERROR) << "Dropout ratio must be between 0 and 1, got " << dropout_parameter->ratio_;
+    free(dropout_parameter);
+    return nullptr;
+  }
+  return reinterpret_cast<OpParameter *>(dropout_parameter);
+}
+
+OpParameter *PopulateDropoutGradParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  auto *prim = static_cast<const schema::v0::Primitive *>(primitive);
+  DropoutParameter *dropoutGrad_parameter = reinterpret_cast<DropoutParameter *>(malloc(sizeof(DropoutParameter)));
+  if (dropoutGrad_parameter == nullptr) {
+    MS_LOG(ERROR) << "malloc Dropout Grad Parameter failed.";
+    return nullptr;
+  }
+  memset(dropoutGrad_parameter, 0, sizeof(DropoutParameter));
+  dropoutGrad_parameter->op_parameter_.type_ = schema::PrimitiveType_DropoutGrad;
+  auto dropoutGrad_prim = prim->value_as_DropoutGrad();
+
+  dropoutGrad_parameter->ratio_ = dropoutGrad_prim->ratio();
+  if (dropoutGrad_parameter->ratio_ < 0.f || dropoutGrad_parameter->ratio_ > 1.f) {
+    MS_LOG(ERROR) << "Dropout Grad ratio must be between 0 and 1, got " << dropoutGrad_parameter->ratio_;
+    free(dropoutGrad_parameter);
+    return nullptr;
+  }
+  return reinterpret_cast<OpParameter *>(dropoutGrad_parameter);
+}
+
+OpParameter *PopulateArithmeticGradParameter(const void *primitive) {
+  if (primitive == nullptr) {
+    MS_LOG(ERROR) << "Primitive is nullptr when populating parameter for op.";
+    return nullptr;
+  }
+  ArithmeticParameter *arithmetic_param = reinterpret_cast<ArithmeticParameter *>(malloc(sizeof(ArithmeticParameter)));
+  if
(arithmetic_param == nullptr) { + MS_LOG(ERROR) << "malloc ArithmeticParameter failed."; + return nullptr; + } + memset(arithmetic_param, 0, sizeof(ArithmeticParameter)); + auto *prim = static_cast<const schema::v0::Primitive *>(primitive); + if (prim->value_type() == schema::v0::PrimitiveType_MaximumGrad) { + arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_MaximumGrad; + } else if (prim->value_type() == schema::v0::PrimitiveType_MinimumGrad) { + arithmetic_param->op_parameter_.type_ = schema::PrimitiveType_MinimumGrad; + } else { + MS_LOG(ERROR) << "unsupported type: " << schema::v0::EnumNamePrimitiveType(prim->value_type()); + free(arithmetic_param); + return nullptr; + } + return reinterpret_cast<OpParameter *>(arithmetic_param); +} + +} // namespace + +void PopulateTrainV0Parameters() { + lite::Registry g_applyMomentumV0ParameterRegistry(schema::v0::PrimitiveType_ApplyMomentum, + PopulateApplyMomentumParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_addGradV0ParameterRegistry(schema::v0::PrimitiveType_AddGrad, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_subGradV0ParameterRegistry(schema::v0::PrimitiveType_SubGrad, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_mulGradV0ParameterRegistry(schema::v0::PrimitiveType_MulGrad, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_divGradV0ParameterRegistry(schema::v0::PrimitiveType_DivGrad, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_biasGradV0ParameterRegistry(schema::v0::PrimitiveType_BiasGrad, PopulateBiasGradParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_softmaxCrossEntropyV0ParameterRegistry( + schema::v0::PrimitiveType_SoftmaxCrossEntropy, PopulateSoftmaxCrossEntropyParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_sparseSoftmaxCrossEntropyV0ParameterRegistry(schema::v0::PrimitiveType_SparseSoftmaxCrossEntropy, + PopulateSparseSoftmaxCrossEntropyParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_activationV0ParameterRegistry(schema::v0::PrimitiveType_ActivationGrad, + PopulateActivationGradParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_tupleGetItemV0ParameterRegistry(schema::v0::PrimitiveType_TupleGetItem, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_dependV0ParameterRegistry(schema::v0::PrimitiveType_Depend, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_conv2DGradFilterV0ParameterRegistry( + schema::v0::PrimitiveType_Conv2DGradFilter, PopulateConvolutionGradFilterParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_conv2DGradInputV0ParameterRegistry( + schema::v0::PrimitiveType_Conv2DGradInput, PopulateConvolutionGradInputParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_groupConv2DGradInputV0ParameterRegistry(schema::v0::PrimitiveType_GroupConv2DGradInput, + PopulateGroupConvolutionGradInputParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_poolingV0ParameterRegistry(schema::v0::PrimitiveType_PoolingGrad, PopulatePoolingGradParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_powerGradV0ParameterRegistry(schema::v0::PrimitiveType_PowerGrad, PopulatePowerGradParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_sgdV0ParameterRegistry(schema::v0::PrimitiveType_Sgd, PopulateSgdParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_bNGradV0ParameterRegistry(schema::v0::PrimitiveType_BNGrad, PopulateBNGradParameter, + mindspore::lite::SCHEMA_V0); + 
lite::Registry g_adamV0ParameterRegistry(schema::v0::PrimitiveType_Adam, PopulateAdamParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_assignV0ParameterRegistry(schema::v0::PrimitiveType_Assign, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_assignAddV0ParameterRegistry(schema::v0::PrimitiveType_AssignAdd, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_binaryCrossEntropyV0ParameterRegistry(schema::v0::PrimitiveType_BinaryCrossEntropy, + PopulateBCEParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_binaryCrossEntropyGradV0ParameterRegistry(schema::v0::PrimitiveType_BinaryCrossEntropyGrad, + PopulateBCEGradParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_onesLikeV0ParameterRegistry(schema::v0::PrimitiveType_OnesLike, DefaultPopulateParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_unsortedSegmentSumV0ParameterRegistry(schema::v0::PrimitiveType_UnsortedSegmentSum, + DefaultPopulateParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_dropoutV0ParameterRegistry(schema::v0::PrimitiveType_Dropout, PopulateDropoutParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_dropGradV0ParameterRegistry(schema::v0::PrimitiveType_DropoutGrad, PopulateDropoutGradParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_maximumGradV0ParameterRegistry(schema::v0::PrimitiveType_MaximumGrad, + PopulateArithmeticGradParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_minimumGradV0ParameterRegistry(schema::v0::PrimitiveType_MinimumGrad, + PopulateArithmeticGradParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_smoothL1LossRegistry(schema::v0::PrimitiveType_SmoothL1Loss, PopulateSmoothL1LossParameter, + mindspore::lite::SCHEMA_V0); + lite::Registry g_smoothL1LossGradRegistry(schema::v0::PrimitiveType_SmoothL1LossGrad, + PopulateSmoothL1LossGradParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_sigmoidCrossEntropyWithLogitsRegistry(schema::v0::PrimitiveType_SigmoidCrossEntropyWithLogits, + DefaultPopulateParameter, mindspore::lite::SCHEMA_V0); + lite::Registry g_sigmoidCrossEntropyWithLogitsGradRegistry( + schema::v0::PrimitiveType_SigmoidCrossEntropyWithLogitsGrad, DefaultPopulateParameter, mindspore::lite::SCHEMA_V0); +} + +} // namespace mindspore::kernel diff --git a/mindspore/lite/src/train/train_populate_parameter_v0.h b/mindspore/lite/src/train/train_populate_parameter_v0.h new file mode 100644 index 0000000000..6a07083408 --- /dev/null +++ b/mindspore/lite/src/train/train_populate_parameter_v0.h @@ -0,0 +1,25 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ +#define MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ + +namespace mindspore::kernel { + +void PopulateTrainV0Parameters(); + +} // namespace mindspore::kernel +#endif // MINDSPORE_LITE_SRC_TRAIN_TRAIN_POPULATE_PARAMETER_V0_H_ diff --git a/mindspore/lite/src/train/train_session.cc b/mindspore/lite/src/train/train_session.cc index def7e18fe9..badd1fdd34 100644 --- a/mindspore/lite/src/train/train_session.cc +++ b/mindspore/lite/src/train/train_session.cc @@ -29,6 +29,7 @@ #include "src/train/optimizer_kernel.h" #include "src/sub_graph_kernel.h" #include "src/train/train_populate_parameter.h" +#include "src/train/train_populate_parameter_v0.h" #include "src/runtime/runtime_api.h" #include "src/executor.h" #include "src/kernel_registry.h" @@ -92,7 +93,16 @@ static kernel::LiteKernel *TSFindKernel(const std::vector<kernel::LiteKernel *> [&searchParameter](const kernel::LiteKernel *k) { return (k->name() == searchParameter); }); return *it; } -TrainSession::TrainSession() { kernel::PopulateTrainParameters(); } +TrainSession::TrainSession() { +#ifdef ENABLE_V0 + if (VersionManager::GetInstance()->CheckV0Schema()) { + kernel::PopulateTrainV0Parameters(); + } +#endif + if (!VersionManager::GetInstance()->CheckV0Schema()) { + kernel::PopulateTrainParameters(); + } +} std::vector<CreatorOp> TrainSession::ReplaceOps() { const std::vector<CreatorOp> replace = { @@ -443,7 +453,7 @@ int TrainSession::OptimizerStep() { } bool TrainSession::IsLossKernel(const kernel::LiteKernel *kernel) const { - return (kernel->Type() == schema::PrimitiveType_SoftmaxCrossEntropy || + return (kernel->Type() == schema::PrimitiveType_SoftmaxCrossEntropyWithLogits || kernel->Type() == schema::PrimitiveType_SparseSoftmaxCrossEntropy || kernel->Type() == schema::PrimitiveType_SmoothL1Loss || kernel->Type() == schema::PrimitiveType_SmoothL1LossGrad || @@ -457,7 +467,7 @@ bool TrainSession::IsGradKernel(const kernel::LiteKernel *kernel) const { } bool TrainSession::IsOptimizer(kernel::LiteKernel *kernel) const { - return ((kernel->Type() == schema::PrimitiveType_Adam) || (kernel->Type() == schema::PrimitiveType_Sgd) || + return ((kernel->Type() == schema::PrimitiveType_Adam) || (kernel->Type() == schema::PrimitiveType_SGD) || (kernel->Type() == schema::PrimitiveType_ApplyMomentum)); } diff --git a/mindspore/lite/src/train/train_session.h b/mindspore/lite/src/train/train_session.h index 0d0073e287..266baef07a 100644 --- a/mindspore/lite/src/train/train_session.h +++ b/mindspore/lite/src/train/train_session.h @@ -20,7 +20,6 @@ #include <tuple> #include <unordered_map> #include <memory> -#include "src/ops/primitive_c.h" #include "include/train_session.h" #include "src/train/train_model.h" #include "src/lite_session.h" diff --git a/mindspore/lite/src/train/transfer_session.h b/mindspore/lite/src/train/transfer_session.h index 0f99bd3eb3..7a9548ce33 100644 --- a/mindspore/lite/src/train/transfer_session.h +++ b/mindspore/lite/src/train/transfer_session.h @@ -20,7 +20,6 @@ #include <tuple> #include <unordered_map> #include <utility> -#include "src/ops/primitive_c.h" #include "include/train_session.h" #include "src/train/train_model.h" #include "src/lite_session.h" diff --git a/mindspore/lite/test/CMakeLists.txt b/mindspore/lite/test/CMakeLists.txt index 8144da213e..0ff1decd2a 100644 --- a/mindspore/lite/test/CMakeLists.txt +++ b/mindspore/lite/test/CMakeLists.txt @@ -32,6 +32,7 @@ file(GLOB KERNEL_OP_SRC ${LITE_DIR}/nnacl/fp32/*.c 
${LITE_DIR}/nnacl/int8/*.c ${LITE_DIR}/nnacl/base/*.c + ${LITE_DIR}/nnacl/infer/*.c ) file(GLOB KERNEL_OP_TRAIN_SRC @@ -132,6 +133,7 @@ set(TEST_LITE_SRC ${LITE_DIR}/src/runtime/runtime_api.cc ${LITE_DIR}/src/runtime/thread_pool.c ${LITE_DIR}/src/runtime/parallel_executor.cc + ${LITE_DIR}/src/runtime/infer_manager.cc ${LITE_DIR}/src/tensor.cc ${LITE_DIR}/src/tensorlist.cc ${LITE_DIR}/src/executor.cc @@ -145,6 +147,8 @@ set(TEST_LITE_SRC ${LITE_DIR}/src/lite_model.cc ${LITE_DIR}/src/scheduler.cc ${LITE_DIR}/src/common/graph_util.cc + ${LITE_DIR}/src/common/prim_util.cc + ${LITE_DIR}/src/common/tensor_util.cc ${LITE_DIR}/src/common/file_utils.cc ${LITE_DIR}/src/common/utils.cc ${LITE_DIR}/src/common/string_util.cc @@ -201,7 +205,6 @@ if(ENABLE_CONVERTER) set(TEST_LITE_SRC ${TEST_LITE_SRC} ${TEST_CASE_TFLITE_PARSERS_SRC} - ${TOP_DIR}/mindspore/core/utils/flags.cc ${LITE_DIR}/tools/common/protobuf_utils.cc ${LITE_DIR}/tools/converter/optimizer.cc ${LITE_DIR}/tools/converter/anf_transform.cc @@ -231,7 +234,7 @@ if(ENABLE_CONVERTER) ${LITE_DIR}/tools/optimizer/graph/weight_format_hardcode_pass.cc ${LITE_DIR}/tools/optimizer/graph/clip_convert_activation_pass.cc ${LITE_DIR}/tools/optimizer/graph/group_depthwise_op_convert_pass.cc - ${LITE_DIR}/tools/optimizer/graph/tflite_inputs_order_exchange_pass.cc + ${LITE_DIR}/tools/optimizer/graph/tflite_inputs_adjust_pass.cc ${LITE_DIR}/tools/optimizer/graph/update_conv2d_param_pass.cc ${LITE_DIR}/tools/optimizer/graph/unused_cast_node_remove_pass.cc ${LITE_DIR}/tools/optimizer/graph/unused_transpose_node_remove_pass.cc @@ -239,13 +242,13 @@ if(ENABLE_CONVERTER) ${LITE_DIR}/tools/optimizer/graph/infershape_pass.cc ${LITE_DIR}/tools/optimizer/graph/slice_prepose_pass.cc ${LITE_DIR}/tools/optimizer/graph/mindir_adjust_pass.cc - ${LITE_DIR}/tools/optimizer/graph/mindir_inputs_adjust_pass.cc ${LITE_DIR}/tools/optimizer/graph/onnx_inputs_adjust_pass.cc ${LITE_DIR}/tools/optimizer/graph/while_pass.cc ${LITE_DIR}/tools/optimizer/graph/if_pass.cc ${LITE_DIR}/tools/optimizer/graph/functionalize_control_op_pass.cc ${LITE_DIR}/tools/optimizer/graph/functionalize_while.cc ${LITE_DIR}/tools/optimizer/graph/inputs_adjust_pass.cc + ${LITE_DIR}/tools/optimizer/graph/primitive_adjust_pass.cc ) endif() ### train @@ -253,6 +256,7 @@ if(SUPPORT_TRAIN) set(TEST_LITE_SRC ${TEST_LITE_SRC} ${LITE_DIR}/src/train/train_populate_parameter.cc + ${LITE_DIR}/src/train/train_populate_parameter_v0.cc ${LITE_DIR}/src/train/train_session.cc ${LITE_DIR}/src/train/transfer_session.cc ${LITE_DIR}/src/train/train_model.cc @@ -261,6 +265,8 @@ if(SUPPORT_TRAIN) else() set(TEST_LITE_SRC ${TEST_LITE_SRC} + ${LITE_DIR}/src/train/train_populate_parameter.cc + ${LITE_DIR}/src/train/train_populate_parameter_v0.cc ${LITE_DIR}/src/lite_session.cc ) endif() @@ -270,6 +276,7 @@ file(GLOB_RECURSE TEST_CASE_KERNEL_SRC ${TEST_DIR}/ut/src/runtime/kernel/arm/fp32/*.cc ${TEST_DIR}/ut/src/runtime/kernel/arm/int8/*.cc ${TEST_DIR}/ut/src/runtime/kernel/arm/string/*.cc + ${TEST_DIR}/ut/nnacl/infer/*.cc ) file(GLOB_RECURSE TEST_CASE_KERNEL_TRAIN_SRC @@ -293,6 +300,7 @@ if(ENABLE_CONVERTER) ${TEST_DIR}/st/converter_test.cc ${TEST_DIR}/st/control_flow_test.cc ${TEST_DIR}/st/sub_graph_test.cc + ${TEST_DIR}/common/import_from_meta_graphT.cc ${TEST_DIR}/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc ${TEST_DIR}/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc ${TEST_DIR}/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc @@ -334,9 +342,9 @@ if(ENABLE_FP16) ) endif() -if(SUPPORT_TRAIN) +if(ENABLE_FP16 AND 
SUPPORT_TRAIN) file(GLOB_RECURSE TEST_CASE_KERNEL_FP16_SRC_GRAD - ${TEST_DIR}/ut/src/runtime/kernel/arm/fp6_grad/*.cc) + ${TEST_DIR}/ut/src/runtime/kernel/arm/fp16_grad/*.cc) list(APPEND TEST_SRC ${TEST_CASE_KERNEL_FP16_SRC_GRAD}) endif() @@ -345,9 +353,6 @@ add_dependencies(lite-test fbs_src) target_link_libraries(lite-test dl mindspore::gtest) -if(SUPPORT_TRAIN) - target_link_libraries(lite-test minddata-lite) -endif() if(ENABLE_MINDRT) target_link_libraries(lite-test mindrt_mid) endif() @@ -358,6 +363,8 @@ endif() if(PLATFORM_ARM) target_link_libraries(lite-test log) +else() + target_link_libraries(lite-test ${SECUREC_LIBRARY} pthread) endif() if(SUPPORT_NPU) @@ -367,7 +374,6 @@ endif() if(ENABLE_CONVERTER) add_dependencies(lite-test fbs_inner_src) target_link_libraries(lite-test - anf_importer_mid anf_exporter_mid tflite_parser_mid caffe_parser_mid @@ -377,12 +383,10 @@ if(ENABLE_CONVERTER) fusion_mid quantizer_mid proto_mid - pthread mindspore::protobuf mindspore::eigen mindspore::json - mindspore_core + -Wl,--whole-archive mindspore_core -Wl,--no-whole-archive mindspore::glog - ${SECUREC_LIBRARY} ) endif() diff --git a/mindspore/lite/test/common/import_from_meta_graphT.cc b/mindspore/lite/test/common/import_from_meta_graphT.cc new file mode 100644 index 0000000000..86a9331899 --- /dev/null +++ b/mindspore/lite/test/common/import_from_meta_graphT.cc @@ -0,0 +1,175 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <vector> +#include <algorithm> +#include "schema/inner/model_generated.h" +#include "frontend/operator/ops.h" +#include "src/param_value_lite.h" +#include "src/common/log_adapter.h" +#include "tools/converter/converter_context.h" +#include "include/errorcode.h" +#include "test/common/import_from_meta_graphT.h" +#include "ir/func_graph.h" + +namespace mindspore::lite { +AnfNodePtr AnfImporterFromMetaGraphT::GetNode(int tensor_id) { + auto n = nodes_.find(tensor_id); + if (n == nodes_.end()) { + return nullptr; + } + return n->second; +} + +void AnfImporterFromMetaGraphT::AddNode(int tensor_id, AnfNodePtr node) { nodes_[tensor_id] = std::move(node); } + +int AnfImporterFromMetaGraphT::ConverterConstTensor() { + MS_ASSERT(nullptr != meta_graph_); + MS_ASSERT(nullptr != func_graph_); + for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) { + auto &tensor = meta_graph_->allTensors.at(i); + MS_ASSERT(tensor != nullptr); + if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) { + continue; + } + auto parameter = func_graph_->add_parameter(); + std::vector<int> shape(tensor->dims.size()); + std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin()); + auto type_id = static_cast<TypeId>(tensor->dataType); + auto type_ptr = TypeIdToType(type_id); + std::vector<int64_t> shape_vector; + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), + [](const int32_t &value) { return static_cast<int64_t>(value); }); + auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); + MS_ASSERT(nullptr != abstract_tensor); + parameter->set_abstract(abstract_tensor); + if (!tensor->name.empty()) { + parameter->set_name(tensor->name); + } else { + parameter->set_name("const-" + std::to_string(i)); + } + + ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); + MS_ASSERT(nullptr != param_value); + param_value->set_tensor_shape(shape); + param_value->set_tensor_type(type_id); + param_value->set_format(tensor->format); + if (!tensor->data.empty()) { + auto size = tensor->data.size(); + char *tensor_data = new (std::nothrow) char[size]; + if (tensor_data == nullptr) { + MS_LOG(ERROR) << "new char[] failed"; + return RET_MEMORY_FAILED; + } + auto ret = memcpy_s(tensor_data, size, tensor->data.data(), size); + if (EOK != ret) { + MS_LOG(ERROR) << "memcpy_s error"; + delete[] tensor_data; + return RET_MEMORY_FAILED; + } + param_value->SetTensorData(tensor_data, size); + parameter->set_default_param(param_value); + } else if (std::find(meta_graph_->inputIndex.begin(), meta_graph_->inputIndex.end(), i) == + meta_graph_->inputIndex.end()) { + parameter->set_default_param(param_value); + } + AddNode(i, parameter); + } + return RET_OK; +} + +ValueNodePtr AnfImporterFromMetaGraphT::ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode) { + return nullptr; +} + +abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTensor( + const std::unique_ptr<schema::TensorT> &tensor) { + MS_ASSERT(nullptr != tensor); + std::vector<int> shape(tensor->dims.size()); + std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin()); + auto type_id = static_cast<TypeId>(tensor->dataType); + auto type_ptr = TypeIdToType(type_id); + std::vector<int64_t> shape_vector; + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), + [](const int32_t &value) { return static_cast<int64_t>(value); }); + auto ptr = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); + 
MS_ASSERT(nullptr != ptr); + return ptr; +} + +int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, + const CNodePtr &dst_cnode) { + return RET_ERROR; +} + +int AnfImporterFromMetaGraphT::ConverterCNode() { + MS_ASSERT(nullptr != meta_graph_); + MS_ASSERT(nullptr != func_graph_); + for (const auto &cNode : meta_graph_->nodes) { + MS_ASSERT(nullptr != cNode); + auto anf_primitive = ConvertPrimitive(cNode); + if (anf_primitive == nullptr) { + MS_LOG(ERROR) << "cannot obtain anf primitive"; + return RET_NULL_PTR; + } + std::vector<AnfNodePtr> op_inputs = {anf_primitive}; + for (int j : cNode->inputIndex) { + auto node = GetNode(j); + if (nullptr == node) { + MS_LOG(ERROR) << "Can't find input node."; + return RET_NULL_PTR; + } + op_inputs.push_back(node); + } + auto new_cnode = func_graph_->NewCNode(op_inputs); + MS_ASSERT(nullptr != new_cnode); + new_cnode->set_fullname_with_scope(cNode->name); + auto status = ConvertAbstract(cNode, new_cnode); + if (status != RET_OK) { + MS_LOG(ERROR) << "ConvertAbstract failed."; + return status; + } + } + return RET_OK; +} + +int AnfImporterFromMetaGraphT::AddReturnCNode() { return RET_ERROR; } + +FuncGraphPtr AnfImporterFromMetaGraphT::Fb2Anf(schema::MetaGraphT *meta_graph) { + if (meta_graph == nullptr) { + MS_LOG(ERROR) << "meta_graph is null"; + ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_NULL_PTR); + return nullptr; + } + AnfImporterFromMetaGraphT anfImporterFromMetaGraphT(meta_graph); + auto ret = anfImporterFromMetaGraphT.ConverterConstTensor(); + if (RET_OK != ret) { + MS_LOG(ERROR) << "ConverterConstTensor failed " << ret; + return nullptr; + } + ret = anfImporterFromMetaGraphT.ConverterCNode(); + if (RET_OK != ret) { + MS_LOG(ERROR) << "ConverterCNode failed " << ret; + return nullptr; + } + ret = anfImporterFromMetaGraphT.AddReturnCNode(); + if (RET_OK != ret) { + MS_LOG(ERROR) << "AddReturnCNode failed " << ret; + return nullptr; + } + return anfImporterFromMetaGraphT.func_graph_; +} +} // namespace mindspore::lite diff --git a/mindspore/lite/test/common/import_from_meta_graphT.h b/mindspore/lite/test/common/import_from_meta_graphT.h new file mode 100644 index 0000000000..5e75004f4f --- /dev/null +++ b/mindspore/lite/test/common/import_from_meta_graphT.h @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ +#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ + +#include <utility> +#include <memory> +#include <unordered_map> +#include "schema/inner/model_generated.h" +#include "abstract/abstract_value.h" +#include "ir/func_graph.h" + +namespace mindspore::lite { +class AnfImporterFromMetaGraphT { + public: + virtual ~AnfImporterFromMetaGraphT() = default; + + static FuncGraphPtr Fb2Anf(schema::MetaGraphT *meta_graph); + + private: + explicit AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph) : meta_graph_(meta_graph) { + this->func_graph_ = std::make_shared<FuncGraph>(); + } + + int ConverterConstTensor(); + + int ConverterCNode(); + + ValueNodePtr ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode); + + static abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor); + + int ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode); + + int AddReturnCNode(); + + AnfNodePtr GetNode(int tensor_id); + + void AddNode(int tensor_id, AnfNodePtr node); + + private: + std::unordered_map<int, AnfNodePtr> nodes_; + schema::MetaGraphT *meta_graph_; + FuncGraphPtr func_graph_; +}; + +FuncGraphPtr Fb2Anf(schema::MetaGraphT *meta_graph); +} // namespace mindspore::lite + +#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ diff --git a/mindspore/lite/test/models_for_process_only.cfg b/mindspore/lite/test/models_for_process_only.cfg index d0217c2761..e6401e4600 100644 --- a/mindspore/lite/test/models_for_process_only.cfg +++ b/mindspore/lite/test/models_for_process_only.cfg @@ -2,10 +2,6 @@ lite-model_arbitrary-image-stylization-inceptionv3_dr_transfer_1.tflite lite-model_arbitrary-image-stylization-inceptionv3_int8_transfer_1.tflite lite-model_arbitrary-image-stylization-inceptionv3-dynamic-shapes_dr_transfer_1.tflite;2;1,1,1,100:1,64,64,3 lite-model_cartoongan_dr_1.tflite -mindspore_efficientnet_b0.mindir -mindspore_efficientnet_b4minus.mindir -mindspore_tinynet-a.mindir -mindspore_tinynet-e.mindir lite-model_deeplabv3-mobilenetv2_1_default_1.tflite lite-model_deeplabv3-mobilenetv2_dm05_1_default_1.tflite lite-model_deeplabv3-mobilenetv2-int8_1_default_1.tflite diff --git a/mindspore/lite/test/models_gpu_fp32.cfg b/mindspore/lite/test/models_gpu_fp32.cfg index 9123df61a8..545506c7d3 100644 --- a/mindspore/lite/test/models_gpu_fp32.cfg +++ b/mindspore/lite/test/models_gpu_fp32.cfg @@ -23,4 +23,3 @@ landmark PoseNet_dla_17_x512 age_new plat_isface -efficientnet.mindir diff --git a/mindspore/lite/test/models_mindspore.cfg b/mindspore/lite/test/models_mindspore.cfg index 889f9a190a..6fe2d77fb6 100644 --- a/mindspore/lite/test/models_mindspore.cfg +++ b/mindspore/lite/test/models_mindspore.cfg @@ -1,26 +1,4 @@ -ssd.mindir 1.5 -mobilenetv2_438.mindir 1.5 -gate_u_net_small-1_110.mindir 1.5 -shufflenetv2.mindir 1.5 -#inceptionv3.mindir 1.5 -cyclegan_AtoB.mindir 0.5 -cyclegan_BtoA.mindir 0.5 -googlenet.mindir 1.5 -retinaface_732_1280_iod.mindir 1.5 -mobilefacenet_iod.mindir 1.5 -effnet_iod.mindir 1.5 -resnext50.mindir 1.5 -ocr_mobilenetV2.mindir 1.5 -mobilenet_quant.mindir 5 -mindspore_ghostnet_ssd_13x.mindir 1.5 -mindspore_ghost-nose-pets-811.mindir 0.5 -mindspore_ghost-pets-8244.mindir 1.5 -mindspore_ghostnet600M-pets.mindir 1.5 -mindspore_ghostnet_1x_pets_int8.mindir 12 -mindspore_deeplab_v3_s16.mindir 6.5 -CenterNet_MultiPose_ascend.mindir 0.5 -googlenet_1202.mindir 0.5 
-inceptionv3_1203.mindir 0.5 -mobilenetv2_gpu.mindir 0.5 -resnet50_1202.mindir 0.5 -ssd_1130.mindir 0.5 +deeplabv3.r1.1.mindir 1.5 +mobilenetv2.r1.1.mindir 0.5 +ssd.r1.1.mindir 0.5 +ssd_ghostnet.r1.1.mindir 2.0 diff --git a/mindspore/lite/test/models_mindspore_mixbit.cfg b/mindspore/lite/test/models_mindspore_mixbit.cfg index f7ba8d439f..d1c66733f8 100644 --- a/mindspore/lite/test/models_mindspore_mixbit.cfg +++ b/mindspore/lite/test/models_mindspore_mixbit.cfg @@ -1 +1 @@ -efficientnet.mindir 41.37 9.98 +#efficientnet.mindir 41.37 9.98 diff --git a/mindspore/lite/test/models_mindspore_train.cfg b/mindspore/lite/test/models_mindspore_train.cfg index babe893a79..69b7aa63e4 100644 --- a/mindspore/lite/test/models_mindspore_train.cfg +++ b/mindspore/lite/test/models_mindspore_train.cfg @@ -1 +1 @@ -efficientnet.mindir +#efficientnet.mindir diff --git a/mindspore/lite/test/models_mindspore_weightquant.cfg b/mindspore/lite/test/models_mindspore_weightquant.cfg index 683fef9f59..f954cf8ce8 100644 --- a/mindspore/lite/test/models_mindspore_weightquant.cfg +++ b/mindspore/lite/test/models_mindspore_weightquant.cfg @@ -1,3 +1,3 @@ -retinaface_732_1280_iod.mindir 16.9 -mobilefacenet_iod.mindir 13.5 +#retinaface_732_1280_iod.mindir 16.9 +#mobilefacenet_iod.mindir 13.5 #effnet_iod.mindir diff --git a/mindspore/lite/test/models_ms_train.cfg b/mindspore/lite/test/models_ms_train.cfg index fe1cbf1c5d..5724ef959d 100644 --- a/mindspore/lite/test/models_ms_train.cfg +++ b/mindspore/lite/test/models_ms_train.cfg @@ -1,15 +1,8 @@ -mini_alexnet -mobilenetv1 -mobilenetv2 -mobilenetv3 -lenet -effnet -effnet_tune -resnet -googlenet -# densenet -# shufflenetv2 -#nin -# one_net -# lenetv1 +mini_alexnet_r1.1 +mobilenetv1_r1.1 +mobilenetv2_r1.1 +lenet_r1.1 +effnet_r1.1 +effnet_tune_r1.1 +googlenet_r1.1 #LAST diff --git a/mindspore/lite/test/models_onnx_fp16.cfg b/mindspore/lite/test/models_onnx_fp16.cfg index 03213a338c..fdce3feae7 100644 --- a/mindspore/lite/test/models_onnx_fp16.cfg +++ b/mindspore/lite/test/models_onnx_fp16.cfg @@ -26,10 +26,10 @@ crnn_lite_lstm_v2.onnx;32,32,32,1 0.3 psenet_lite_mbv2.onnx;1,32,32,3 0.6 super-resolution-10.onnx;1,224,224,1 4.5 tinyyolov2-8.onnx;1,416,416,3 5.5 -ml_2012_ocr_cn.onnx -1 +#ml_2012_ocr_cn.onnx -1 #ml_2012_ocr_cn_noLSTM.onnx 1 -candy-9.onnx 5 -mosaic-9.onnx 4 +#candy-9.onnx 5 +#mosaic-9.onnx 4 pointilism-9.onnx 3 rain-princess-9.onnx 5 udnie-9.onnx 3 diff --git a/mindspore/lite/test/run_benchmark_nets.sh b/mindspore/lite/test/run_benchmark_nets.sh index 79d07f75cd..6208d1f2f6 100755 --- a/mindspore/lite/test/run_benchmark_nets.sh +++ b/mindspore/lite/test/run_benchmark_nets.sh @@ -22,7 +22,7 @@ function Run_Converter() { # Convert tf models: while read line; do tf_line_info=${line} - if [[ $model_name == \#* ]]; then + if [[ $tf_line_info == \#* ]]; then continue fi model_name=`echo ${tf_line_info}|awk -F ' ' '{print $1}'` @@ -371,7 +371,7 @@ function Run_x86() { length=${#model_name_and_input_num} input_shapes=${line:length+1} tf_line_info=${model_name_and_input_num} - if [[ $model_name == \#* ]]; then + if [[ $tf_line_info == \#* ]]; then continue fi model_name=`echo ${tf_line_info}|awk -F ' ' '{print $1}'` @@ -457,6 +457,45 @@ function Run_x86() { fi done < ${models_onnx_config} + # Run mindspore converted models: + while read line; do + mindspore_line_info=${line} + if [[ $mindspore_line_info == \#* ]]; then + continue + fi + model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` + accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` + 
echo "---------------------------------------------------------" >> "${run_x86_log_file}" + echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64' >> "${run_x86_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64 || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_log_file}" + if [ $? = 0 ]; then + run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_config} + + # Run mindspore converted train models: + while read line; do + model_name=${line} + if [[ $model_name == \#* ]]; then + continue + fi + echo ${model_name}'_train' >> "${run_x86_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64' >> "${run_x86_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64 || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_log_file}" + if [ $? 
= 0 ]; then + run_result='x86: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_train_config} + # Run tflite post training quantization converted models: while read line; do posttraining_line_info=${line} @@ -526,45 +565,6 @@ function Run_x86() { fi done < ${models_tflite_awaretraining_config} - # Run mindspore converted train models: - while read line; do - model_name=${line} - if [[ $model_name == \#* ]]; then - continue - fi - echo ${model_name}'_train' >> "${run_x86_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64' >> "${run_x86_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64 || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_log_file}" - if [ $? = 0 ]; then - run_result='x86: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='x86: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_train_config} - - # Run mindspore converted models: - while read line; do - mindspore_line_info=${line} - if [[ $mindspore_line_info == \#* ]]; then - continue - fi - model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` - accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` - echo "---------------------------------------------------------" >> "${run_x86_log_file}" - echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64' >> "${run_x86_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64 || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> 
"${run_x86_log_file}" - if [ $? = 0 ]; then - run_result='x86: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='x86: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_config} - # Run tflite weight quantization converted models: while read line; do weight_quant_line_info=${line} @@ -750,6 +750,45 @@ function Run_x86_sse() { fi done < ${models_onnx_config} + # Run mindspore converted models: + while read line; do + mindspore_line_info=${line} + if [[ $mindspore_line_info == \#* ]]; then + continue + fi + model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` + accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` + echo "---------------------------------------------------------" >> "${run_x86_sse_log_file}" + echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_sse_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-sse' >> "${run_x86_sse_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-sse || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_sse_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_sse_log_file}" + if [ $? 
= 0 ]; then + run_result='x86_sse: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86_sse: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_config} + + # Run mindspore converted train models: + while read line; do + model_name=${line} + if [[ $model_name == \#* ]]; then + continue + fi + echo ${model_name}'_train' >> "${run_x86_sse_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-sse' >> "${run_x86_sse_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-sse || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_sse_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_sse_log_file}" + if [ $? = 0 ]; then + run_result='x86_sse: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86_sse: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_train_config} + # Run tflite post training quantization converted models: while read line; do posttraining_line_info=${line} @@ -819,45 +858,6 @@ function Run_x86_sse() { fi done < ${models_tflite_awaretraining_config} - # Run mindspore converted train models: - while read line; do - model_name=${line} - if [[ $model_name == \#* ]]; then - continue - fi - echo ${model_name}'_train' >> "${run_x86_sse_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-sse' >> "${run_x86_sse_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-sse || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_sse_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_sse_log_file}" - if [ $? 
= 0 ]; then - run_result='x86_sse: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='x86_sse: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_train_config} - - # Run mindspore converted models: - while read line; do - mindspore_line_info=${line} - if [[ $mindspore_line_info == \#* ]]; then - continue - fi - model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` - accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` - echo "---------------------------------------------------------" >> "${run_x86_sse_log_file}" - echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_sse_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-sse' >> "${run_x86_sse_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-sse || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_sse_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_sse_log_file}" - if [ $? 
= 0 ]; then - run_result='x86_sse: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='x86_sse: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_config} - # Run tflite weight quantization converted models: while read line; do weight_quant_line_info=${line} @@ -1042,6 +1042,45 @@ function Run_x86_avx() { fi done < ${models_onnx_config} + # Run mindspore converted models: + while read line; do + mindspore_line_info=${line} + if [[ $mindspore_line_info == \#* ]]; then + continue + fi + model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` + accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` + echo "---------------------------------------------------------" >> "${run_x86_avx_log_file}" + echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_avx_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-avx' >> "${run_x86_avx_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-avx || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_avx_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_avx_log_file}" + if [ $? 
= 0 ]; then + run_result='x86_avx: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86_avx: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_config} + + # Run mindspore converted train models: + while read line; do + model_name=${line} + if [[ $model_name == \#* ]]; then + continue + fi + echo ${model_name}'_train' >> "${run_x86_avx_log_file}" + echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-avx' >> "${run_x86_avx_log_file}" + cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-avx || return 1 + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_avx_log_file}" + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_avx_log_file}" + if [ $? = 0 ]; then + run_result='x86_avx: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='x86_avx: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_train_config} + # Run tflite post training quantization converted models: while read line; do posttraining_line_info=${line} @@ -1111,45 +1150,6 @@ function Run_x86_avx() { fi done < ${models_tflite_awaretraining_config} - # Run mindspore converted train models: - while read line; do - model_name=${line} - if [[ $model_name == \#* ]]; then - continue - fi - echo ${model_name}'_train' >> "${run_x86_avx_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-avx' >> "${run_x86_avx_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-avx || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'_train.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.train.ms.out' >> "${run_x86_avx_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}'_train'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.train.ms.out --accuracyThreshold=1.5 >> "${run_x86_avx_log_file}" - if [ $? 
= 0 ]; then - run_result='x86_avx: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='x86_avx: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_train_config} - - # Run mindspore converted models: - while read line; do - mindspore_line_info=${line} - if [[ $mindspore_line_info == \#* ]]; then - continue - fi - model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` - accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` - echo "---------------------------------------------------------" >> "${run_x86_avx_log_file}" - echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_x86_avx_log_file}" - echo 'cd '${x86_path}'/mindspore-lite-'${version}'-inference-linux-x64-avx' >> "${run_x86_avx_log_file}" - cd ${x86_path}/mindspore-lite-${version}-inference-linux-x64-avx || return 1 - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile='${ms_models_path}'/'${model_name}'.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/'${model_name}'.ms.out' >> "${run_x86_avx_log_file}" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:./lib:./third_party/libjpeg-turbo/lib:./third_party/opencv/lib;./benchmark/benchmark --modelFile=${ms_models_path}/${model_name}.ms --inDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/input/${model_name}.ms.bin --benchmarkDataFile=/home/workspace/mindspore_dataset/mslite/models/hiai/input_output/output/${model_name}.ms.out --accuracyThreshold=${accuracy_limit} >> "${run_x86_avx_log_file}" - if [ $? 
= 0 ]; then
-            run_result='x86_avx: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
-        else
-            run_result='x86_avx: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
-        fi
-    done < ${models_mindspore_config}
-
     # Run tflite weight quantization converted models:
     while read line; do
         weight_quant_line_info=${line}
@@ -1343,7 +1343,7 @@ function Run_arm64() {
         length=${#model_name_and_input_num}
         input_shapes=${line:length+1}
         tf_line_info=${model_name_and_input_num}
-        if [[ $model_name == \#* ]]; then
+        if [[ $tf_line_info == \#* ]]; then
            continue
        fi
        model_name=`echo ${tf_line_info}|awk -F ' ' '{print $1}'`
@@ -1432,6 +1432,97 @@ function Run_arm64() {
         fi
     done < ${models_caffe_config}
 
+    # Run onnx converted models:
+    while read line; do
+        model_name=${line%;*}
+        length=${#model_name}
+        input_shapes=${line:length+1}
+        if [[ $model_name == \#* ]]; then
+            continue
+        fi
+        echo ${model_name} >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --inputShapes='${input_shapes}' --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --inputShapes='${input_shapes}' --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+        # run benchmark test without calibration data
+        echo ${model_name} >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inputShapes='${input_shapes}' --warmUpLoopCount=1 --loopCount=2' >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inputShapes='${input_shapes}' --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+    done < ${models_onnx_config}
+
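+    # A note on the format assumed by the onnx loop above: every non-comment
+    # entry in ${models_onnx_config} is expected to be "<model>;<input_shapes>".
+    # For a hypothetical entry
+    #     line='mobilenet.onnx;1,3,224,224'
+    # the parameter expansions split it as
+    #     model_name=${line%;*}             # -> mobilenet.onnx (text before the last ';')
+    #     length=${#model_name}
+    #     input_shapes=${line:length+1}     # -> 1,3,224,224 (text after that ';')
+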
+    # Run mindir converted models:
+    while read line; do
+        mindspore_line_info=${line}
+        if [[ $mindspore_line_info == \#* ]]; then
+            continue
+        fi
+        model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'`
+        accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'`
+        echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+        # run benchmark test without calibration data
+        echo ${model_name} >> "${run_arm64_log_file}"
+        echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --warmUpLoopCount=1 --loopCount=2' >> "${run_arm64_log_file}"
+        echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt
+        adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}"
+        if [ $? = 0 ]; then
+            run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file}
+        else
+            run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1
+        fi
+    done < ${models_mindspore_config}
+
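+    # The mindir loop above expects space-separated "<model> <accuracy_limit>"
+    # entries in ${models_mindspore_config}; the two awk calls pick the fields.
+    # For a hypothetical entry "mini_alexnet 0.5", adb_run_cmd.txt would contain:
+    #     cd /data/local/tmp/benchmark_test
+    #     export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile=mini_alexnet.ms --inDataFile=/data/local/tmp/input_output/input/mini_alexnet.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/mini_alexnet.ms.out --accuracyThreshold=0.5
+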
= 0 ]; then + run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_config} + + # Run mindir converted train models: + while read line; do + model_name=${line} + if [[ $model_name == \#* ]]; then + continue + fi + echo ${model_name}'_train' >> "${run_arm64_log_file}" + echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.train.ms.out --accuracyThreshold=1.5' >> "${run_arm64_log_file}" + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.train.ms.out --accuracyThreshold=1.5' >> adb_run_cmd.txt + adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" + if [ $? = 0 ]; then + run_result='arm64: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + # run benchmark test without clib data + echo ${model_name} >> "${run_arm64_log_file}" + echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --warmUpLoopCount=1 --loopCount=2' >> "{run_arm64_log_file}" + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt + adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" + if [ $? 
= 0 ]; then + run_result='arm64: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} + else + run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + fi + done < ${models_mindspore_train_config} + # Run tflite post training quantization converted models: while read line; do posttraining_line_info=${line} @@ -1500,18 +1591,16 @@ function Run_arm64() { fi done < ${models_caffe_posttraining_config} - # Run onnx converted models: + # Run tflite aware training quantization converted models: while read line; do - model_name=${line%;*} - length=${#model_name} - input_shapes=${line:length+1} + model_name=${line} if [[ $model_name == \#* ]]; then continue fi echo ${model_name} >> "${run_arm64_log_file}" echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --inputShapes='${input_shapes}' --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --inputShapes='${input_shapes}' --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_arm64_log_file}" + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" if [ $? = 0 ]; then run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} @@ -1521,15 +1610,15 @@ function Run_arm64() { # run benchmark test without clib data echo ${model_name} >> "${run_arm64_log_file}" echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inputShapes='${input_shapes}' --warmUpLoopCount=1 --loopCount=2' >> "{run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inputShapes='${input_shapes}' --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> "${run_arm64_log_file}" + echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" if [ $? 
= 0 ]; then - run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} + run_result='arm64_awq: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} else - run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 + run_result='arm64_awq: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 fi - done < ${models_onnx_config} + done < ${models_tflite_awaretraining_config} # Run fp16 converted models: while read line; do @@ -1607,35 +1696,6 @@ function Run_arm64() { fi done < ${models_tflite_fp16_config} - # Run tflite aware training quantization converted models: - while read line; do - model_name=${line} - if [[ $model_name == \#* ]]; then - continue - fi - echo ${model_name} >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> "${run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out' >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? = 0 ]; then - run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - # run benchmark test without clib data - echo ${model_name} >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> "${run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? 
= 0 ]; then - run_result='arm64_awq: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64_awq: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_tflite_awaretraining_config} - # Run gpu tflite converted models: while read line; do model_name=${line} @@ -1692,66 +1752,6 @@ function Run_arm64() { #sleep 1 done < ${models_gpu_weightquant_config} - # Run mindir converted models: - while read line; do - mindspore_line_info=${line} - if [[ $mindspore_line_info == \#* ]]; then - continue - fi - model_name=`echo ${mindspore_line_info}|awk -F ' ' '{print $1}'` - accuracy_limit=`echo ${mindspore_line_info}|awk -F ' ' '{print $2}'` - echo "mindspore run: ${model_name}, accuracy limit:${accuracy_limit}" >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> "${run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.ms.out --accuracyThreshold='${accuracy_limit} >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? = 0 ]; then - run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - # run benchmark test without clib data - echo ${model_name} >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --warmUpLoopCount=1 --loopCount=2' >> "{run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? 
= 0 ]; then - run_result='arm64: '${model_name}' pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64: '${model_name}' failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_config} - - # Run mindir converted train models: - while read line; do - model_name=${line} - if [[ $model_name == \#* ]]; then - continue - fi - echo ${model_name}'_train' >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.train.ms.out --accuracyThreshold=1.5' >> "${run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --inDataFile=/data/local/tmp/input_output/input/'${model_name}'.ms.bin --benchmarkDataFile=/data/local/tmp/input_output/output/'${model_name}'.train.ms.out --accuracyThreshold=1.5' >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? = 0 ]; then - run_result='arm64: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - # run benchmark test without clib data - echo ${model_name} >> "${run_arm64_log_file}" - echo 'cd /data/local/tmp/benchmark_test' > adb_run_cmd.txt - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --warmUpLoopCount=1 --loopCount=2' >> "{run_arm64_log_file}" - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/data/local/tmp/benchmark_test;./benchmark --modelFile='${model_name}'_train.ms --warmUpLoopCount=1 --loopCount=2' >> adb_run_cmd.txt - adb -s ${device_id} shell < adb_run_cmd.txt >> "${run_arm64_log_file}" - if [ $? 
= 0 ]; then - run_result='arm64: '${model_name}'_train pass'; echo ${run_result} >> ${run_benchmark_result_file} - else - run_result='arm64: '${model_name}'_train failed'; echo ${run_result} >> ${run_benchmark_result_file}; return 1 - fi - done < ${models_mindspore_train_config} - # Run mindir weightquant converted train models: while read line; do weight_quant_line_info=${line} @@ -1774,6 +1774,10 @@ function Run_arm64() { # Run npu converted models: while read line; do + model_line_info=${line} + if [[ $model_line_info == \#* ]]; then + continue + fi model_name=`echo ${line}|awk -F ' ' '{print $1}'` accuracy_limit=`echo ${line}|awk -F ' ' '{print $2}'` input_num=`echo ${line}|awk -F ' ' '{print $3}'` diff --git a/mindspore/lite/test/st/control_flow_test.cc b/mindspore/lite/test/st/control_flow_test.cc index 4d5d7fce22..057ce463a5 100644 --- a/mindspore/lite/test/st/control_flow_test.cc +++ b/mindspore/lite/test/st/control_flow_test.cc @@ -59,9 +59,9 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_0_node_0->inputIndex = {0, 1}; sub_graph_0_node_0->outputIndex = {2}; sub_graph_0_node_0->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_0_node_0->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_0_node_0 = new schema::AddT; - primitive_sub_graph_0_node_0->activationType = schema::ActivationType_NO_ACTIVATION; + sub_graph_0_node_0->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_0_node_0 = new schema::AddFusionT; + primitive_sub_graph_0_node_0->activation_type = schema::ActivationType_NO_ACTIVATION; sub_graph_0_node_0->primitive->value.value = primitive_sub_graph_0_node_0; sub_graph_0_node_0->name = "before_Add_1"; meta_graph->nodes.emplace_back(std::move(sub_graph_0_node_0)); @@ -73,9 +73,9 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_0_node_1->inputIndex = {2, 3}; sub_graph_0_node_1->outputIndex = {4}; sub_graph_0_node_1->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_0_node_1->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_0_node_1 = new schema::AddT; - primitive_sub_graph_0_node_1->activationType = schema::ActivationType_NO_ACTIVATION; + sub_graph_0_node_1->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_0_node_1 = new schema::AddFusionT; + primitive_sub_graph_0_node_1->activation_type = schema::ActivationType_NO_ACTIVATION; sub_graph_0_node_1->primitive->value.value = primitive_sub_graph_0_node_1; sub_graph_0_node_1->name = "before_Add_2"; meta_graph->nodes.emplace_back(std::move(sub_graph_0_node_1)); @@ -100,9 +100,9 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_0_node_3->inputIndex = {16}; sub_graph_0_node_3->outputIndex = {5}; // 5 : bool sub_graph_0_node_3->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_0_node_3->primitive->value.type = schema::PrimitiveType_Partial; - auto primitive_sub_graph_0_node_3 = new schema::PartialT; - primitive_sub_graph_0_node_3->subGraphIndex = 1; + sub_graph_0_node_3->primitive->value.type = schema::PrimitiveType_PartialFusion; + auto primitive_sub_graph_0_node_3 = new schema::PartialFusionT; + primitive_sub_graph_0_node_3->sub_graph_index = 1; sub_graph_0_node_3->primitive->value.value = primitive_sub_graph_0_node_3; sub_graph_0_node_3->name = "Partial_cond"; meta_graph->nodes.emplace_back(std::move(sub_graph_0_node_3)); @@ -127,9 +127,9 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_0_node_5->inputIndex = {6}; 
sub_graph_0_node_5->outputIndex = {17}; sub_graph_0_node_5->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_0_node_5->primitive->value.type = schema::PrimitiveType_Partial; - auto primitive_sub_graph_0_node_5 = new schema::PartialT; - primitive_sub_graph_0_node_5->subGraphIndex = 2; + sub_graph_0_node_5->primitive->value.type = schema::PrimitiveType_PartialFusion; + auto primitive_sub_graph_0_node_5 = new schema::PartialFusionT; + primitive_sub_graph_0_node_5->sub_graph_index = 2; sub_graph_0_node_5->primitive->value.value = primitive_sub_graph_0_node_5; sub_graph_0_node_5->name = "Partial_body"; meta_graph->nodes.emplace_back(std::move(sub_graph_0_node_5)); @@ -141,8 +141,8 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_0_node_6->inputIndex = {7, 8}; sub_graph_0_node_6->outputIndex = {9}; sub_graph_0_node_6->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_0_node_6->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_0_node_6 = new schema::AddT; + sub_graph_0_node_6->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_0_node_6 = new schema::AddFusionT; sub_graph_0_node_6->primitive->value.value = primitive_sub_graph_0_node_6; sub_graph_0_node_6->name = "Add-after"; meta_graph->nodes.emplace_back(std::move(sub_graph_0_node_6)); @@ -160,8 +160,8 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_1_node_0->inputIndex = {16, 10}; sub_graph_1_node_0->outputIndex = {11}; sub_graph_1_node_0->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_1_node_0->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_1_node_0 = new schema::AddT; + sub_graph_1_node_0->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_1_node_0 = new schema::AddFusionT; sub_graph_1_node_0->primitive->value.value = primitive_sub_graph_1_node_0; sub_graph_1_node_0->name = "cond_add"; meta_graph->nodes.emplace_back(std::move(sub_graph_1_node_0)); @@ -191,8 +191,8 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_2_node_0->inputIndex = {6, 13}; sub_graph_2_node_0->outputIndex = {14}; sub_graph_2_node_0->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_2_node_0->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_2_node_0 = new schema::AddT; + sub_graph_2_node_0->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_2_node_0 = new schema::AddFusionT; sub_graph_2_node_0->primitive->value.value = primitive_sub_graph_2_node_0; sub_graph_2_node_0->name = "body_add_1"; meta_graph->nodes.emplace_back(std::move(sub_graph_2_node_0)); @@ -204,8 +204,8 @@ TEST_F(ControlFlowTest, TestMergeWhileModel) { sub_graph_2_node_1->inputIndex = {14, 15}; sub_graph_2_node_1->outputIndex = {17}; sub_graph_2_node_1->primitive = std::make_unique<schema::PrimitiveT>(); - sub_graph_2_node_1->primitive->value.type = schema::PrimitiveType_Add; - auto primitive_sub_graph_2_node_1 = new schema::AddT; + sub_graph_2_node_1->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive_sub_graph_2_node_1 = new schema::AddFusionT; sub_graph_2_node_1->primitive->value.value = primitive_sub_graph_2_node_1; sub_graph_2_node_1->name = "body_add_2"; meta_graph->nodes.emplace_back(std::move(sub_graph_2_node_1)); diff --git a/mindspore/lite/test/st/sub_graph_test.cc b/mindspore/lite/test/st/sub_graph_test.cc index e05946a2bc..0d43c17ad5 100644 --- a/mindspore/lite/test/st/sub_graph_test.cc +++ 
b/mindspore/lite/test/st/sub_graph_test.cc @@ -44,9 +44,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_0->inputIndex = {0, 1}; add_0->outputIndex = {2}; add_0->primitive = std::make_unique<schema::PrimitiveT>(); - add_0->primitive->value.type = schema::PrimitiveType_Add; - auto add_0_prim = new schema::AddT; - add_0_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_0->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_0_prim = new schema::AddFusionT; + add_0_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_0->primitive->value.value = add_0_prim; add_0->name = "Add0"; auto tensor_0 = std::make_unique<schema::TensorT>(); @@ -77,9 +77,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_1->inputIndex = {2, 3}; add_1->outputIndex = {4}; add_1->primitive = std::make_unique<schema::PrimitiveT>(); - add_1->primitive->value.type = schema::PrimitiveType_Add; - auto add_1_prim = new schema::AddT; - add_1_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_1->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_1_prim = new schema::AddFusionT; + add_1_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_1->primitive->value.value = add_1_prim; add_1->name = "Add1"; auto tensor_3 = std::make_unique<schema::TensorT>(); @@ -104,9 +104,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { partial_cond->inputIndex = {4}; partial_cond->outputIndex = {9}; partial_cond->primitive = std::make_unique<schema::PrimitiveT>(); - partial_cond->primitive->value.type = schema::PrimitiveType_Partial; - auto partial_cond_prim = new schema::PartialT; - partial_cond_prim->subGraphIndex = 1; + partial_cond->primitive->value.type = schema::PrimitiveType_PartialFusion; + auto partial_cond_prim = new schema::PartialFusionT; + partial_cond_prim->sub_graph_index = 1; partial_cond->primitive->value.value = partial_cond_prim; partial_cond->name = "partial_cond"; meta_graph->nodes.emplace_back(std::move(partial_cond)); @@ -116,9 +116,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_5->inputIndex = {9, 13}; add_5->outputIndex = {14}; add_5->primitive = std::make_unique<schema::PrimitiveT>(); - add_5->primitive->value.type = schema::PrimitiveType_Add; - auto add_5_prim = new schema::AddT; - add_5_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_5->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_5_prim = new schema::AddFusionT; + add_5_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_5->primitive->value.value = add_5_prim; add_5->name = "Add5"; auto tensor_13 = std::make_unique<schema::TensorT>(); @@ -152,9 +152,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_2->inputIndex = {4, 5}; add_2->outputIndex = {6}; add_2->primitive = std::make_unique<schema::PrimitiveT>(); - add_2->primitive->value.type = schema::PrimitiveType_Add; - auto add_2_prim = new schema::AddT; - add_2_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_2->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_2_prim = new schema::AddFusionT; + add_2_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_2->primitive->value.value = add_2_prim; add_2->name = "Add2"; auto tensor_5 = std::make_unique<schema::TensorT>(); @@ -226,9 +226,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { partial_body->inputIndex = {8}; partial_body->outputIndex = {4}; partial_body->primitive = std::make_unique<schema::PrimitiveT>(); - partial_body->primitive->value.type = 
schema::PrimitiveType_Partial; - auto partial_body_prim = new schema::PartialT; - partial_body_prim->subGraphIndex = 2; + partial_body->primitive->value.type = schema::PrimitiveType_PartialFusion; + auto partial_body_prim = new schema::PartialFusionT; + partial_body_prim->sub_graph_index = 2; partial_body->primitive->value.value = partial_body_prim; partial_body->name = "partial_body"; meta_graph->nodes.emplace_back(std::move(partial_body)); @@ -247,9 +247,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_3->inputIndex = {8, 10}; add_3->outputIndex = {11}; add_3->primitive = std::make_unique<schema::PrimitiveT>(); - add_3->primitive->value.type = schema::PrimitiveType_Add; - auto add_3_prim = new schema::AddT; - add_3_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_3->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_3_prim = new schema::AddFusionT; + add_3_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_3->primitive->value.value = add_3_prim; add_3->name = "Add3"; auto tensor_10 = std::make_unique<schema::TensorT>(); @@ -274,9 +274,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { add_4->inputIndex = {11, 12}; add_4->outputIndex = {4}; add_4->primitive = std::make_unique<schema::PrimitiveT>(); - add_4->primitive->value.type = schema::PrimitiveType_Add; - auto add_4_prim = new schema::AddT; - add_4_prim->activationType = schema::ActivationType_NO_ACTIVATION; + add_4->primitive->value.type = schema::PrimitiveType_AddFusion; + auto add_4_prim = new schema::AddFusionT; + add_4_prim->activation_type = schema::ActivationType_NO_ACTIVATION; add_4->primitive->value.value = add_4_prim; add_4->name = "Add4"; auto tensor_12 = std::make_unique<schema::TensorT>(); @@ -296,9 +296,9 @@ TEST_F(SubGraphTest, RecursiveSubGraphTest) { partial_cond->inputIndex = {4}; partial_cond->outputIndex = {9}; partial_cond->primitive = std::make_unique<schema::PrimitiveT>(); - partial_cond->primitive->value.type = schema::PrimitiveType_Partial; - auto partial_cond_prim = new schema::PartialT; - partial_cond_prim->subGraphIndex = 1; + partial_cond->primitive->value.type = schema::PrimitiveType_PartialFusion; + auto partial_cond_prim = new schema::PartialFusionT; + partial_cond_prim->sub_graph_index = 1; partial_cond->primitive->value.value = partial_cond_prim; partial_cond->name = "partial_cond1"; meta_graph->nodes.emplace_back(std::move(partial_cond)); diff --git a/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc new file mode 100644 index 0000000000..7f00dedb67 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/adam_infer_test.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/adam_infer.h" + +namespace mindspore { + +class AdamInferTest : public mindspore::CommonTest { + public: + AdamInferTest() {} +}; + +TEST_F(AdamInferTest, AdamInferTest0) { + size_t inputs_size = 10; + std::vector<TensorC *> inputs(inputs_size, NULL); + for (size_t i = 0; i < inputs_size; i++) { + inputs[i] = new TensorC; + inputs[i]->shape_size_ = 1; + inputs[i]->shape_[0] = 1; + } + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = AdamInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc new file mode 100644 index 0000000000..5b65fdd05d --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/addn_infer_test.cc @@ -0,0 +1,93 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/addn_infer.h" + +namespace mindspore { + +class AddnInferTest : public mindspore::CommonTest { + public: + AddnInferTest() {} +}; + +// https://tensorflow.google.cn/api_docs/python/tf/math/add_n?hl=en +TEST_F(AddnInferTest, AddnInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 3; + inputs[1]->data_type_ = kNumberTypeInt; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = AddnInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +// https://tensorflow.google.cn/api_docs/python/tf/math/add_n?hl=en +// ours support broadcast +TEST_F(AddnInferTest, AddnInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 1; + inputs[0]->data_type_ = kNumberTypeInt; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 4; + inputs[1]->data_type_ = kNumberTypeInt; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = AddnInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc new file mode 100644 index 0000000000..67c651be96 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/apply_momentum_infer_test.cc @@ -0,0 +1,52 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/apply_momentum_infer.h" + +namespace mindspore { + +class ApplyMomentumInferTest : public mindspore::CommonTest { + public: + ApplyMomentumInferTest() {} +}; + +TEST_F(ApplyMomentumInferTest, ApplyMomentumInferTest0) { + size_t inputs_size = 5; + std::vector<TensorC *> inputs(inputs_size, NULL); + for (size_t i = 0; i < inputs_size; i++) { + inputs[i] = new TensorC; + inputs[i]->shape_size_ = 1; + inputs[i]->shape_[0] = 1; + } + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ApplyMomentumInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc new file mode 100644 index 0000000000..2547f245ab --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/argmax_infer_test.cc @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/argmin_max_infer.h" + +namespace mindspore { + +class ArgmaxInferTest : public mindspore::CommonTest { + public: + ArgmaxInferTest() {} +}; + +TEST_F(ArgmaxInferTest, ArgmaxInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgmaxInferTest, ArgmaxInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgmaxInferTest, ArgmaxInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgmaxInferTest, ArgmaxInferTestTopK2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 2; + parameter->keep_dims_ = true; + parameter->axis_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = 
ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc new file mode 100644 index 0000000000..1131a85aff --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/argmin_infer_test.cc @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/argmin_max_infer.h" + +namespace mindspore { + +class ArgminInferTest : public mindspore::CommonTest { + public: + ArgminInferTest() {} +}; + +TEST_F(ArgminInferTest, ArgminInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgminInferTest, ArgminInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgminInferTest, ArgminInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> 
inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 1; + parameter->keep_dims_ = true; + parameter->axis_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArgminInferTest, ArgminInferTestTopK2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ArgMinMaxParameter *parameter = new ArgMinMaxParameter; + parameter->topk_ = 2; + parameter->keep_dims_ = true; + parameter->axis_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = ArgMinMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc new file mode 100644 index 0000000000..6100c0bd7e --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/arithmetic_compare_infer_test.cc @@ -0,0 +1,173 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/arithmetic_compare_infer.h" + +namespace mindspore { + +class ArithmeticCompareInferTest : public mindspore::CommonTest { + public: + ArithmeticCompareInferTest() {} +}; + +TEST_F(ArithmeticCompareInferTest, ArithmeticCompareInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 5; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ArithmeticCompareInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + parameter); + ASSERT_EQ(ret, NNACL_ERR); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticCompareInferTest, ArithmeticCompareInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 7; + inputs[0]->shape_[1] = 8; + inputs[0]->shape_[2] = 9; + inputs[0]->shape_[3] = 10; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ArithmeticCompareInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticCompareInferTest, ArithmeticCompareInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 7; + inputs[1]->shape_[1] = 8; + inputs[1]->shape_[2] = 9; + inputs[1]->shape_[3] = 10; + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 6; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 8; + inputs[0]->shape_[3] = 9; + inputs[0]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ArithmeticCompareInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for 
(size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticCompareInferTest, ArithmeticCompareInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 6; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 8; + inputs[0]->shape_[3] = 9; + inputs[0]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ArithmeticCompareInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc new file mode 100644 index 0000000000..2bc6ce81bf --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/arithmetic_infer_test.cc @@ -0,0 +1,173 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/arithmetic_infer.h" + +namespace mindspore { + +class ArithmeticInferTest : public mindspore::CommonTest { + public: + ArithmeticInferTest() {} +}; + +TEST_F(ArithmeticInferTest, ArithmeticInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 5; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ArithmeticInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_ERR); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticInferTest, ArithmeticInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 7; + inputs[0]->shape_[1] = 8; + inputs[0]->shape_[2] = 9; + inputs[0]->shape_[3] = 10; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ArithmeticInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticInferTest, ArithmeticInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 7; + inputs[1]->shape_[1] = 8; + inputs[1]->shape_[2] = 9; + inputs[1]->shape_[3] = 10; + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 6; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 8; + inputs[0]->shape_[3] = 9; + inputs[0]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ArithmeticInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < 
outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ArithmeticInferTest, ArithmeticInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 5; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 7; + inputs[1]->shape_[2] = 8; + inputs[1]->shape_[3] = 9; + inputs[1]->shape_[4] = 10; + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 6; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 8; + inputs[0]->shape_[3] = 9; + inputs[0]->shape_[4] = 10; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ArithmeticInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 5); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 9); + ASSERT_EQ(outputs[0]->shape_[4], 10); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc new file mode 100644 index 0000000000..369b24cab7 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/assign_add_infer_test.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/assign_add_infer.h" + +namespace mindspore { + +class AssignAddInferTest : public mindspore::CommonTest { + public: + AssignAddInferTest() {} +}; + +TEST_F(AssignAddInferTest, AssignAddInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt8; + inputs[1] = new TensorC; + inputs[1]->data_type_ = kNumberTypeInt8; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = AssignAddInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc new file mode 100644 index 0000000000..cfbd434a4b --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/assign_infer_test.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/assign_infer.h" + +namespace mindspore { + +class AssignInferTest : public mindspore::CommonTest { + public: + AssignInferTest() {} +}; + +TEST_F(AssignInferTest, AssignInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt8; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 4; + inputs[1]->shape_[1] = 3; + inputs[1]->data_type_ = kNumberTypeInt8; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = AssignInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc new file mode 100644 index 0000000000..58fb0d8f91 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/audio_spectrogram_infer_test.cc @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/audio_spectrogram_infer.h" + +namespace mindspore { + +class AudioSpectrogramInferTest : public mindspore::CommonTest { + public: + AudioSpectrogramInferTest() {} +}; + +TEST_F(AudioSpectrogramInferTest, AudioSpectrogramInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + AudioSpectrogramParameter *parameter = new AudioSpectrogramParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_size_ = 3; + parameter->stride_ = 2; + int ret = AudioSpectrogramInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc new file mode 100644 index 0000000000..080b67da0b --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/batch_to_space_infer_test.cc @@ -0,0 +1,187 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/batch_to_space_infer.h" + +namespace mindspore { + +class BatchToSpaceInferTest : public mindspore::CommonTest { + public: + BatchToSpaceInferTest() {} +}; + +// https://tensorflow.google.cn/api_docs/python/tf/batch_to_space?hl=en +TEST_F(BatchToSpaceInferTest, BatchToSpaceInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 1; + inputs[0]->shape_[3] = 1; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + BatchToSpaceParameter *parameter = new BatchToSpaceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->block_shape_[0] = 2; + parameter->block_shape_[1] = 2; + parameter->crops_[0] = 0; + parameter->crops_[1] = 0; + parameter->crops_[2] = 0; + parameter->crops_[3] = 0; + int ret = BatchToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BatchToSpaceInferTest, BatchToSpaceInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 1; + inputs[0]->shape_[3] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + BatchToSpaceParameter *parameter = new BatchToSpaceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->block_shape_[0] = 2; + parameter->block_shape_[1] = 2; + parameter->crops_[0] = 0; + parameter->crops_[1] = 0; + parameter->crops_[2] = 0; + parameter->crops_[3] = 0; + int ret = BatchToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BatchToSpaceInferTest, BatchToSpaceInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 1; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = 
new TensorC; + BatchToSpaceParameter *parameter = new BatchToSpaceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->block_shape_[0] = 2; + parameter->block_shape_[1] = 2; + parameter->crops_[0] = 0; + parameter->crops_[1] = 0; + parameter->crops_[2] = 0; + parameter->crops_[3] = 0; + int ret = BatchToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BatchToSpaceInferTest, BatchToSpaceInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 8; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + BatchToSpaceParameter *parameter = new BatchToSpaceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->block_shape_[0] = 2; + parameter->block_shape_[1] = 2; + parameter->crops_[0] = 0; + parameter->crops_[1] = 0; + parameter->crops_[2] = 2; + parameter->crops_[3] = 0; + int ret = BatchToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc new file mode 100644 index 0000000000..d21f926c50 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/bias_grad_infer_test.cc @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/bias_grad_infer.h" + +namespace mindspore { + +class BiasGradInferTest : public mindspore::CommonTest { + public: + BiasGradInferTest() {} +}; + +TEST_F(BiasGradInferTest, BiasGradInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + inputs[0]->shape_[3] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = BiasGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc new file mode 100644 index 0000000000..dbedc57bbf --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/binary_cross_entropy_infer_test.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/binary_cross_entropy_infer.h" + +namespace mindspore { + +class BinaryCrossEntropyInferTest : public mindspore::CommonTest { + public: + BinaryCrossEntropyInferTest() {} +}; + +TEST_F(BinaryCrossEntropyInferTest, BinaryCrossEntropyInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + BinaryCrossEntropyParameter *parameter = new BinaryCrossEntropyParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->reduction = 3; + int ret = BinaryCrossEntropyInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BinaryCrossEntropyInferTest, BinaryCrossEntropyInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + BinaryCrossEntropyParameter *parameter = new BinaryCrossEntropyParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->reduction = 2; + int ret = BinaryCrossEntropyInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc new file mode 100644 index 0000000000..7c2e529e5e --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/bn_grad_infer_test.cc @@ -0,0 +1,83 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/bn_grad_infer.h" + +namespace mindspore { + +class BnGradInferTest : public mindspore::CommonTest { + public: + BnGradInferTest() {} +}; + +TEST_F(BnGradInferTest, BnGradInferTest0) { + size_t inputs_size = 6; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 4; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 5; + inputs[1]->shape_[3] = 6; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 2; + inputs[2]->shape_[0] = 7; + inputs[2]->shape_[1] = 8; + inputs[3] = new TensorC; + inputs[4] = new TensorC; + inputs[5] = new TensorC; + + inputs[1]->data_type_ = kNumberTypeInt32; + inputs[1]->format_ = Format_NHWC; + inputs[2]->data_type_ = kNumberTypeUInt8; + inputs[2]->format_ = Format_NCHW; + std::vector<TensorC *> outputs(3, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + outputs[2] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = BnGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 7); + ASSERT_EQ(outputs[1]->shape_[1], 8); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeUInt8); + ASSERT_EQ(outputs[1]->format_, Format_NCHW); + ASSERT_EQ(outputs[2]->shape_size_, 2); + ASSERT_EQ(outputs[2]->shape_[0], 7); + ASSERT_EQ(outputs[2]->shape_[1], 8); + ASSERT_EQ(outputs[2]->data_type_, kNumberTypeUInt8); + ASSERT_EQ(outputs[2]->format_, Format_NCHW); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc new file mode 100644 index 0000000000..c212cc5c66 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/broadcast_to_infer_test.cc @@ -0,0 +1,152 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/broadcast_to_infer.h" + +namespace mindspore { + +class BroadcastToInferTest : public mindspore::CommonTest { + public: + BroadcastToInferTest() {} +}; + +TEST_F(BroadcastToInferTest, BroadcastToInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + BroadcastToParameter *param = new BroadcastToParameter; + param->op_parameter_.infer_flag_ = true; + param->shape_size_ = 2; + param->shape_[0] = 5; + param->shape_[1] = 4; + int ret = BroadcastToInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BroadcastToInferTest, BroadcastToInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + BroadcastToParameter *param = new BroadcastToParameter; + param->op_parameter_.infer_flag_ = true; + param->shape_size_ = 3; + param->shape_[0] = 3; + param->shape_[1] = 3; + param->shape_[2] = 3; + int ret = BroadcastToInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BroadcastToInferTest, BroadcastToInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + BroadcastToParameter *param = new BroadcastToParameter; + param->op_parameter_.infer_flag_ = true; + param->shape_size_ = 4; + param->shape_[0] = 4; + param->shape_[1] = 5; + param->shape_[2] = 3; + param->shape_[3] = 2; + int ret = BroadcastToInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 2); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(BroadcastToInferTest, BroadcastToInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] 
= new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + BroadcastToParameter *param = new BroadcastToParameter; + param->op_parameter_.infer_flag_ = true; + param->shape_size_ = 4; + param->shape_[0] = 4; + param->shape_[1] = 5; + param->shape_[2] = 3; + param->shape_[3] = 2; + int ret = BroadcastToInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_ERR); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc new file mode 100644 index 0000000000..fa2266d938 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/cast_infer_test.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/cast_infer.h" +#include "nnacl/cast_parameter.h" + +namespace mindspore { + +class CastInferTest : public mindspore::CommonTest { + public: + CastInferTest() {} +}; + +TEST_F(CastInferTest, CastInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeFloat32; + inputs[1] = new TensorC; + inputs[1]->data_ = new int(kNumberTypeInt32); + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CastInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc new file mode 100644 index 0000000000..1f31eab341 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/concat_infer_test.cc @@ -0,0 +1,245 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/concat_infer.h" + +namespace mindspore { + +class ConcatInferTest : public mindspore::CommonTest { + public: + ConcatInferTest() {} +}; + +TEST_F(ConcatInferTest, ConcatInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter *parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 0; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ConcatInferTest, ConcatInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter *parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 1; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 8); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ConcatInferTest, ConcatInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 5; + inputs[1]->shape_[1] = 2; + inputs[1]->shape_[2] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter *parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 0; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 10); + 
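 // non-concatenated dims must match across inputs and pass through unchanged +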
ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ConcatInferTest, ConcatInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 2; + inputs[1]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter *parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 0; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 10); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ConcatInferTest, ConcatInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 6; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 2; + inputs[1]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter *parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = -1; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ConcatInferTest, ConcatInferTest5) { + size_t inputs_size = 4; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 14; + inputs[0]->shape_[2] = 14; + inputs[0]->shape_[3] = 192; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 14; + inputs[1]->shape_[2] = 14; + inputs[1]->shape_[3] = 192; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 4; + inputs[2]->shape_[0] = 1; + inputs[2]->shape_[1] = 14; + inputs[2]->shape_[2] = 14; + inputs[2]->shape_[3] = 192; + inputs[3] = new TensorC; + inputs[3]->shape_size_ = 4; + inputs[3]->shape_[0] = 1; + inputs[3]->shape_[1] = 14; + inputs[3]->shape_[2] = 14; + inputs[3]->shape_[3] = 192; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConcatParameter 
*parameter = new ConcatParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 3; + int ret = ConcatInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 14); + ASSERT_EQ(outputs[0]->shape_[2], 14); + ASSERT_EQ(outputs[0]->shape_[3], 768); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc new file mode 100644 index 0000000000..356a7e100d --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/constant_of_shape_infer_test.cc @@ -0,0 +1,63 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/constant_of_shape_infer.h" + +namespace mindspore { + +class ConstantOfShapeInferTest : public mindspore::CommonTest { + public: + ConstantOfShapeInferTest() {} +}; + +TEST_F(ConstantOfShapeInferTest, ConstantOfShapeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<int> input_data = {2, 3, 5, 6, 7, 8}; + inputs[0]->data_ = input_data.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConstantOfShapeParameter *parameter = new ConstantOfShapeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->data_type_ = kNumberTypeInt8; + int ret = ConstantOfShapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 6); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->shape_[4], 7); + ASSERT_EQ(outputs[0]->shape_[5], 8); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt8); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc new file mode 100644 index 0000000000..64f561511f --- /dev/null +++ 
b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_filter_infer_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/conv2d_grad_filter_infer.h" + +namespace mindspore { + +class Conv2dGradFilterInferTest : public mindspore::CommonTest { + public: + Conv2dGradFilterInferTest() {} +}; + +TEST_F(Conv2dGradFilterInferTest, Conv2dGradFilterInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dGradFilterInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc new file mode 100644 index 0000000000..dc90bf7dd8 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/conv2d_grad_input_infer_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/conv2d_grad_input_infer.h" + +namespace mindspore { + +class Conv2dGradInputInferTest : public mindspore::CommonTest { + public: + Conv2dGradInputInferTest() {} +}; + +TEST_F(Conv2dGradInputInferTest, Conv2dGradInputInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[1] = new TensorC; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dGradInputInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc new file mode 100644 index 0000000000..f9a018b7f1 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/conv2d_infer_test.cc @@ -0,0 +1,540 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/conv2d_infer.h" + +namespace mindspore { + +class Conv2dInferTest : public mindspore::CommonTest { + public: + Conv2dInferTest() {} +}; + +TEST_F(Conv2dInferTest, Conv2dInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 120; + inputs[0]->shape_[2] = 120; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 7; + parameter->kernel_w_ = 7; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_l_ = 3; + parameter->pad_r_ = 3; + parameter->pad_d_ = 3; + parameter->pad_u_ = 3; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 60); + ASSERT_EQ(outputs[0]->shape_[2], 60); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 30; + inputs[0]->shape_[2] = 30; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; 
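+  // 20 filters of 3x3 over 6 input channels; with pad 1 and stride 1 the 30x30 spatial size is preserved: (30 + 2 - 3) / 1 + 1 = 30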
+ inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 30); + ASSERT_EQ(outputs[0]->shape_[2], 30); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 30; + inputs[0]->shape_[2] = 30; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 15); + ASSERT_EQ(outputs[0]->shape_[2], 15); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 120; + inputs[0]->shape_[2] = 120; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 5; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 5; + parameter->kernel_w_ = 5; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), 
outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 58); + ASSERT_EQ(outputs[0]->shape_[2], 58); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest5) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 27; + inputs[0]->shape_[2] = 27; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 25); + ASSERT_EQ(outputs[0]->shape_[2], 25); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest6) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 88; + inputs[0]->shape_[2] = 88; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 1; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 1; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 88); + ASSERT_EQ(outputs[0]->shape_[2], 88); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest7) { + size_t inputs_size = 2; + std::vector<TensorC *> 
inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 11; + inputs[0]->shape_[2] = 11; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 9; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 9; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 4; + parameter->pad_u_ = 4; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 11); + ASSERT_EQ(outputs[0]->shape_[2], 11); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest8) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 29; + inputs[0]->shape_[2] = 29; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_same; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 29); + ASSERT_EQ(outputs[0]->shape_[2], 29); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest9) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 14; + inputs[0]->shape_[2] = 14; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 1; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 1; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + 
parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 14); + ASSERT_EQ(outputs[0]->shape_[2], 14); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Conv2dInferTest, Conv2dInferTest10) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 448; + inputs[0]->shape_[2] = 448; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 20; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 5; + inputs[1]->shape_[3] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 5; + parameter->kernel_w_ = 5; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_same; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Conv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 224); + ASSERT_EQ(outputs[0]->shape_[2], 224); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc new file mode 100644 index 0000000000..a5e91a1711 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/crop_and_resize_infer_test.cc @@ -0,0 +1,126 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/crop_and_resize_infer.h" + +namespace mindspore { + +class CropAndResizeInferTest : public mindspore::CommonTest { + public: + CropAndResizeInferTest() {} +}; + +/* + * inputs[0].shape: [3, 4, 5, 6] + * inputs[1].data: null + * inputs[3].data: 7, 8 + * output[0].shape: [3, 7, 8, 6] + */ +TEST_F(CropAndResizeInferTest, CropAndResizeInferTest0) { + size_t inputs_size = 4; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 5; + inputs[0]->shape_[3] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->data_ = nullptr; + inputs[2] = new TensorC; + inputs[3] = new TensorC; + inputs[3]->shape_size_ = 1; + inputs[3]->shape_[0] = 2; + std::vector<int> input3 = {7, 8}; + inputs[3]->data_ = input3.data(); + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CropAndResizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +/* + * inputs[0].shape: [3, 4, 5, 6] + * inputs[1].data: not null + * inputs[1].shape: [9] + * inputs[3].data: 7, 8 + * output[0].shape: [9, 7, 8, 6] + */ +TEST_F(CropAndResizeInferTest, CropAndResizeInferTest1) { + size_t inputs_size = 4; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 5; + inputs[0]->shape_[3] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<int> inputs1 = {11}; + inputs[1]->data_ = inputs1.data(); + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 9; + inputs[2] = new TensorC; + inputs[3] = new TensorC; + inputs[3]->shape_size_ = 1; + inputs[3]->shape_[0] = 2; + std::vector<int> input3 = {7, 8}; + inputs[3]->data_ = input3.data(); + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CropAndResizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 9); + ASSERT_EQ(outputs[0]->shape_[1], 7); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git 
a/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc new file mode 100644 index 0000000000..195f5da367 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/crop_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/crop_infer.h" + +namespace mindspore { + +class CropInferTest : public mindspore::CommonTest { + public: + CropInferTest() {} +}; + +TEST_F(CropInferTest, CropInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 5; + inputs[1]->shape_[1] = 6; + inputs[1]->shape_[2] = 7; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + CropParameter *parameter = new CropParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = CropInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 6); + ASSERT_EQ(outputs[0]->shape_[2], 7); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc new file mode 100644 index 0000000000..b5f2c672c0 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/custom_extract_features_infer_test.cc @@ -0,0 +1,96 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/custom_extract_features_infer.h" + +namespace mindspore { + +class CustomExtractFeaturesInferTest : public mindspore::CommonTest { + public: + CustomExtractFeaturesInferTest() {} +}; + +TEST_F(CustomExtractFeaturesInferTest, CustomExtractFeaturesInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 1; + std::vector<int> input_data = {3}; + inputs[0]->data_ = input_data.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CustomExtractFeaturesInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 1); + ASSERT_EQ(outputs[1]->shape_[0], 3); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(CustomExtractFeaturesInferTest, CustomExtractFeaturesInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 1; + std::vector<int> input_data = {0}; + inputs[0]->data_ = input_data.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CustomExtractFeaturesInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 1); + ASSERT_EQ(outputs[1]->shape_[0], 1); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc new file mode 100644 index 0000000000..2d665eb3b8 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/custom_normalize_infer_test.cc @@ -0,0 +1,86 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/custom_normalize_infer.h" + +namespace mindspore { + +class CustomNormalizeInferTest : public mindspore::CommonTest { + public: + CustomNormalizeInferTest() {} +}; + +TEST_F(CustomNormalizeInferTest, CustomNormalizeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 1; + std::vector<int> input_data = {2}; + inputs[0]->data_ = input_data.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CustomNormalizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(CustomNormalizeInferTest, CustomNormalizeInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 1; + std::vector<int> input_data = {0}; + inputs[0]->data_ = input_data.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = CustomNormalizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc new file mode 100644 index 0000000000..cf039eb524 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/custom_predict_infer_test.cc @@ -0,0 +1,58 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/custom_predict_infer.h" + +namespace mindspore { + +class CustomPredictInferTest : public mindspore::CommonTest { + public: + CustomPredictInferTest() {} +}; + +TEST_F(CustomPredictInferTest, CustomPredictInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + CustomPredictParameter *parameter = new CustomPredictParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->output_num = 5; + int ret = CustomPredictInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 1); + ASSERT_EQ(outputs[1]->shape_[0], 5); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc new file mode 100644 index 0000000000..8cbd8ac2c1 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/deconv2d_infer_test.cc @@ -0,0 +1,172 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/deconv2d_infer.h" + +namespace mindspore { + +class Deconv2dInferTest : public mindspore::CommonTest { + public: + Deconv2dInferTest() {} +}; + +TEST_F(Deconv2dInferTest, Deconv2dInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 20; + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = Deconv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Deconv2dInferTest, Deconv2dInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 20; + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = Deconv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(Deconv2dInferTest, Deconv2dInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; 
+ inputs[0]->shape_[1] = 17; + inputs[0]->shape_[2] = 17; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 20; + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 2; + parameter->kernel_w_ = 2; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = Deconv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 34); + ASSERT_EQ(outputs[0]->shape_[2], 34); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc new file mode 100644 index 0000000000..684b92b7a0 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/dedepthwise_conv2d_infer_test.cc @@ -0,0 +1,175 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/dedepthwise_conv2d_infer.h" + +namespace mindspore { + +class DeDepthwiseConv2DInferTest : public mindspore::CommonTest { + public: + DeDepthwiseConv2DInferTest() {} +}; + +TEST_F(DeDepthwiseConv2DInferTest, DeDepthwiseConv2DInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; // must be 1, because it is channel_multiplier + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + parameter->channel_multiplie_ = 1; + int ret = DeDepthwiseConv2DInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DeDepthwiseConv2DInferTest, DeDepthwiseConv2DInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = DeDepthwiseConv2DInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + 
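+/*
+ * Sanity check for the expected shapes in these DeDepthwiseConv2D cases, assuming the
+ * standard transposed-convolution relation for dilation 1 with explicit (Pad_pad) padding:
+ *   out = (in - 1) * stride + kernel - pad_total
+ * e.g. Test1 above: (3 - 1) * 2 + 3 - 2 = 5, and Test2 below: (17 - 1) * 2 + 2 - 0 = 34.
+ */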
+TEST_F(DeDepthwiseConv2DInferTest, DeDepthwiseConv2DInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 17; + inputs[0]->shape_[2] = 17; + inputs[0]->shape_[3] = 6; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + inputs[1]->format_ = Format_KHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 2; + parameter->kernel_w_ = 2; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DeDepthwiseConv2DInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 34); + ASSERT_EQ(outputs[0]->shape_[2], 34); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc new file mode 100644 index 0000000000..fd818522a5 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/depth_to_space_infer_test.cc @@ -0,0 +1,181 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/depth_to_space_infer.h" +#include "src/tensor.h" + +namespace mindspore { + +class DepthToSpaceInferTest : public mindspore::CommonTest { + public: + DepthToSpaceInferTest() {} +}; + +TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 1; + inputs[0]->shape_[3] = 12; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + DepthToSpaceParameter *param = new DepthToSpaceParameter; + param->op_parameter_.infer_flag_ = true; + param->block_size_ = 2; + int ret = DepthToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + DepthToSpaceParameter *param = new DepthToSpaceParameter; + param->op_parameter_.infer_flag_ = true; + param->block_size_ = 2; + int ret = DepthToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 1; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + DepthToSpaceParameter *param = new DepthToSpaceParameter; + param->op_parameter_.infer_flag_ = true; + param->block_size_ = 2; + int ret = DepthToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + 
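+/*
+ * The expected shapes below follow the usual NHWC depth-to-space relation for block size b:
+ *   [N, H, W, C] -> [N, H * b, W * b, C / (b * b)]
+ * e.g. Test3: [4, 5, 7, 32] with b = 4 -> [4, 20, 28, 2]. Test4 passes a 3-D input, so the
+ * infer is expected to reject it with NNACL_ERR.
+ */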
+TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 7; + inputs[0]->shape_[3] = 32; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + DepthToSpaceParameter *param = new DepthToSpaceParameter; + param->op_parameter_.infer_flag_ = true; + param->block_size_ = 4; + int ret = DepthToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 20); + ASSERT_EQ(outputs[0]->shape_[2], 28); + ASSERT_EQ(outputs[0]->shape_[3], 2); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthToSpaceInferTest, DepthToSpaceInferTest4) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + // inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 32; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + DepthToSpaceParameter *param = new DepthToSpaceParameter; + param->op_parameter_.infer_flag_ = true; + param->block_size_ = 4; + int ret = DepthToSpaceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_ERR); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc new file mode 100644 index 0000000000..c78c274617 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/depthwise_conv2d_infer_test.cc @@ -0,0 +1,551 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/depthwise_conv2d_infer.h" + +namespace mindspore { + +class DepthwiseConv2dInferTest : public mindspore::CommonTest { + public: + DepthwiseConv2dInferTest() {} +}; + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; // in channel + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; // channel_multiplier + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 120; + inputs[0]->shape_[2] = 120; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 7; + parameter->kernel_w_ = 7; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_l_ = 3; + parameter->pad_r_ = 3; + parameter->pad_d_ = 3; + parameter->pad_u_ = 3; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 60); + ASSERT_EQ(outputs[0]->shape_[2], 60); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] 
= 30; + inputs[0]->shape_[2] = 30; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 30); + ASSERT_EQ(outputs[0]->shape_[2], 30); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 30; + inputs[0]->shape_[2] = 30; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 1; + parameter->pad_r_ = 1; + parameter->pad_d_ = 1; + parameter->pad_u_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 15); + ASSERT_EQ(outputs[0]->shape_[2], 15); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 120; + inputs[0]->shape_[2] = 120; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 5; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 5; + parameter->kernel_w_ = 5; + 
parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 58); + ASSERT_EQ(outputs[0]->shape_[2], 58); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest5) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 27; + inputs[0]->shape_[2] = 27; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 25); + ASSERT_EQ(outputs[0]->shape_[2], 25); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest6) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 88; + inputs[0]->shape_[2] = 88; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 1; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 1; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter 
*>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 88); + ASSERT_EQ(outputs[0]->shape_[2], 88); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest7) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 11; + inputs[0]->shape_[2] = 11; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 9; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 9; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 4; + parameter->pad_u_ = 4; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 11); + ASSERT_EQ(outputs[0]->shape_[2], 11); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest8) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 29; + inputs[0]->shape_[2] = 29; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 3; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 3; + parameter->kernel_w_ = 3; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_same; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 29); + ASSERT_EQ(outputs[0]->shape_[2], 29); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, 
DepthwiseConv2dInferTest9) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 14; + inputs[0]->shape_[2] = 14; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 1; + inputs[1]->shape_[2] = 1; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 1; + parameter->kernel_w_ = 1; + parameter->stride_h_ = 1; + parameter->stride_w_ = 1; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 14); + ASSERT_EQ(outputs[0]->shape_[2], 14); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(DepthwiseConv2dInferTest, DepthwiseConv2dInferTest10) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 448; + inputs[0]->shape_[2] = 448; + inputs[0]->shape_[3] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 5; + inputs[1]->shape_[3] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->channel_multiplie_ = 1; + parameter->kernel_h_ = 5; + parameter->kernel_w_ = 5; + parameter->stride_h_ = 2; + parameter->stride_w_ = 2; + parameter->dilation_h_ = 1; + parameter->dilation_w_ = 1; + parameter->pad_mode_ = Pad_same; + parameter->pad_l_ = 0; + parameter->pad_r_ = 0; + parameter->pad_d_ = 0; + parameter->pad_u_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = DepthwiseConv2dInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 224); + ASSERT_EQ(outputs[0]->shape_[2], 224); + ASSERT_EQ(outputs[0]->shape_[3], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc new file mode 100644 index 0000000000..c998bf68c5 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/detection_post_process_infer_test.cc @@ -0,0 +1,83 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not 
use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/detection_post_process_infer.h" + +namespace mindspore { + +class DetectionPostProcessInferTest : public mindspore::CommonTest { + public: + DetectionPostProcessInferTest() {} +}; + +TEST_F(DetectionPostProcessInferTest, DetectionPostProcessInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 10; + inputs[2] = new TensorC; + inputs[2]->shape_[0] = 5; + std::vector<TensorC *> outputs(4, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + outputs[2] = new TensorC; + outputs[3] = new TensorC; + DetectionPostProcessParameter *parameter = new DetectionPostProcessParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->max_detections_ = 20; + parameter->max_classes_per_detection_ = 3; + parameter->num_classes_ = 10; + int ret = DetectionPostProcessInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 60); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 1); + ASSERT_EQ(outputs[1]->shape_[1], 60); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + ASSERT_EQ(outputs[2]->shape_size_, 2); + ASSERT_EQ(outputs[2]->shape_[0], 1); + ASSERT_EQ(outputs[2]->shape_[1], 60); + ASSERT_EQ(outputs[2]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[2]->format_, Format_NHWC); + ASSERT_EQ(outputs[3]->shape_size_, 1); + ASSERT_EQ(outputs[3]->shape_[0], 1); + ASSERT_EQ(outputs[3]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[3]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc new file mode 100644 index 0000000000..24e61f841d --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/dropout_grad_infer_test.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/dropout_grad_infer.h" + +namespace mindspore { + +class DropoutGradInferTest : public mindspore::CommonTest { + public: + DropoutGradInferTest() {} +}; + +TEST_F(DropoutGradInferTest, DropoutGradInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = DropoutGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc new file mode 100644 index 0000000000..58eac2a12d --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/embedding_lookup_infer_test.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/embedding_lookup_infer.h" + +namespace mindspore { + +class EmbeddingLookupInferTest : public mindspore::CommonTest { + public: + EmbeddingLookupInferTest() {} +}; + +// https://tensorflow.google.cn/api_docs/python/tf/nn/embedding_lookup?hl=en +TEST_F(EmbeddingLookupInferTest, EmbeddingLookupInferTest0) { + size_t inputs_size = 4; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 5; + inputs[1]->shape_[1] = 2; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 2; + inputs[2]->shape_[0] = 5; + inputs[2]->shape_[1] = 2; + inputs[3] = new TensorC; + inputs[3]->shape_size_ = 1; + inputs[3]->shape_[0] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = EmbeddingLookupInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc new file mode 100644 index 0000000000..7a7d6b06fa --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/expand_dims_infer_test.cc @@ -0,0 +1,126 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/expand_dims_infer.h" + +namespace mindspore { + +class ExpandDimsInferTest : public mindspore::CommonTest { + public: + ExpandDimsInferTest() {} +}; + +// https://tensorflow.google.cn/api_docs/python/tf/expand_dims?hl=en +TEST_F(ExpandDimsInferTest, ExpandDimsInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 10; + inputs[0]->shape_[1] = 10; + inputs[0]->shape_[2] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ExpandDimsInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 10); + ASSERT_EQ(outputs[0]->shape_[2], 10); + ASSERT_EQ(outputs[0]->shape_[3], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ExpandDimsInferTest, ExpandDimsInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 10; + inputs[0]->shape_[1] = 10; + inputs[0]->shape_[2] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ExpandDimsInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 10); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 10); + ASSERT_EQ(outputs[0]->shape_[3], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ExpandDimsInferTest, ExpandDimsInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 10; + inputs[0]->shape_[1] = 10; + inputs[0]->shape_[2] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = + ExpandDimsInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), parameter); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 10); + ASSERT_EQ(outputs[0]->shape_[1], 10); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < 
inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc new file mode 100644 index 0000000000..32e7b4f262 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/fft_imag_infer_test.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/fft_imag_infer.h" + +namespace mindspore { + +class FftImagInferTest : public mindspore::CommonTest { + public: + FftImagInferTest() {} +}; + +TEST_F(FftImagInferTest, FftImagInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + inputs[0]->shape_[3] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = FftImagInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc new file mode 100644 index 0000000000..504ca19047 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/fill_infer_test.cc @@ -0,0 +1,140 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/fill_infer.h" +#include "nnacl/fill_parameter.h" + +namespace mindspore { + +class FillInferTest : public mindspore::CommonTest { + public: + FillInferTest() {} +}; + +TEST_F(FillInferTest, FillInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + FillParameter *parameter = new FillParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->num_dims_ = 4; + parameter->dims_[0] = 1; + parameter->dims_[1] = 2; + parameter->dims_[2] = 3; + parameter->dims_[3] = 4; + int ret = FillInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FillInferTest, FillInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + FillParameter *parameter = new FillParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->num_dims_ = 3; + parameter->dims_[0] = 4; + parameter->dims_[1] = 2; + parameter->dims_[2] = 3; + int ret = FillInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FillInferTest, FillInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + FillParameter *parameter = new FillParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->num_dims_ = 2; + parameter->dims_[0] = 4; + parameter->dims_[1] = 2; + int ret = FillInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FillInferTest, FillInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + FillParameter *parameter = new FillParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->num_dims_ = 1; + 
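// Fill takes its output shape from FillParameter::dims_ (num_dims_ entries), which is why inputs[0] carries no shape in the tests below. +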
parameter->dims_[0] = 4; + int ret = FillInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc new file mode 100644 index 0000000000..76c8747101 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/flatten_grad_infer_test.cc @@ -0,0 +1,57 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/flatten_grad_infer.h" + +namespace mindspore { + +class FlattenGradInferTest : public mindspore::CommonTest { + public: + FlattenGradInferTest() {} +}; + +TEST_F(FlattenGradInferTest, FlattenGradInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = FlattenGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 15); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc new file mode 100644 index 0000000000..7e0f400682 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/flatten_infer_test.cc @@ -0,0 +1,128 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/flatten_infer.h" + +namespace mindspore { + +class FlattenInferTest : public mindspore::CommonTest { + public: + FlattenInferTest() {} +}; + +TEST_F(FlattenInferTest, FlattenInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + OpParameter *param = new OpParameter; + param->infer_flag_ = true; + int ret = FlattenInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), param); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 24); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FlattenInferTest, FlattenInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + OpParameter *param = new OpParameter; + param->infer_flag_ = true; + int ret = FlattenInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), param); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 12); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FlattenInferTest, FlattenInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + OpParameter *param = new OpParameter; + param->infer_flag_ = true; + int ret = FlattenInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), param); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FlattenInferTest, FlattenInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + std::vector<TensorC *> outputs(inputs_size, NULL); + outputs[0] = new TensorC; + OpParameter *param = new OpParameter; + param->infer_flag_ = true; + int ret = FlattenInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), param); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete 
inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc new file mode 100644 index 0000000000..bcf65e01c0 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/full_connection_infer_test.cc @@ -0,0 +1,125 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/full_connection_infer.h" + +namespace mindspore { + +class FullConnectionInferTest : public mindspore::CommonTest { + public: + FullConnectionInferTest() {} +}; + +// mtk_pose_tuku.caffemodel +TEST_F(FullConnectionInferTest, FullConnectionInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 64; + inputs[0]->shape_[2] = 5; + inputs[0]->shape_[3] = 5; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 256; + inputs[1]->shape_[1] = 1600; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MatMulParameter *param = new MatMulParameter; + param->op_parameter_.infer_flag_ = true; + param->has_bias_ = false; + param->use_axis_ = false; + int ret = FullConnectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 256); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FullConnectionInferTest, FullConnectionInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 256; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 128; + inputs[1]->shape_[1] = 256; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MatMulParameter *param = new MatMulParameter; + param->op_parameter_.infer_flag_ = true; + param->has_bias_ = false; + param->use_axis_ = false; + int ret = FullConnectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 128); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(FullConnectionInferTest, FullConnectionInferTest2) { + size_t 
inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 128; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 128; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MatMulParameter *param = new MatMulParameter; + param->op_parameter_.infer_flag_ = true; + param->has_bias_ = false; + param->use_axis_ = false; + int ret = FullConnectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc new file mode 100644 index 0000000000..1e406644c7 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/fused_batchnorm_infer_test.cc @@ -0,0 +1,64 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/fused_batchnorm_infer.h" + +namespace mindspore { + +class FusedBatchNormInferTest : public mindspore::CommonTest { + public: + FusedBatchNormInferTest() {} +}; + +TEST_F(FusedBatchNormInferTest, FusedBatchNormInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 2; + inputs[2]->shape_[0] = 8; + inputs[2]->shape_[1] = 7; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = FusedBatchNormInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 6); + ASSERT_EQ(outputs[1]->shape_[1], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc new file mode 100644 index 0000000000..c3ab52095f --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/gather_infer_test.cc @@ -0,0 +1,194 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/gather_infer.h" + +namespace mindspore { + +class GatherInferTest : public mindspore::CommonTest { + public: + GatherInferTest() {} +}; + +TEST_F(GatherInferTest, GatherInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 18; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherParameter *param = new GatherParameter; + param->op_parameter_.infer_flag_ = true; + param->axis_ = 0; + int ret = GatherInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherInferTest, GatherInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 18; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherParameter *param = new GatherParameter; + param->op_parameter_.infer_flag_ = true; + param->axis_ = 0; + int ret = GatherInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherInferTest, GatherInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 18; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 2; + inputs[1]->shape_[2] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherParameter *param = new GatherParameter; + param->op_parameter_.infer_flag_ = true; + param->axis_ = 0; + int ret = GatherInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherInferTest, GatherInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> 
inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 18; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherParameter *param = new GatherParameter; + param->op_parameter_.infer_flag_ = true; + param->axis_ = 0; + int ret = GatherInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherInferTest, GatherInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherParameter *param = new GatherParameter; + param->op_parameter_.infer_flag_ = true; + param->axis_ = 0; + int ret = GatherInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(param)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 6); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 2); + ASSERT_EQ(outputs[0]->shape_[4], 3); + ASSERT_EQ(outputs[0]->shape_[5], 3); + delete param; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc new file mode 100644 index 0000000000..b7074eeb20 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/gather_nd_infer_test.cc @@ -0,0 +1,187 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/gather_nd_infer.h" + +namespace mindspore { + +class GatherNdInferTest : public mindspore::CommonTest { + public: + GatherNdInferTest() {} +}; + +TEST_F(GatherNdInferTest, GatherNdInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherNdParameter *parameter = new GatherNdParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GatherNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherNdInferTest, GatherNdInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherNdParameter *parameter = new GatherNdParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GatherNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherNdInferTest, GatherNdInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherNdParameter *parameter = new GatherNdParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GatherNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherNdInferTest, GatherNdInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new 
TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 4; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherNdParameter *parameter = new GatherNdParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GatherNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(GatherNdInferTest, GatherNdInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + inputs[0]->shape_[4] = 6; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 2; + inputs[1]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + GatherNdParameter *parameter = new GatherNdParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GatherNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc new file mode 100644 index 0000000000..0e471ed8cc --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/group_conv2d_grad_input_infer_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/group_conv2d_grad_input_infer.h" + +namespace mindspore { + +class GroupConv2dGradInputInferTest : public mindspore::CommonTest { + public: + GroupConv2dGradInputInferTest() {} +}; + +TEST_F(GroupConv2dGradInputInferTest, GroupConv2dGradInputInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[1] = new TensorC; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ConvParameter *parameter = new ConvParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = GroupConv2dGradInputInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc new file mode 100644 index 0000000000..2a3f065918 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/gru_infer_test.cc @@ -0,0 +1,134 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/gru_infer.h" + +namespace mindspore { + +class GruInferTest : public mindspore::CommonTest { + public: + GruInferTest() {} +}; + +TEST_F(GruInferTest, GruInferTest0) { + size_t inputs_size = 5; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[1] = 9; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 3; + inputs[2]->shape_[1] = 9; + inputs[3] = new TensorC; + inputs[3]->shape_[1] = 18; + inputs[4] = new TensorC; + + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + GruParameter *parameter = new GruParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->bidirectional_ = true; + OpParameter *param = reinterpret_cast<OpParameter *>(parameter); + int ret = GruInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), param); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 2); + ASSERT_EQ(outputs[1]->shape_[1], 5); + ASSERT_EQ(outputs[1]->shape_[2], 3); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +// bidirectional_ is false and inputs_size is 6 +TEST_F(GruInferTest, GruInferTest1) { + size_t inputs_size = 6; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[1] = 9; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 3; + inputs[2]->shape_[1] = 9; + inputs[3] = new TensorC; + inputs[3]->shape_[1] = 18; + inputs[4] = new TensorC; + inputs[5] = new TensorC; + inputs[5]->shape_size_ = 1; + inputs[5]->shape_[0] = -1; + + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + GruParameter *parameter = new GruParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->bidirectional_ = false; + int ret = GruInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 1); + ASSERT_EQ(outputs[1]->shape_[1], 5); +
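// hidden_size is 3: the gate-weight inputs carry shape_[1] = 9 = 3 gates * hidden_size. +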
ASSERT_EQ(outputs[1]->shape_[2], 3); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc new file mode 100644 index 0000000000..d18fca3609 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/hashtable_lookup_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/hashtable_lookup_infer.h" + +namespace mindspore { + +class HashtableLookupInferTest : public mindspore::CommonTest { + public: + HashtableLookupInferTest() {} +}; + +TEST_F(HashtableLookupInferTest, HashtableLookupInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[0]->data_ = NULL; // must be NULLed explicitly: a bare new TensorC leaves data_ uninitialized + inputs[1] = new TensorC; + inputs[2] = new TensorC; + inputs[2]->data_type_ = kNumberTypeFloat32; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = HashtableLoopupInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_INFER_INVALID); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 1); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeUInt8); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/infer_manager_test.cc b/mindspore/lite/test/ut/nnacl/infer/infer_manager_test.cc new file mode 100644 index 0000000000..7d03d9c0e3 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/infer_manager_test.cc @@ -0,0 +1,204 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "src/tensorlist.h" +#include "mindspore/lite/src/runtime/infer_manager.h" + +namespace mindspore::lite { + +class InferManagerTest : public mindspore::CommonTest { + public: + InferManagerTest() {} +}; + +// PrimitiveType_TensorListFromTensor +TEST_F(InferManagerTest, InferManagerTest0) { + Tensor *tensor0 = new (std::nothrow) Tensor; + std::vector<int> tensor0_shape = {4, 6, 5}; + tensor0->set_shape(tensor0_shape); + tensor0->set_data_type(kNumberTypeInt32); + Tensor *tensor1 = new (std::nothrow) Tensor; + std::vector<int> tensor1_shape = {1, 2}; + tensor1->set_shape(tensor1_shape); + std::vector<int> tensor1_data = {-1, 5}; + tensor1->set_data(tensor1_data.data()); + tensor1->set_data_type(kNumberTypeInt32); + std::vector<lite::Tensor *> inputs; + inputs.push_back(tensor0); + inputs.push_back(tensor1); + + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + parameter->type_ = mindspore::schema::PrimitiveType_TensorListFromTensor; + + std::vector<lite::Tensor *> outputs; + TensorList *tensorList = new (std::nothrow) TensorList; + tensorList->set_data_type(kObjectTypeTensorType); + Tensor *output = reinterpret_cast<Tensor *>(tensorList); + outputs.push_back(output); + + int ret = KernelInferShape(inputs, &outputs, parameter); + + TensorList *out = reinterpret_cast<TensorList *>(outputs[0]); + + ASSERT_EQ(ret, RET_OK); + ASSERT_EQ(out->shape()[0], 4); + ASSERT_EQ(out->data_type(), kObjectTypeTensorType); + ASSERT_EQ(out->element_shape().size(), 2); + ASSERT_EQ(out->element_shape()[0], -1); + ASSERT_EQ(out->element_shape()[1], 5); + ASSERT_EQ(out->tensors_data_type(), kNumberTypeInt32); + // ASSERT_EQ(outputs[0]->format_, Format_NHWC); + for (int i = 0; i < out->shape()[0]; i++) { + ASSERT_EQ(out->tensors()[i]->shape().size(), 2); + ASSERT_EQ(out->tensors()[i]->shape()[0], 6); + ASSERT_EQ(out->tensors()[i]->shape()[1], 5); + } + delete parameter; + for (size_t i = 0; i < 2; i++) { + if (inputs[i]->data_type() == kObjectTypeTensorType) { + delete reinterpret_cast<TensorList *>(inputs[i]); + } else { + delete inputs[i]; + } + } + for (size_t i = 0; i < outputs.size(); i++) { + if (outputs[i]->data_type() == kObjectTypeTensorType) { + delete reinterpret_cast<TensorList *>(outputs[i]); + } else { + delete outputs[i]; + } + } +} + +// PrimitiveType_TensorListGetItem +TEST_F(InferManagerTest, InferManagerTest1) { + TensorList *tensorList = new TensorList; + tensorList->set_data_type(kObjectTypeTensorType); + Tensor *tensor0_0 = new Tensor; + std::vector<int> tensor0_0_shape = {1, 2}; + tensor0_0->set_shape(tensor0_0_shape); + Tensor *tensor0_1 = new Tensor; + std::vector<int> tensor0_1_shape = {3, 4, 5}; + tensor0_1->set_shape(tensor0_1_shape); + Tensor *tensor0_2 = new Tensor; + std::vector<int> tensor0_2_shape = {6, 7, 8, 9}; + tensor0_2->set_shape(tensor0_2_shape); + std::vector<Tensor *> tensor0; + tensor0.push_back(tensor0_0); + tensor0.push_back(tensor0_1); + tensor0.push_back(tensor0_2); + tensorList->set_tensors(tensor0); + std::vector<int> tensorlist_shape = {3}; +
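// the TensorList's own shape is 1-D: the number of stored element tensors. +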
tensorList->set_shape(tensorlist_shape); + + Tensor *tensor1 = new Tensor; + std::vector<int> tensor1_shape = {1}; + std::vector<int> tensor1_data = {2}; + tensor1->set_shape(tensor1_shape); + tensor1->set_data(tensor1_data.data()); + Tensor *tensor2 = new Tensor; + + std::vector<lite::Tensor *> inputs; + inputs.push_back(reinterpret_cast<Tensor *>(tensorList)); + inputs.push_back(tensor1); + inputs.push_back(tensor2); + + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + parameter->type_ = mindspore::schema::PrimitiveType_TensorListGetItem; + + std::vector<lite::Tensor *> outputs; + Tensor *output = new Tensor; + outputs.push_back(output); + int res = KernelInferShape(inputs, &outputs, parameter); + ASSERT_EQ(res, RET_OK); + ASSERT_EQ(outputs[0]->shape().size(), 4); + ASSERT_EQ(outputs[0]->shape().at(0), 6); + ASSERT_EQ(outputs[0]->shape().at(1), 7); + ASSERT_EQ(outputs[0]->shape().at(2), 8); + ASSERT_EQ(outputs[0]->shape().at(3), 9); + delete parameter; + for (size_t i = 0; i < 3; i++) { + if (inputs[i]->data_type() == kObjectTypeTensorType) { + delete reinterpret_cast<TensorList *>(inputs[i]); + } else { + delete inputs[i]; + } + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(InferManagerTest, InferManagerTest2) { + Tensor *tensor0 = new (std::nothrow) Tensor; + std::vector<int> tensor0_shape = {3}; + tensor0->set_shape(tensor0_shape); + tensor0->set_data_type(kNumberTypeInt32); + std::vector<int> tensor0_data = {2, 3, 4}; + tensor0->set_data(tensor0_data.data()); + Tensor *tensor1 = new (std::nothrow) Tensor; + std::vector<int> tensor1_shape = {1}; + tensor1->set_shape(tensor1_shape); + std::vector<int> tensor1_data = {5}; + tensor1->set_data(tensor1_data.data()); + tensor1->set_data_type(kNumberTypeInt32); + std::vector<lite::Tensor *> inputs; + inputs.push_back(tensor0); + inputs.push_back(tensor1); + + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + parameter->type_ = mindspore::schema::PrimitiveType_TensorListReserve; + + std::vector<lite::Tensor *> outputs; + TensorList *tensorList = new (std::nothrow) TensorList; + tensorList->set_data_type(kObjectTypeTensorType); + Tensor *output = reinterpret_cast<Tensor *>(tensorList); + outputs.push_back(output); + + int ret = KernelInferShape(inputs, &outputs, parameter); + + TensorList *out = reinterpret_cast<TensorList *>(outputs[0]); + + ASSERT_EQ(ret, RET_OK); + ASSERT_EQ(out->shape()[0], 5); + ASSERT_EQ(out->data_type(), kObjectTypeTensorType); + ASSERT_EQ(out->element_shape().size(), 3); + ASSERT_EQ(out->element_shape()[0], 2); + ASSERT_EQ(out->element_shape()[1], 3); + ASSERT_EQ(out->element_shape()[2], 4); + ASSERT_EQ(out->tensors_data_type(), kTypeUnknown); + + delete parameter; + for (size_t i = 0; i < 2; i++) { + if (inputs[i]->data_type() == kObjectTypeTensorType) { + delete reinterpret_cast<TensorList *>(inputs[i]); + } else { + delete inputs[i]; + } + } + for (size_t i = 0; i < outputs.size(); i++) { + if (outputs[i]->data_type() == kObjectTypeTensorType) { + delete reinterpret_cast<TensorList *>(outputs[i]); + } else { + delete outputs[i]; + } + } +} + +} // namespace mindspore::lite diff --git a/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc new file mode 100644 index 0000000000..59709d5cf8 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/invert_permutation_infer_test.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2020 Huawei Technologies Co., 
Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/invert_permutation_infer.h" + +namespace mindspore { + +class InvertPermutationInferTest : public mindspore::CommonTest { + public: + InvertPermutationInferTest() {} +}; + +TEST_F(InvertPermutationInferTest, InvertPermutationInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = InvertPermutationInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc new file mode 100644 index 0000000000..d0bad4679b --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/layer_norm_infer_test.cc @@ -0,0 +1,107 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/layer_norm_infer.h" + +namespace mindspore { + +class LayerNormInferTest : public mindspore::CommonTest { + public: + LayerNormInferTest() {} +}; + +TEST_F(LayerNormInferTest, LayerNormInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + LayerNormParameter *parameter = new LayerNormParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->normalized_dims_ = 1; + parameter->elementwise_affine_ = false; + parameter->normalized_shape_[0] = 3; + int ret = LayerNormInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(LayerNormInferTest, LayerNormInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + LayerNormParameter *parameter = new LayerNormParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->normalized_dims_ = 3; + parameter->elementwise_affine_ = false; + parameter->normalized_shape_[0] = 3; + int ret = LayerNormInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_PARAM_INVALID); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(LayerNormInferTest, LayerNormInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + LayerNormParameter *parameter = new LayerNormParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->normalized_dims_ = 2; + parameter->elementwise_affine_ = false; + parameter->normalized_shape_[0] = 3; + int ret = LayerNormInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_PARAM_INVALID); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc new file mode 100644 index 0000000000..98f7600a78 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/lsh_projection_infer_test.cc @@ -0,0 +1,126 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/lsh_projection_infer.h" + +namespace mindspore { + +class LshProjectionInferTest : public mindspore::CommonTest { + public: + LshProjectionInferTest() {} +}; + +TEST_F(LshProjectionInferTest, LshProjectionInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + LshProjectionParameter *parameter = new LshProjectionParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->lsh_type_ = LshProjectionType_SPARSE; + int ret = LshProjectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(LshProjectionInferTest, LshProjectionInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + LshProjectionParameter *parameter = new LshProjectionParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->lsh_type_ = LshProjectionType_DENSE; + int ret = LshProjectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4 * 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(LshProjectionInferTest, LshProjectionInferTest2) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 5; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 1; + inputs[2]->shape_[0] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new 
TensorC;
+ LshProjectionParameter *parameter = new LshProjectionParameter;
+ parameter->op_parameter_.infer_flag_ = true;
+ parameter->lsh_type_ = LshProjectionType_DENSE;
+ int ret = LshProjectionInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+ reinterpret_cast<OpParameter *>(parameter));
+ ASSERT_EQ(ret, NNACL_OK);
+ ASSERT_EQ(outputs[0]->shape_size_, 1);
+ ASSERT_EQ(outputs[0]->shape_[0], 4 * 3);
+ ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32);
+ ASSERT_EQ(outputs[0]->format_, Format_NHWC);
+ delete parameter;
+ for (size_t i = 0; i < inputs_size; i++) {
+ delete inputs[i];
+ }
+ for (size_t i = 0; i < outputs.size(); i++) {
+ delete outputs[i];
+ }
+} // NOTE: the 4 * 3 DENSE expectation in this three-input case is unverified and may be wrong
+
+} // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc
new file mode 100644
index 0000000000..3ac83ed5fd
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/lstm_infer_test.cc
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/lstm_infer.h"
+
+namespace mindspore {
+
+class LstmInferTest : public mindspore::CommonTest {
+ public:
+ LstmInferTest() {}
+};
+
+TEST_F(LstmInferTest, LstmInferTest0) {
+ size_t inputs_size = 6;
+ std::vector<TensorC *> inputs(inputs_size, NULL);
+ int seq_len = 2;
+ int batch = 4;
+ int input_size = 5;
+ int hidden_size = 2;
+ inputs[0] = new TensorC;
+ inputs[0]->shape_size_ = 3;
+ inputs[0]->shape_[0] = seq_len;
+ inputs[0]->shape_[1] = batch;
+ inputs[0]->shape_[2] = input_size;
+ inputs[1] = new TensorC;
+ inputs[1]->shape_size_ = 3;
+ inputs[1]->shape_[0] = 1;
+ inputs[1]->shape_[1] = hidden_size * 4;
+ inputs[1]->shape_[2] = input_size;
+ inputs[2] = new TensorC;
+ inputs[3] = new TensorC;
+ inputs[4] = new TensorC;
+ inputs[5] = new TensorC;
+ std::vector<TensorC *> outputs(3, NULL);
+ outputs[0] = new TensorC;
+ outputs[1] = new TensorC;
+ outputs[2] = new TensorC;
+ LstmParameter *parameter = new LstmParameter;
+ parameter->bidirectional_ = false;
+ parameter->op_parameter_.infer_flag_ = true;
+ int ret = LstmInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+ reinterpret_cast<OpParameter *>(parameter));
+ ASSERT_EQ(ret, NNACL_OK);
+ ASSERT_EQ(outputs[0]->shape_size_, 4);
+ ASSERT_EQ(outputs[0]->shape_[0], seq_len);
+ ASSERT_EQ(outputs[0]->shape_[1], 1);
+ ASSERT_EQ(outputs[0]->shape_[2], batch);
+ ASSERT_EQ(outputs[0]->shape_[3], hidden_size);
+ ASSERT_EQ(outputs[1]->shape_size_, 3);
+ ASSERT_EQ(outputs[1]->shape_[0], 1);
+ ASSERT_EQ(outputs[1]->shape_[1], batch);
+ ASSERT_EQ(outputs[1]->shape_[2], hidden_size);
+ ASSERT_EQ(outputs[2]->shape_size_, 3);
+ ASSERT_EQ(outputs[2]->shape_[0], 1);
+ ASSERT_EQ(outputs[2]->shape_[1], batch);
+ ASSERT_EQ(outputs[2]->shape_[2], hidden_size);
+ delete parameter;
+ for (size_t i = 0; i < inputs_size; i++) {
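+ // each input TensorC above came from new; free them all so the test does not leak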
+ delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc new file mode 100644 index 0000000000..eb2937096a --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/matmul_infer_test.cc @@ -0,0 +1,161 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/matmul_infer.h" + +namespace mindspore { + +class MatmulInferTest : public mindspore::CommonTest { + public: + MatmulInferTest() {} +}; + +TEST_F(MatmulInferTest, MatmulInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 4; + inputs[1]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MatMulParameter *parameter = new MatMulParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->a_transpose_ = false; + parameter->b_transpose_ = true; + int ret = MatmulInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MatmulInferTest, MatmulInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 3; + inputs[1]->shape_[2] = 5; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MatMulParameter *parameter = new MatMulParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->a_transpose_ = false; + parameter->b_transpose_ = false; + int ret = MatmulInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MatmulInferTest, MatmulInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> 
inputs(inputs_size, NULL);
+ inputs[0] = new TensorC;
+ inputs[0]->shape_size_ = 4;
+ inputs[0]->shape_[0] = 1;
+ inputs[0]->shape_[1] = 128;
+ inputs[0]->shape_[2] = 1;
+ inputs[0]->shape_[3] = 1;
+ inputs[1] = new TensorC;
+ inputs[1]->shape_size_ = 2;
+ inputs[1]->shape_[0] = 96;
+ inputs[1]->shape_[1] = 128;
+ std::vector<TensorC *> outputs(1, NULL);
+ outputs[0] = new TensorC;
+ MatMulParameter *parameter = new MatMulParameter;
+ parameter->op_parameter_.infer_flag_ = true;
+ parameter->a_transpose_ = false;
+ parameter->b_transpose_ = true;
+ int ret = MatmulInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+ reinterpret_cast<OpParameter *>(parameter));
+ ASSERT_EQ(ret, NNACL_OK);
+ ASSERT_EQ(inputs[0]->shape_size_, 2);
+ ASSERT_EQ(inputs[0]->shape_[0], 1);
+ ASSERT_EQ(inputs[0]->shape_[1], 128);
+ ASSERT_EQ(outputs[0]->shape_size_, 2);
+ ASSERT_EQ(outputs[0]->shape_[0], 1);
+ ASSERT_EQ(outputs[0]->shape_[1], 96);
+ delete parameter;
+ for (size_t i = 0; i < inputs_size; i++) {
+ delete inputs[i];
+ }
+ for (size_t i = 0; i < outputs.size(); i++) {
+ delete outputs[i];
+ }
+}
+
+TEST_F(MatmulInferTest, MatmulInferTest3) {
+ size_t inputs_size = 2;
+ std::vector<TensorC *> inputs(inputs_size, NULL);
+ inputs[0] = new TensorC;
+ inputs[0]->shape_size_ = 2;
+ inputs[0]->shape_[0] = 1;
+ inputs[0]->shape_[1] = 1280;
+ inputs[1] = new TensorC;
+ inputs[1]->shape_size_ = 2;
+ inputs[1]->shape_[0] = 256;
+ inputs[1]->shape_[1] = 1280;
+ std::vector<TensorC *> outputs(1, NULL);
+ outputs[0] = new TensorC;
+ MatMulParameter *parameter = new MatMulParameter;
+ parameter->op_parameter_.infer_flag_ = true;
+ parameter->a_transpose_ = false;
+ parameter->b_transpose_ = true;
+ int ret = MatmulInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+ reinterpret_cast<OpParameter *>(parameter));
+ ASSERT_EQ(ret, NNACL_OK);
+ ASSERT_EQ(outputs[0]->shape_size_, 2);
+ ASSERT_EQ(outputs[0]->shape_[0], 1);
+ ASSERT_EQ(outputs[0]->shape_[1], 256);
+ delete parameter;
+ for (size_t i = 0; i < inputs_size; i++) {
+ delete inputs[i];
+ }
+ for (size_t i = 0; i < outputs.size(); i++) {
+ delete outputs[i];
+ }
+}
+} // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/maximum_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/maximum_grad_infer_test.cc
new file mode 100644
index 0000000000..afcce9fcc3
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/maximum_grad_infer_test.cc
@@ -0,0 +1,84 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/maximum_grad_infer.h" + +namespace mindspore { + +class MaximumGradInferTest : public mindspore::CommonTest { + public: + MaximumGradInferTest() {} +}; + +TEST_F(MaximumGradInferTest, MaximumGradInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 5; + inputs[1]->shape_[1] = 6; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 3; + inputs[2]->shape_[0] = 7; + inputs[2]->shape_[1] = 8; + inputs[2]->shape_[2] = 9; + inputs[2]->data_type_ = kNumberTypeInt32; + inputs[2]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + MaximumGradParameter *parameter = new MaximumGradParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = MaximumGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 5); + ASSERT_EQ(outputs[1]->shape_[1], 6); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + ASSERT_EQ(parameter->ndim_, 3); + ASSERT_EQ(parameter->dy_shape_size_, 3); + ASSERT_EQ(parameter->dy_shape_[0], 7); + ASSERT_EQ(parameter->dy_shape_[1], 8); + ASSERT_EQ(parameter->dy_shape_[2], 9); + ASSERT_EQ(parameter->x1_shape_size_, 3); + ASSERT_EQ(parameter->x1_shape_[0], 1); + ASSERT_EQ(parameter->x1_shape_[1], 4); + ASSERT_EQ(parameter->x1_shape_[2], 3); + ASSERT_EQ(parameter->x2_shape_size_, 3); + ASSERT_EQ(parameter->x2_shape_[0], 1); + ASSERT_EQ(parameter->x2_shape_[1], 5); + ASSERT_EQ(parameter->x2_shape_[2], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc new file mode 100644 index 0000000000..c0112db8f2 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/mean_infer_test.cc @@ -0,0 +1,182 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/mean_infer.h" + +namespace mindspore { + +class MeanInferTest : public mindspore::CommonTest { + public: + MeanInferTest() {} +}; + +// same as reduce_infer_test.cc +TEST_F(MeanInferTest, MeanInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = false; + parameter->axes_[0] = 1; + parameter->num_axes_ = 1; + int ret = MeanInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MeanInferTest, MeanInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->axes_[0] = 1; + parameter->num_axes_ = 1; + int ret = MeanInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MeanInferTest, MeanInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->axes_[0] = 0; + parameter->axes_[1] = 1; + parameter->num_axes_ = 2; + int ret = MeanInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MeanInferTest, MeanInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter 
*parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->num_axes_ = 2; + parameter->axes_[0] = 1; + parameter->axes_[1] = 3; + int ret = MeanInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(MeanInferTest, MeanInferTest4) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = false; + parameter->num_axes_ = 2; + parameter->axes_[0] = 1; + parameter->axes_[1] = 3; + int ret = MeanInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc new file mode 100644 index 0000000000..dd8d8a89a1 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/mfcc_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/mfcc_infer.h" + +namespace mindspore { + +class MfccInferTest : public mindspore::CommonTest { + public: + MfccInferTest() {} +}; + +TEST_F(MfccInferTest, MfccInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 8; + inputs[0]->data_type_ = kNumberTypeInt; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + MfccParameter *parameter = new MfccParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->dct_coeff_num_ = 5; + int ret = MfccInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc new file mode 100644 index 0000000000..b94aab4fb4 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/one_hot_infer_test.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/one_hot_infer.h" + +namespace mindspore { + +class OneHotInferTest : public mindspore::CommonTest { + public: + OneHotInferTest() {} +}; + +TEST_F(OneHotInferTest, OneHotInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 4; + inputs[1] = new TensorC; + std::vector<int> input1_data = {3}; + inputs[1]->data_ = input1_data.data(); + inputs[2] = new TensorC; + inputs[2]->data_type_ = kNumberTypeFloat32; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OneHotParameter *parameter = new OneHotParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = -2; + int ret = OneHotInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeFloat32); + + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc new file mode 100644 index 0000000000..96cfcf0cf0 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/pad_infer_test.cc @@ -0,0 +1,193 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/pad_infer.h" + +namespace mindspore { + +class PadInferTest : public mindspore::CommonTest { + public: + PadInferTest() {} +}; + +TEST_F(PadInferTest, PadInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + std::vector<int> padding_tensor = {1, 1, 2, 2}; + inputs[1]->data_ = padding_tensor.data(); + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PadParameter *parameter = new PadParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = PadInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PadInferTest, PadInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PadParameter *parameter = new PadParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->padding_length = 4; + parameter->paddings_[0] = 1; + parameter->paddings_[1] = 1; + parameter->paddings_[2] = 2; + parameter->paddings_[3] = 2; + int ret = PadInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PadInferTest, PadInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PadParameter *parameter = new PadParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->padding_length = 6; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 1; + parameter->paddings_[3] = 2; + parameter->paddings_[4] = 3; + parameter->paddings_[5] = 4; + int ret = PadInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 6); + ASSERT_EQ(outputs[0]->shape_[2], 11); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PadInferTest, PadInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, 
NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + inputs[1] = new TensorC; + std::vector<int> padding_tensor = {0, 0, 1, 2, 3, 4}; + inputs[1]->data_ = padding_tensor.data(); + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 6; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PadParameter *parameter = new PadParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = PadInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 6); + ASSERT_EQ(outputs[0]->shape_[2], 11); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PadInferTest, PadInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 5; + inputs[1] = new TensorC; + std::vector<int> padding_tensor = {1, 2, 3, 4, 5, 6, 7, 8}; + inputs[1]->data_ = padding_tensor.data(); + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 8; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PadParameter *parameter = new PadParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = PadInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 10); + ASSERT_EQ(outputs[0]->shape_[2], 15); + ASSERT_EQ(outputs[0]->shape_[3], 20); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc new file mode 100644 index 0000000000..94cd43a6d6 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/pooling_grad_infer_test.cc @@ -0,0 +1,69 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/pooling_grad_infer.h" + +namespace mindspore { + +class PoolingGradInferTest : public mindspore::CommonTest { + public: + PoolingGradInferTest() {} +}; + +TEST_F(PoolingGradInferTest, PoolingGradInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 14; + inputs[0]->shape_[2] = 14; + inputs[0]->shape_[3] = 3; + inputs[1] = new TensorC; + inputs[2] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 3; + parameter->window_h_ = 3; + parameter->stride_w_ = 1; + parameter->stride_h_ = 1; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_same; + parameter->round_mode_ = RoundMode_Floor; + int ret = PoolingGradInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 14); + ASSERT_EQ(outputs[0]->shape_[2], 14); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc new file mode 100644 index 0000000000..6be3296dc6 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/pooling_infer_test.cc @@ -0,0 +1,276 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/pooling_infer.h" + +namespace mindspore { + +class PoolingInferTest : public mindspore::CommonTest { + public: + PoolingInferTest() {} +}; + +TEST_F(PoolingInferTest, PoolingInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 58; + inputs[0]->shape_[2] = 58; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 2; + parameter->window_h_ = 2; + parameter->stride_w_ = 2; + parameter->stride_h_ = 2; + parameter->pad_mode_ = Pad_pad; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->round_mode_ = RoundMode_Ceil; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 29); + ASSERT_EQ(outputs[0]->shape_[2], 29); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PoolingInferTest, PoolingInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 14; + inputs[0]->shape_[2] = 14; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 3; + parameter->window_h_ = 3; + parameter->stride_w_ = 1; + parameter->stride_h_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_same; + parameter->round_mode_ = RoundMode_Ceil; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 14); + ASSERT_EQ(outputs[0]->shape_[2], 14); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PoolingInferTest, PoolingInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 60; + inputs[0]->shape_[2] = 60; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 3; + parameter->window_h_ = 3; + parameter->stride_w_ = 2; + parameter->stride_h_ = 2; + parameter->pad_mode_ = Pad_pad; + 
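+ // NOTE: the Pad_pad assigned above is dead; it is overwritten with Pad_valid
+ // a few lines below. Under Pad_valid with floor rounding, the usual formula gives
+ // out = floor((in - window) / stride) + 1 = floor((60 - 3) / 2) + 1 = 29,
+ // matching the 29 x 29 shape asserts at the end of this test.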
parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_valid; + parameter->round_mode_ = RoundMode_Floor; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 29); + ASSERT_EQ(outputs[0]->shape_[2], 29); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PoolingInferTest, PoolingInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 7; + inputs[0]->shape_[2] = 7; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 7; + parameter->window_h_ = 7; + parameter->stride_w_ = 1; + parameter->stride_h_ = 1; + parameter->pad_mode_ = Pad_pad; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_valid; + parameter->round_mode_ = RoundMode_Floor; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PoolingInferTest, PoolingInferTest4) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 31; + inputs[0]->shape_[2] = 31; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 2; + parameter->window_h_ = 2; + parameter->stride_w_ = 2; + parameter->stride_h_ = 2; + parameter->pad_mode_ = Pad_pad; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_pad; + parameter->round_mode_ = RoundMode_Ceil; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 16); + ASSERT_EQ(outputs[0]->shape_[2], 16); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PoolingInferTest, PoolingInferTest5) { 
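+ // 16 x 16 spatial input, 2 x 2 window, stride 2, zero explicit pads, ceil rounding:
+ // out = ceil((16 + 0 - 2) / 2) + 1 = 8 (a sketch of the usual pooling arithmetic,
+ // assuming the Pad_pad branch applies the explicit pad_u_/pad_d_ values), so the
+ // asserts below expect a {21, 8, 8, 3} output.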
+ size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 21; + inputs[0]->shape_[1] = 16; + inputs[0]->shape_[2] = 16; + inputs[0]->shape_[3] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + PoolingParameter *parameter = new PoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->window_w_ = 2; + parameter->window_h_ = 2; + parameter->stride_w_ = 2; + parameter->stride_h_ = 2; + parameter->pad_mode_ = Pad_pad; + parameter->pad_u_ = 0; + parameter->pad_d_ = 0; + parameter->pad_r_ = 0; + parameter->pad_l_ = 0; + parameter->global_ = false; + parameter->pad_mode_ = Pad_pad; + parameter->round_mode_ = RoundMode_Ceil; + int ret = PoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 8); + ASSERT_EQ(outputs[0]->shape_[2], 8); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc new file mode 100644 index 0000000000..b924e75e3f --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/power_infer_test.cc @@ -0,0 +1,115 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/power_infer.h" + +namespace mindspore { + +class PowerInferTest : public mindspore::CommonTest { + public: + PowerInferTest() {} +}; + +TEST_F(PowerInferTest, PowerInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = PowerInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PowerInferTest, PowerInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 4; + inputs[1]->shape_[1] = 3; + inputs[1]->data_type_ = kNumberTypeInt; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = PowerInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(PowerInferTest, PowerInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + inputs[1]->data_type_ = kNumberTypeInt; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = PowerInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc new file mode 100644 index 0000000000..484f016f25 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/quant_dtype_cast_infer_test.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, 
Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/quant_dtype_cast_infer.h"
+
+namespace mindspore {
+
+class QuantDtypeCastInferTest : public mindspore::CommonTest {
+ public:
+  QuantDtypeCastInferTest() {}
+};
+
+TEST_F(QuantDtypeCastInferTest, QuantDtypeCastInferTest0) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 2;
+  inputs[0]->shape_[0] = 4;
+  inputs[0]->shape_[1] = 3;
+  inputs[0]->data_type_ = kNumberTypeFloat32;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  QuantDtypeCastParameter *parameter = new QuantDtypeCastParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->srcT_ = kNumberTypeFloat32;
+  parameter->dstT_ = kNumberTypeInt;
+  int ret = QuantDtypeCastInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                     reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 2);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 3);
+  ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc
new file mode 100644
index 0000000000..db8b6965b7
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/random_standard_normal_infer_test.cc
@@ -0,0 +1,59 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/random_standard_normal_infer.h" + +namespace mindspore { + +class RandomStandardNormalInferTest : public mindspore::CommonTest { + public: + RandomStandardNormalInferTest() {} +}; + +TEST_F(RandomStandardNormalInferTest, RandomStandardNormalInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + std::vector<int> inputs0 = {3, 4, 5, 6}; + inputs[0]->data_ = inputs0.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = RandomStandardNormalInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 6); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeFloat32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc new file mode 100644 index 0000000000..fbcb9d89af --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/range_infer_test.cc @@ -0,0 +1,135 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/range_infer.h"
+
+namespace mindspore {
+
+class RangeInferTest : public mindspore::CommonTest {
+ public:
+  RangeInferTest() {}
+};
+
+// https://tensorflow.google.cn/api_docs/python/tf/range?hl=en
+TEST_F(RangeInferTest, RangeInferTest0) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 2;
+  inputs[0]->shape_[0] = 4;
+  inputs[0]->shape_[1] = 3;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  RangeParameter *parameter = new RangeParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->limit_ = 18;
+  parameter->start_ = 3;
+  parameter->delta_ = 3;  // output length = ceil((limit_ - start_) / delta_) = ceil((18 - 3) / 3) = 5
+  int ret = RangeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 5);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(RangeInferTest, RangeInferTest1) {
+  size_t inputs_size = 3;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  std::vector<int> input0_data = {3};
+  std::vector<int> input1_data = {18};
+  std::vector<int> input2_data = {3};
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 1;
+  inputs[0]->data_ = input0_data.data();
+  inputs[0]->data_type_ = kNumberTypeInt32;
+  inputs[1] = new TensorC;
+  inputs[1]->shape_size_ = 1;
+  inputs[1]->shape_[0] = 1;
+  inputs[1]->data_ = input1_data.data();
+  inputs[2] = new TensorC;
+  inputs[2]->shape_size_ = 1;
+  inputs[2]->shape_[0] = 1;
+  inputs[2]->data_ = input2_data.data();
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  RangeParameter *parameter = new RangeParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  // parameter->limit_ = 18;
+  // parameter->start_ = 3;
+  // parameter->delta_ = 3;
+  int ret = RangeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 5);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(RangeInferTest, RangeInferTest2) {
+  size_t inputs_size = 3;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  std::vector<float> input0_data = {3.0};
+  std::vector<float> input1_data = {18.0};
+  std::vector<float> input2_data = {3.0};
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 1;
+  inputs[0]->data_ = input0_data.data();
+  inputs[0]->data_type_ = kNumberTypeFloat32;
+  inputs[1] = new TensorC;
+  inputs[1]->shape_size_ = 1;
+  inputs[1]->shape_[0] = 1;
+  inputs[1]->data_ = input1_data.data();
+  inputs[2] = new TensorC;
+  inputs[2]->shape_size_ = 1;
+  inputs[2]->shape_[0] = 1;
+  inputs[2]->data_ = input2_data.data();
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  RangeParameter *parameter = new RangeParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  // parameter->limit_ = 18;
+  // parameter->start_ = 3;
+  // parameter->delta_ = 3;
+  int ret = RangeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 5);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc
new file mode 100644
index 0000000000..0b93d6355a
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/rank_infer_test.cc
@@ -0,0 +1,53 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/rank_infer.h"
+
+namespace mindspore {
+
+class RankInferTest : public mindspore::CommonTest {
+ public:
+  RankInferTest() {}
+};
+
+// https://tensorflow.google.cn/api_docs/python/tf/rank?hl=en
+TEST_F(RankInferTest, RankInferTest0) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 3;
+  inputs[0]->shape_[0] = 2;
+  inputs[0]->shape_[1] = 2;
+  inputs[0]->shape_[2] = 3;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  OpParameter *parameter = new OpParameter;
+  parameter->infer_flag_ = true;
+  int ret = RankInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                           reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 1);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc
new file mode 100644
index 0000000000..ec93e6311e
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/reduce_infer_test.cc
@@ -0,0 +1,185 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/reduce_infer.h" + +namespace mindspore { + +class ReduceInferTest : public mindspore::CommonTest { + public: + ReduceInferTest() {} +}; + +TEST_F(ReduceInferTest, ReduceInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = false; + parameter->axes_[0] = 1; + parameter->num_axes_ = 1; + parameter->reduce_to_end_ = false; + int ret = ReduceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReduceInferTest, ReduceInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->axes_[0] = 1; + parameter->num_axes_ = 1; + parameter->reduce_to_end_ = false; + int ret = ReduceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReduceInferTest, ReduceInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->axes_[0] = 0; + parameter->axes_[1] = 1; + parameter->num_axes_ = 2; + parameter->reduce_to_end_ = false; + int ret = ReduceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReduceInferTest, ReduceInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + 
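// input is [5, 2, 3, 4]; reducing axes {1, 3} with keep_dims_ = true should
+  // collapse those axes to 1 while preserving rank, i.e. [5, 1, 3, 1]
+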
inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = true; + parameter->num_axes_ = 2; + parameter->axes_[0] = 1; + parameter->axes_[1] = 3; + parameter->reduce_to_end_ = false; + int ret = ReduceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReduceInferTest, ReduceInferTest4) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReduceParameter *parameter = new ReduceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->keep_dims_ = false; + parameter->num_axes_ = 2; + parameter->axes_[0] = 1; + parameter->axes_[1] = 3; + parameter->reduce_to_end_ = false; + int ret = ReduceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc new file mode 100644 index 0000000000..89a41defc9 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/reshape_infer_test.cc @@ -0,0 +1,361 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/reshape_infer.h" +#include "nnacl/reshape_parameter.h" + +namespace mindspore { + +class ReshapeInferTest : public mindspore::CommonTest { + public: + ReshapeInferTest() {} +}; + +TEST_F(ReshapeInferTest, ReshapeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->shape_dim_ = 1; + parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + std::vector<int32_t> shape_tensor = {6}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt32; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + std::vector<int8_t> shape_tensor = {6}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt8; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest3) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + 
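// same [2, 3] -> [6] reshape as the tests above, but the shape tensor below is
+  // uint32, so integer shape inputs of any signedness should be accepted
+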
inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + std::vector<uint32_t> shape_tensor = {6}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeUInt32; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest4) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 12; + inputs[1] = new TensorC; + std::vector<float> shape_tensor = {3.0, 4.0}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeFloat; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest5) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 12; + inputs[1] = new TensorC; + std::vector<int64_t> shape_tensor = {3, 4}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt64; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest6) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[1] = new TensorC; + 
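// the shape tensor below requests [3, 6]; 3 * 6 matches the 3 * 2 * 3 = 18
+  // input elements, so the reshape is exact and no -1 inference is needed
+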
std::vector<int64_t> shape_tensor = {3, 6}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt64; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest7) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[1] = new TensorC; + std::vector<int64_t> shape_tensor = {3, -1}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt64; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 2; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest8) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 8; + inputs[1] = new TensorC; + std::vector<int64_t> shape_tensor = {1, 2, 5, 4}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt64; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 5); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ReshapeInferTest, ReshapeInferTest9) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 5; + 
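// input is [5, 8] (40 elements); the -1 in the shape tensor below must be
+  // inferred as 40 / (8 * 5 * 1) = 1, giving an output of [8, 5, 1, 1]
+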
inputs[0]->shape_[1] = 8; + inputs[1] = new TensorC; + std::vector<int64_t> shape_tensor = {8, 5, -1, 1}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt64; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ReshapeParameter *parameter = new ReshapeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->shape_size_ = 1; + // parameter->shape_[0] = 6; + int ret = ReshapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 8); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc new file mode 100644 index 0000000000..aa5c4943cd --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/resize_infer_test.cc @@ -0,0 +1,179 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/resize_infer.h" + +namespace mindspore { + +class ResizeInferTest : public mindspore::CommonTest { + public: + ResizeInferTest() {} +}; + +TEST_F(ResizeInferTest, ResizeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 5; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ResizeParameter *parameter = new ResizeParameter; + parameter->new_width_ = 2; + parameter->new_height_ = 3; + parameter->op_parameter_.infer_flag_ = true; + int ret = ResizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ResizeInferTest, ResizeInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 5; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<int32_t> shape_tensor = {4, 3, 2, 5}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeInt32; + inputs[1]->format_ = Format_NHWC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ResizeParameter *parameter = new ResizeParameter; + // parameter->new_width_ = 2; + // parameter->new_height_ = 3; + parameter->op_parameter_.infer_flag_ = true; + int ret = ResizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 15); + ASSERT_EQ(outputs[0]->shape_[2], 6); + ASSERT_EQ(outputs[0]->shape_[3], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(ResizeInferTest, ResizeInferTest2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 5; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<float> shape_tensor = {4.0, 3.0, 2.0, 5.0}; + inputs[1]->data_ = shape_tensor.data(); + inputs[1]->data_type_ = kNumberTypeFloat32; + inputs[1]->format_ = Format_NHWC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ResizeParameter *parameter = new ResizeParameter; + // parameter->new_width_ = 2; + // parameter->new_height_ = 3; + parameter->op_parameter_.infer_flag_ = true; + int ret = 
ResizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                             reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 15);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 5);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc
new file mode 100644
index 0000000000..177100707c
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/rfft_infer_test.cc
@@ -0,0 +1,55 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/rfft_infer.h" + +namespace mindspore { + +class RfftInferTest : public mindspore::CommonTest { + public: + RfftInferTest() {} +}; + +TEST_F(RfftInferTest, RfftInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + RfftParameter *parameter = new RfftParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->fft_length_ = 4; + int ret = RfftInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 2); + + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc new file mode 100644 index 0000000000..459b4906a6 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/roi_pooling_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/roi_pooling_infer.h" + +namespace mindspore { + +class ROIPoolingInferTest : public mindspore::CommonTest { + public: + ROIPoolingInferTest() {} +}; + +TEST_F(ROIPoolingInferTest, ROIPoolingInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->format_ = Format_NHWC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 5; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 4; + inputs[1]->shape_[0] = 21; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + ROIPoolingParameter *parameter = new ROIPoolingParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->pooledW_ = 3; + parameter->pooledH_ = 4; + int ret = ROIPoolingInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 21); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc new file mode 100644 index 0000000000..7baf04ea66 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/scatter_nd_infer_test.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/scatter_nd_infer.h"
+
+namespace mindspore {
+
+class ScatterNdInferTest : public mindspore::CommonTest {
+ public:
+  ScatterNdInferTest() {}
+};
+
+TEST_F(ScatterNdInferTest, ScatterNdInferTest0) {
+  size_t inputs_size = 3;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 4;
+  std::vector<int> input_data = {1, 2, 3, 4};
+  inputs[0]->data_ = input_data.data();
+  inputs[1] = new TensorC;
+  inputs[2] = new TensorC;
+  inputs[2]->data_type_ = kNumberTypeInt8;
+  inputs[2]->format_ = Format_NCHW;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  OpParameter *parameter = new OpParameter;
+  parameter->infer_flag_ = true;
+  int ret = ScatterNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 1);
+  ASSERT_EQ(outputs[0]->shape_[1], 2);
+  ASSERT_EQ(outputs[0]->shape_[2], 3);
+  ASSERT_EQ(outputs[0]->shape_[3], 4);
+  ASSERT_EQ(outputs[0]->format_, Format_NCHW);
+  ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt8);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc
new file mode 100644
index 0000000000..e711c58a86
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/select_infer_test.cc
@@ -0,0 +1,241 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "mindspore/lite/nnacl/infer/select_infer.h"
+
+namespace mindspore {
+
+class SelectInferTest : public mindspore::CommonTest {
+ public:
+  SelectInferTest() {}
+};
+
+/*
+ * inputs_size: 3
+ * outputs_size: 1
+ * inputs[1].shape: [4, 5, 6, 7]
+ * outputs[0].shape: [4, 5, 6, 7]
+ */
+TEST_F(SelectInferTest, SelectInferTest0) {
+  size_t inputs_size = 3;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[1] = new TensorC;
+  inputs[1]->shape_size_ = 4;
+  inputs[1]->shape_[0] = 4;
+  inputs[1]->shape_[1] = 5;
+  inputs[1]->shape_[2] = 6;
+  inputs[1]->shape_[3] = 7;
+  inputs[1]->data_type_ = kNumberTypeInt32;
+  inputs[1]->format_ = Format_NHWC;
+  inputs[2] = new TensorC;
+
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  OpParameter *parameter = new OpParameter;
+  parameter->infer_flag_ = true;
+  int ret = SelectInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                             reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 5);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 7);
+  ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32);
+  ASSERT_EQ(outputs[0]->format_, Format_NHWC);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+/*
+ * inputs_size: 5
+ * outputs_size: 2
+ * inputs[1].shape: [4, 5, 6, 7]
+ * outputs[0].shape: [4, 5, 6, 7]
+ * inputs[2].shape: [8, 9, 10, 11]
+ * outputs[1].shape: [8, 9, 10, 11]
+ */
+TEST_F(SelectInferTest, SelectInferTest1) {
+  size_t inputs_size = 5;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[1] = new TensorC;
+  inputs[1]->shape_size_ = 4;
+  inputs[1]->shape_[0] = 4;
+  inputs[1]->shape_[1] = 5;
+  inputs[1]->shape_[2] = 6;
+  inputs[1]->shape_[3] = 7;
+  inputs[1]->data_type_ = kNumberTypeInt32;
+  inputs[1]->format_ = Format_NHWC;
+  inputs[2] = new TensorC;
+  inputs[2]->shape_size_ = 4;
+  inputs[2]->shape_[0] = 8;
+  inputs[2]->shape_[1] = 9;
+  inputs[2]->shape_[2] = 10;
+  inputs[2]->shape_[3] = 11;
+  inputs[2]->data_type_ = kNumberTypeInt32;
+  inputs[2]->format_ = Format_NHWC;
+  inputs[3] = new TensorC;
+  inputs[4] = new TensorC;
+
+  std::vector<TensorC *> outputs(2, NULL);
+  outputs[0] = new TensorC;
+  outputs[1] = new TensorC;
+  OpParameter *parameter = new OpParameter;
+  parameter->infer_flag_ = true;
+  int ret = SelectInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                             reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 5);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 7);
+  ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32);
+  ASSERT_EQ(outputs[0]->format_, Format_NHWC);
+  ASSERT_EQ(outputs[1]->shape_size_, 4);
+  ASSERT_EQ(outputs[1]->shape_[0], 8);
+  ASSERT_EQ(outputs[1]->shape_[1], 9);
+  ASSERT_EQ(outputs[1]->shape_[2], 10);
+  ASSERT_EQ(outputs[1]->shape_[3], 11);
+  ASSERT_EQ(outputs[1]->data_type_, kNumberTypeInt32);
+  ASSERT_EQ(outputs[1]->format_, Format_NHWC);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i <
outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SelectInferTest, SelectInferTest2) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + TensorListC *inputs1 = new TensorListC; + inputs1->data_type_ = kObjectTypeTensorType; + inputs1->format_ = Format_NHWC; + inputs1->max_elements_num_ = 8; + inputs1->tensors_data_type_ = kNumberTypeInt32; + inputs1->element_shape_size_ = 4; + inputs1->element_shape_[0] = 4; + inputs1->element_shape_[1] = 5; + inputs1->element_shape_[2] = 6; + inputs1->element_shape_[3] = 7; + inputs[1] = reinterpret_cast<TensorC *>(inputs1); + inputs[2] = new TensorC; + + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = reinterpret_cast<TensorC *>(new TensorListC); + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SelectInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + TensorListC *outputs0 = reinterpret_cast<TensorListC *>(outputs[0]); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs0->element_shape_size_, 4); + ASSERT_EQ(outputs0->element_shape_[0], 4); + ASSERT_EQ(outputs0->element_shape_[1], 5); + ASSERT_EQ(outputs0->element_shape_[2], 6); + ASSERT_EQ(outputs0->element_shape_[3], 7); + ASSERT_EQ(outputs0->tensors_data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs0->max_elements_num_, 8); + ASSERT_EQ(outputs0->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SelectInferTest, SelectInferTest3) { + size_t inputs_size = 5; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + TensorListC *inputs1 = new TensorListC; + inputs1->data_type_ = kObjectTypeTensorType; + inputs1->format_ = Format_NHWC; + inputs1->max_elements_num_ = 8; + inputs1->tensors_data_type_ = kNumberTypeInt32; + inputs1->element_shape_size_ = 4; + inputs1->element_shape_[0] = 4; + inputs1->element_shape_[1] = 5; + inputs1->element_shape_[2] = 6; + inputs1->element_shape_[3] = 7; + inputs[1] = reinterpret_cast<TensorC *>(inputs1); + // inputs[2] = new TensorC; + TensorListC *inputs2 = new TensorListC; + inputs2->data_type_ = kObjectTypeTensorType; + inputs2->format_ = Format_NHWC; + inputs2->max_elements_num_ = 8; + inputs2->tensors_data_type_ = kNumberTypeInt32; + inputs2->element_shape_size_ = 4; + inputs2->element_shape_[0] = 8; + inputs2->element_shape_[1] = 9; + inputs2->element_shape_[2] = 10; + inputs2->element_shape_[3] = 11; + inputs[2] = reinterpret_cast<TensorC *>(inputs2); + inputs[3] = new TensorC; + inputs[4] = new TensorC; + + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = reinterpret_cast<TensorC *>(new TensorListC); + outputs[1] = reinterpret_cast<TensorC *>(new TensorListC); + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SelectInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + TensorListC *outputs0 = reinterpret_cast<TensorListC *>(outputs[0]); + TensorListC *outputs1 = reinterpret_cast<TensorListC *>(outputs[1]); + + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs0->element_shape_size_, 4); + ASSERT_EQ(outputs0->element_shape_[0], 4); + ASSERT_EQ(outputs0->element_shape_[1], 5); + ASSERT_EQ(outputs0->element_shape_[2], 6); + ASSERT_EQ(outputs0->element_shape_[3], 7); + 
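// the remaining TensorList metadata must propagate along with the element shape
+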
ASSERT_EQ(outputs0->tensors_data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs0->max_elements_num_, 8); + ASSERT_EQ(outputs0->format_, Format_NHWC); + + ASSERT_EQ(outputs1->element_shape_size_, 4); + ASSERT_EQ(outputs1->element_shape_[0], 8); + ASSERT_EQ(outputs1->element_shape_[1], 9); + ASSERT_EQ(outputs1->element_shape_[2], 10); + ASSERT_EQ(outputs1->element_shape_[3], 11); + ASSERT_EQ(outputs1->tensors_data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs1->max_elements_num_, 8); + ASSERT_EQ(outputs1->format_, Format_NHWC); + + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc new file mode 100644 index 0000000000..0df329290d --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/sgd_infer_test.cc @@ -0,0 +1,66 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/sgd_infer.h" + +namespace mindspore { + +class SgdInferTest : public mindspore::CommonTest { + public: + SgdInferTest() {} +}; + +TEST_F(SgdInferTest, SgdInferTest0) { + size_t inputs_size = 6; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 4; + inputs[1]->shape_[1] = 3; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 1; + inputs[2]->shape_[0] = 1; + inputs[3] = new TensorC; + inputs[3]->shape_size_ = 2; + inputs[3]->shape_[0] = 4; + inputs[3]->shape_[1] = 3; + inputs[4] = new TensorC; + inputs[4]->shape_size_ = 1; + inputs[4]->shape_[0] = 1; + inputs[5] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SgdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc new file mode 100644 index 0000000000..ec968dd6c1 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/shape_infer_test.cc @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/shape_infer.h" + +namespace mindspore { + +class ShapeInferTest : public mindspore::CommonTest { + public: + ShapeInferTest() {} +}; + +TEST_F(ShapeInferTest, ShapeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = ShapeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 2); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc new file mode 100644 index 0000000000..026eef8221 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/size_infer_test.cc @@ -0,0 +1,55 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/size_infer.h" + +namespace mindspore { + +class SizeInferTest : public mindspore::CommonTest { + public: + SizeInferTest() {} +}; + +TEST_F(SizeInferTest, SizeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SizeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc new file mode 100644 index 0000000000..f36480a359 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/skip_gram_infer_test.cc @@ -0,0 +1,51 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/skip_gram_infer.h" + +namespace mindspore { + +class SkipGramInferTest : public mindspore::CommonTest { + public: + SkipGramInferTest() {} +}; + +TEST_F(SkipGramInferTest, SkipGramInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->data_ = NULL; + inputs[0]->data_type_ = kNumberTypeInt8; + inputs[0]->format_ = kNHWC_C; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SkipGramInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_INFER_INVALID); + ASSERT_EQ(outputs[0]->format_, kNHWC_C); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt8); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc new file mode 100644 index 0000000000..38ae57a995 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/slice_infer_test.cc @@ -0,0 +1,175 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/slice_infer.h" + +namespace mindspore { + +class SliceInferTest : public mindspore::CommonTest { + public: + SliceInferTest() {} +}; + +TEST_F(SliceInferTest, SliceInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SliceParameter *parameter = new SliceParameter; + parameter->begin_[0] = 1; + parameter->begin_[1] = 1; + parameter->size_[0] = 1; + parameter->size_[1] = 3; + parameter->axis_[0] = 0; + parameter->axis_[1] = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = SliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SliceInferTest, SliceInferTest1) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SliceParameter *parameter = new SliceParameter; + parameter->begin_[0] = 1; + parameter->begin_[1] = 0; + parameter->begin_[2] = 0; + parameter->size_[0] = 1; + parameter->size_[1] = 1; + parameter->size_[2] = 3; + parameter->axis_[0] = 0; + parameter->axis_[1] = 1; + parameter->axis_[2] = 2; + parameter->op_parameter_.infer_flag_ = true; + int ret = SliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SliceInferTest, SliceInferTest2) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SliceParameter *parameter = new SliceParameter; + parameter->begin_[0] = 1; + parameter->begin_[1] = 0; + parameter->begin_[2] = 0; + parameter->size_[0] = 1; + parameter->size_[1] = 2; + parameter->size_[2] = 3; + parameter->axis_[0] = 0; + parameter->axis_[1] = 1; + parameter->axis_[2] = 2; + parameter->op_parameter_.infer_flag_ = true; + int ret = SliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 
0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SliceInferTest, SliceInferTest3) { + size_t inputs_size = 5; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 4; + inputs[1] = new TensorC; + std::vector<int> inputs1 = {1, 0, 0}; + inputs[1]->data_ = inputs1.data(); + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 3; + inputs[2] = new TensorC; + std::vector<int> inputs2 = {2, 2, 3}; + inputs[2]->data_ = inputs2.data(); + inputs[2]->shape_size_ = 1; + inputs[2]->shape_[0] = 3; + inputs[3] = new TensorC; + std::vector<int> inputs3 = {0, 1, 2}; + inputs[3]->data_ = inputs3.data(); + inputs[3]->shape_size_ = 1; + inputs[3]->shape_[0] = 3; + inputs[4] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SliceParameter *parameter = new SliceParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = SliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc new file mode 100644 index 0000000000..952ddfa1c3 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/softmax_cross_entropy_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/softmax_cross_entropy_infer.h" + +namespace mindspore { + +class SoftmaxCrossEntropyInferTest : public mindspore::CommonTest { + public: + SoftmaxCrossEntropyInferTest() {} +}; + +TEST_F(SoftmaxCrossEntropyInferTest, SoftmaxCrossEntropyInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SoftmaxCrossEntropyInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->shape_[1], 3); + ASSERT_EQ(outputs[1]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[1]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc new file mode 100644 index 0000000000..1f37f9d61b --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/softmax_infer_test.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/softmax_infer.h" + +namespace mindspore { + +class SoftmaxInferTest : public mindspore::CommonTest { + public: + SoftmaxInferTest() {} +}; + +TEST_F(SoftmaxInferTest, SoftmaxInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SoftmaxParameter *parameter = new SoftmaxParameter; + parameter->op_parameter_.infer_flag_ = true; + int ret = SoftMaxInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + ASSERT_EQ(outputs[0]->format_, Format_NHWC); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc new file mode 100644 index 0000000000..a9b470e1a9 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_infer_test.cc @@ -0,0 +1,178 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/space_to_batch_infer.h" + +namespace mindspore { + +class SpaceToBatchInferTest : public mindspore::CommonTest { + public: + SpaceToBatchInferTest() {} +}; + +TEST_F(SpaceToBatchInferTest, SpaceToBatchInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = SpaceToBatchInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchInferTest, SpaceToBatchInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 3; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = SpaceToBatchInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchInferTest, SpaceToBatchInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + parameter->op_parameter_.infer_flag_ = true; + int 
ret = SpaceToBatchInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchInferTest, SpaceToBatchInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 2; + parameter->paddings_[3] = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = SpaceToBatchInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 8); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc new file mode 100644 index 0000000000..1cb40e910e --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_batch_nd_infer_test.cc @@ -0,0 +1,179 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/space_to_batch_nd_infer.h" + +namespace mindspore { + +class SpaceToBatchNdInferTest : public mindspore::CommonTest { + public: + SpaceToBatchNdInferTest() {} +}; + +// https://tensorflow.google.cn/api_docs/python/tf/space_to_batch_nd?hl=en +TEST_F(SpaceToBatchNdInferTest, SpaceToBatchNdInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + int ret = SpaceToBatchNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchNdInferTest, SpaceToBatchNdInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 3; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + int ret = SpaceToBatchNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchNdInferTest, SpaceToBatchNdInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + 
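// With 2x2 blocks and zero padding, SpaceToBatchNd should multiply the batch
 + // by 4 and halve each spatial dim: {1, 4, 4, 1} -> {4, 2, 2, 1}, matching
 + // the assertions below.
 + 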
parameter->paddings_[1] = 0; + parameter->paddings_[2] = 0; + parameter->paddings_[3] = 0; + int ret = SpaceToBatchNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToBatchNdInferTest, SpaceToBatchNdInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToBatchParameter *parameter = new SpaceToBatchParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->m_ = 2; + parameter->block_sizes_[0] = 2; + parameter->block_sizes_[1] = 2; + parameter->paddings_[0] = 0; + parameter->paddings_[1] = 0; + parameter->paddings_[2] = 2; + parameter->paddings_[3] = 0; + int ret = SpaceToBatchNdInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 8); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + ASSERT_EQ(outputs[0]->shape_[3], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc new file mode 100644 index 0000000000..7dd2161527 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/space_to_depth_infer_test.cc @@ -0,0 +1,90 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/space_to_depth_infer.h" + +namespace mindspore { + +class SpaceToDepthInferTest : public mindspore::CommonTest { + public: + SpaceToDepthInferTest() {} +}; + +TEST_F(SpaceToDepthInferTest, SpaceToDepthInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 2; + inputs[0]->shape_[2] = 2; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToDepthParameter *parameter = new SpaceToDepthParameter; + parameter->block_size_ = 2; + parameter->op_parameter_.infer_flag_ = true; + int ret = SpaceToDepthInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SpaceToDepthInferTest, SpaceToDepthInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 1; + inputs[0]->shape_[1] = 4; + inputs[0]->shape_[2] = 4; + inputs[0]->shape_[3] = 1; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SpaceToDepthParameter *parameter = new SpaceToDepthParameter; + parameter->block_size_ = 2; + parameter->op_parameter_.infer_flag_ = true; + int ret = SpaceToDepthInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 2); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc new file mode 100644 index 0000000000..9b74fe2dd4 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/sparse_to_dense_infer_test.cc @@ -0,0 +1,59 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/sparse_to_dense_infer.h" + +namespace mindspore { + +class SparseToDenseInferTest : public mindspore::CommonTest { + public: + SparseToDenseInferTest() {} +}; + +TEST_F(SparseToDenseInferTest, SparseToDenseInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + std::vector<int> data_tmp = {2, 3, 4, 5}; + inputs[1]->data_ = data_tmp.data(); + inputs[2] = new TensorC; + inputs[2]->data_type_ = kNumberTypeInt32; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = SparseToDenseInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->shape_[3], 5); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc new file mode 100644 index 0000000000..4817099fd8 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/split_infer_test.cc @@ -0,0 +1,231 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/split_infer.h" + +namespace mindspore { + +class SplitInferTest : public mindspore::CommonTest { + public: + SplitInferTest() {} +}; + +TEST_F(SplitInferTest, SplitInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 5; + inputs[0]->shape_[1] = 40; + std::vector<TensorC *> outputs(3, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + outputs[2] = new TensorC; + SplitParameter *parameter = new SplitParameter; + parameter->num_split_ = 3; + // parameter->split_count_ = 3; + std::vector<int> split_sizes = {4, 15, 11}; + parameter->split_sizes_ = split_sizes.data(); + parameter->split_dim_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = SplitInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 5); + ASSERT_EQ(outputs[0]->shape_[1], 4); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 5); + ASSERT_EQ(outputs[1]->shape_[1], 15); + ASSERT_EQ(outputs[2]->shape_size_, 2); + ASSERT_EQ(outputs[2]->shape_[0], 5); + ASSERT_EQ(outputs[2]->shape_[1], 11); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SplitInferTest, SplitInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 8; + inputs[0]->shape_[2] = 6; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + SplitParameter *parameter = new SplitParameter; + parameter->num_split_ = 0; + // parameter->num_split_ = 2; + // parameter->split_count_ = 0; + parameter->split_dim_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = SplitInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 8); + ASSERT_EQ(outputs[0]->shape_[2], 6); + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 2); + ASSERT_EQ(outputs[1]->shape_[1], 8); + ASSERT_EQ(outputs[1]->shape_[2], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SplitInferTest, SplitInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->shape_[3] = 7; + std::vector<TensorC *> outputs(3, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + outputs[2] = new TensorC; + SplitParameter *parameter = new SplitParameter; + parameter->num_split_ = 3; + parameter->split_count_ = 3; + parameter->split_sizes_ = reinterpret_cast<int *>(malloc(sizeof(int) * 3)); + parameter->split_sizes_[0] = 1; + parameter->split_sizes_[1] = 4; + parameter->split_sizes_[2] = 2; + parameter->split_dim_ = 3; + 
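// Splitting dim 3 (length 7) with explicit sizes {1, 4, 2} (1 + 4 + 2 == 7)
 + // should keep dims {4, 5, 6} intact and give last dims of 1, 4 and 2, as
 + // asserted below.
 + 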
parameter->op_parameter_.infer_flag_ = true;
+  int ret = SplitInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 5);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 1);
+  ASSERT_EQ(outputs[1]->shape_size_, 4);
+  ASSERT_EQ(outputs[1]->shape_[0], 4);
+  ASSERT_EQ(outputs[1]->shape_[1], 5);
+  ASSERT_EQ(outputs[1]->shape_[2], 6);
+  ASSERT_EQ(outputs[1]->shape_[3], 4);
+  ASSERT_EQ(outputs[2]->shape_size_, 4);
+  ASSERT_EQ(outputs[2]->shape_[0], 4);
+  ASSERT_EQ(outputs[2]->shape_[1], 5);
+  ASSERT_EQ(outputs[2]->shape_[2], 6);
+  ASSERT_EQ(outputs[2]->shape_[3], 2);
+  free(parameter->split_sizes_);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(SplitInferTest, SplitInferTest3) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 4;
+  inputs[0]->shape_[0] = 4;
+  inputs[0]->shape_[1] = 5;
+  inputs[0]->shape_[2] = 6;
+  inputs[0]->shape_[3] = 7;
+  std::vector<TensorC *> outputs(2, NULL);
+  outputs[0] = new TensorC;
+  outputs[1] = new TensorC;
+  SplitParameter *parameter = new SplitParameter;
+  parameter->num_split_ = 0;
+  // parameter->num_split_ = 2;
+  // parameter->split_count_ = 0;
+  parameter->split_dim_ = 0;
+  parameter->op_parameter_.infer_flag_ = true;
+  int ret = SplitInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 2);
+  ASSERT_EQ(outputs[0]->shape_[1], 5);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 7);
+  ASSERT_EQ(outputs[1]->shape_size_, 4);
+  ASSERT_EQ(outputs[1]->shape_[0], 2);
+  ASSERT_EQ(outputs[1]->shape_[1], 5);
+  ASSERT_EQ(outputs[1]->shape_[2], 6);
+  ASSERT_EQ(outputs[1]->shape_[3], 7);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(SplitInferTest, SplitInferTest4) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 4;
+  inputs[0]->shape_[0] = 1200;
+  inputs[0]->shape_[1] = 5;
+  inputs[0]->shape_[2] = 6;
+  inputs[0]->shape_[3] = 7;
+  std::vector<TensorC *> outputs(100, NULL);
+  for (size_t i = 0; i < 100; i++) {
+    outputs[i] = new TensorC;
+  }
+  SplitParameter *parameter = new SplitParameter;
+  parameter->num_split_ = 0;
+  // parameter->num_split_ = 2;
+  // parameter->split_count_ = 0;
+  parameter->split_dim_ = 0;
+  parameter->op_parameter_.infer_flag_ = true;
+  int ret = SplitInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                            reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  for (size_t i = 0; i < 100; i++) {
+    ASSERT_EQ(outputs[i]->shape_size_, 4);
+    ASSERT_EQ(outputs[i]->shape_[0], 12);
+    ASSERT_EQ(outputs[i]->shape_[1], 5);
+    ASSERT_EQ(outputs[i]->shape_[2], 6);
+    ASSERT_EQ(outputs[i]->shape_[3], 7);
+  }
+
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); 
i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc new file mode 100644 index 0000000000..7d6f932c9c --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/squeeze_infer_test.cc @@ -0,0 +1,151 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/squeeze_infer.h" + +namespace mindspore { + +class SqueezeInferTest : public mindspore::CommonTest { + public: + SqueezeInferTest() {} +}; + +TEST_F(SqueezeInferTest, SqueezeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + inputs[0]->shape_[4] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SqueezeParameter *parameter = new SqueezeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_size_ = 0; + int ret = SqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SqueezeInferTest, SqueezeInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + inputs[0]->shape_[4] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SqueezeParameter *parameter = new SqueezeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_size_ = 1; + parameter->axis_[0] = 1; + int ret = SqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 1); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SqueezeInferTest, SqueezeInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 1; + 
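// Input shape is {2, 1, 3, 1, 4}; squeezing axes 1 and 3 (both singleton
 + // dims) is expected to leave {2, 3, 4}.
 + 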
inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + inputs[0]->shape_[4] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SqueezeParameter *parameter = new SqueezeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_size_ = 2; + parameter->axis_[0] = 1; + parameter->axis_[1] = 3; + int ret = SqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(SqueezeInferTest, SqueezeInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 1; + inputs[0]->shape_[2] = 3; + inputs[0]->shape_[3] = 1; + inputs[0]->shape_[4] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + SqueezeParameter *parameter = new SqueezeParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_size_ = 1; + parameter->axis_[0] = 0; + int ret = SqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_PARAM_INVALID); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc new file mode 100644 index 0000000000..e3c4ca6ab5 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/stack_infer_test.cc @@ -0,0 +1,94 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/stack_infer.h" + +namespace mindspore { + +class StackInferTest : public mindspore::CommonTest { + public: + StackInferTest() {} +}; + +TEST_F(StackInferTest, StackInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = 1; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 3; + inputs[1]->data_type_ = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + StackParameter *parameter = new StackParameter; + parameter->axis_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = StackInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(StackInferTest, StackInferTest1) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 3; + inputs[0]->data_type_ = 1; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 3; + inputs[1]->shape_[1] = 3; + inputs[1]->data_type_ = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + StackParameter *parameter = new StackParameter; + parameter->axis_ = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = StackInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc new file mode 100644 index 0000000000..6e0afca6a2 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/strided_slice_infer_test.cc @@ -0,0 +1,318 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/strided_slice_infer.h" + +namespace mindspore { + +class StridedSliceInferTest : public mindspore::CommonTest { + public: + StridedSliceInferTest() {} +}; + +TEST_F(StridedSliceInferTest, StridedSliceInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + StridedSliceParameter *parameter = new StridedSliceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->begins_[0] = 1; + parameter->begins_[1] = 0; + parameter->begins_[2] = 0; + parameter->ends_[0] = 2; + parameter->ends_[1] = 1; + parameter->ends_[2] = 3; + parameter->strides_[0] = 1; + parameter->strides_[1] = 1; + parameter->strides_[2] = 1; + parameter->num_axes_ = 3; + parameter->begins_mask_ = 0; + parameter->ends_mask_ = 0; + parameter->ellipsisMask_ = 0; + parameter->newAxisMask_ = 0; + parameter->shrinkAxisMask_ = 0; + int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 1); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(StridedSliceInferTest, StridedSliceInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + StridedSliceParameter *parameter = new StridedSliceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->begins_[0] = 1; + parameter->begins_[1] = 0; + parameter->begins_[2] = 0; + parameter->ends_[0] = 2; + parameter->ends_[1] = 2; + parameter->ends_[2] = 3; + parameter->strides_[0] = 1; + parameter->strides_[1] = 1; + parameter->strides_[2] = 1; + parameter->num_axes_ = 3; + parameter->begins_mask_ = 0; + parameter->ends_mask_ = 0; + parameter->ellipsisMask_ = 0; + parameter->newAxisMask_ = 0; + parameter->shrinkAxisMask_ = 0; + int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(StridedSliceInferTest, StridedSliceInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 3; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + StridedSliceParameter *parameter = new StridedSliceParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->begins_[0] = 1; + 
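// Axis 1 is sliced in reverse here: begin -1, end -3, stride -1 visits
 + // indices 2 and 1, so the expected output shape is {1, 2, 3}.
 + 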
parameter->begins_[1] = -1;
+  parameter->begins_[2] = 0;
+  parameter->ends_[0] = 2;
+  parameter->ends_[1] = -3;
+  parameter->ends_[2] = 3;
+  parameter->strides_[0] = 1;
+  parameter->strides_[1] = -1;
+  parameter->strides_[2] = 1;
+  parameter->num_axes_ = 3;
+  parameter->begins_mask_ = 0;
+  parameter->ends_mask_ = 0;
+  parameter->ellipsisMask_ = 0;
+  parameter->newAxisMask_ = 0;
+  parameter->shrinkAxisMask_ = 0;
+  int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                   reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 3);
+  ASSERT_EQ(outputs[0]->shape_[0], 1);
+  ASSERT_EQ(outputs[0]->shape_[1], 2);
+  ASSERT_EQ(outputs[0]->shape_[2], 3);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(StridedSliceInferTest, StridedSliceInferTest3) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 5;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  StridedSliceParameter *parameter = new StridedSliceParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->begins_[0] = 0;
+  parameter->ends_[0] = 3;
+  parameter->strides_[0] = 1;
+  parameter->num_axes_ = 1;
+  parameter->begins_mask_ = 0;
+  parameter->ends_mask_ = 0;
+  parameter->ellipsisMask_ = 0;
+  parameter->newAxisMask_ = 0;
+  parameter->shrinkAxisMask_ = 0;
+  int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                   reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 3);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(StridedSliceInferTest, StridedSliceInferTest4) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 5;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  StridedSliceParameter *parameter = new StridedSliceParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->begins_[0] = 1;
+  parameter->ends_[0] = -2;
+  parameter->strides_[0] = 1;
+  parameter->num_axes_ = 1;
+  parameter->begins_mask_ = 0;
+  parameter->ends_mask_ = 0;
+  parameter->ellipsisMask_ = 0;
+  parameter->newAxisMask_ = 0;
+  parameter->shrinkAxisMask_ = 0;
+  int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                   reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 2);
+
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(StridedSliceInferTest, StridedSliceInferTest5) {
+  size_t inputs_size = 4;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 1;
+  inputs[0]->shape_[0] = 5;
+  // begin/end/stride arrive as extra input tensors here, so the parameter only carries the masks.
+  int *begin_vector = reinterpret_cast<int *>(malloc(sizeof(int)));
+  begin_vector[0] = 1;
+  int *end_vector = reinterpret_cast<int *>(malloc(sizeof(int)));
+  end_vector[0] = -2;
+  int *stride_vector = reinterpret_cast<int *>(malloc(sizeof(int)));
+  stride_vector[0] = 1;
+  inputs[1] = new TensorC;
+  inputs[1]->data_ = begin_vector;
+  inputs[1]->shape_size_ = 1;
+  inputs[1]->shape_[0] = 1;
+  inputs[2] = new TensorC;
+  inputs[2]->data_ = end_vector;
+  inputs[2]->shape_size_ = 1;
+  inputs[2]->shape_[0] = 1;
+  inputs[3] = new TensorC;
+  inputs[3]->data_ = stride_vector;
+  inputs[3]->shape_size_ = 1;
+  inputs[3]->shape_[0] = 1;
+  std::vector<TensorC *> outputs;
+  outputs.push_back(NULL);
+  outputs[0] = new TensorC;
+  StridedSliceParameter *parameter = new StridedSliceParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->begins_mask_ = 0;
+  parameter->ends_mask_ = 0;
+  parameter->ellipsisMask_ = 0;
+  parameter->newAxisMask_ = 0;
+  parameter->shrinkAxisMask_ = 0;
+  int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                   reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 1);
+  ASSERT_EQ(outputs[0]->shape_[0], 2);
+  delete parameter;
+  delete inputs[0];
+  delete inputs[1];
+  delete inputs[2];
+  delete inputs[3];
+  delete outputs[0];
+  free(begin_vector);
+  free(end_vector);
+  free(stride_vector);
+}
+
+TEST_F(StridedSliceInferTest, StridedSliceInferTest6) {
+  size_t inputs_size = 4;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 3;
+  inputs[0]->shape_[0] = 3;
+  inputs[0]->shape_[1] = 3;
+  inputs[0]->shape_[2] = 3;
+  std::vector<int> begin_vector = {1, 0, 0};
+  std::vector<int> end_vector = {2, 1, 3};
+  std::vector<int> stride_vector = {1, 1, 1};
+  inputs[1] = new TensorC;
+  inputs[1]->data_ = begin_vector.data();
+  inputs[1]->shape_size_ = 1;
+  inputs[1]->shape_[0] = 3;
+  inputs[2] = new TensorC;
+  inputs[2]->data_ = end_vector.data();
+  inputs[2]->shape_size_ = 1;
+  inputs[2]->shape_[0] = 3;
+  inputs[3] = new TensorC;
+  inputs[3]->data_ = stride_vector.data();
+  inputs[3]->shape_size_ = 1;
+  inputs[3]->shape_[0] = 3;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  StridedSliceParameter *parameter = new StridedSliceParameter;
+  parameter->op_parameter_.infer_flag_ = true;
+  parameter->begins_mask_ = 0;
+  parameter->ends_mask_ = 0;
+  parameter->ellipsisMask_ = 0;
+  parameter->newAxisMask_ = 0;
+  parameter->shrinkAxisMask_ = 0;
+  int ret = StridedSliceInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                   reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 3);
+  ASSERT_EQ(outputs[0]->shape_[0], 1);
+  ASSERT_EQ(outputs[0]->shape_[1], 1);
+  ASSERT_EQ(outputs[0]->shape_[2], 3);
+  delete parameter;
+  // The begin/end/stride buffers are owned by the local vectors, so only the tensors are released here.
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc
new file mode 100644
index 0000000000..d4f165bbee
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_fromtensor_infer_test.cc
@@ -0,0 +1,79 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "src/common/tensor_util.h" +#include "mindspore/lite/nnacl/infer/tensorlist_fromtensor_infer.h" + +namespace mindspore { + +class TensorlistFromtensorInferTest : public mindspore::CommonTest { + public: + TensorlistFromtensorInferTest() {} +}; + +TEST_F(TensorlistFromtensorInferTest, TensorlistFromtensorInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 6; + inputs[0]->shape_[2] = 5; + inputs[0]->data_type_ = kNumberTypeInt32; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<int> tmp = {-1, 5}; + inputs[1]->data_ = tmp.data(); + inputs[1]->data_type_ = kNumberTypeInt32; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 1; + inputs[1]->shape_[1] = 2; + + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = reinterpret_cast<TensorC *>(malloc(sizeof(TensorListC))); + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = TensorListFromTensorInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), + outputs.size(), reinterpret_cast<OpParameter *>(parameter)); + TensorListC *out = reinterpret_cast<TensorListC *>(outputs[0]); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(out->element_num_, 4); + ASSERT_EQ(out->data_type_, kObjectTypeTensorType); + ASSERT_EQ(out->element_shape_size_, 2); + ASSERT_EQ(out->element_shape_[0], -1); + ASSERT_EQ(out->element_shape_[1], 5); + ASSERT_EQ(out->tensors_data_type_, kNumberTypeInt32); + // ASSERT_EQ(outputs[0]->format_, Format_NHWC); + for (size_t i = 0; i < out->element_num_; i++) { + ASSERT_EQ(out->tensors_[i]->shape_size_, 2); + ASSERT_EQ(out->tensors_[i]->shape_[0], 6); + ASSERT_EQ(out->tensors_[i]->shape_[1], 5); + } + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + if (outputs[i]->data_type_ == kObjectTypeTensorType) { + TensorListC *tensorListC = reinterpret_cast<TensorListC *>(outputs[i]); + lite::FreeTensorListC(tensorListC); + } else { + delete outputs[i]; + } + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc new file mode 100644 index 0000000000..c82ac53291 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_getitem_infer_test.cc @@ -0,0 +1,97 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "common/common_test.h"
+#include "src/common/tensor_util.h"
+#include "mindspore/lite/nnacl/infer/tensorlist_getitem_infer.h"
+
+namespace mindspore {
+
+class TensorlistGetItemInferTest : public mindspore::CommonTest {
+ public:
+  TensorlistGetItemInferTest() {}
+};
+
+// [[1, 2], [3, 4, 5], [6, 7, 8, 9]] -> [6, 7, 8, 9]
+TEST_F(TensorlistGetItemInferTest, TensorlistGetItemInferTest0) {
+  size_t inputs_size = 3;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  // calloc zero-initializes every field, so neither the infer call nor the data_type_ branch in
+  // the cleanup loop reads indeterminate memory.
+  TensorListC *input0 = reinterpret_cast<TensorListC *>(calloc(1, sizeof(TensorListC)));
+  input0->element_num_ = 3;
+  input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *)));
+  input0->tensors_[0] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+  input0->tensors_[0]->shape_size_ = 2;
+  input0->tensors_[0]->shape_[0] = 1;
+  input0->tensors_[0]->shape_[1] = 2;
+  input0->tensors_[0]->data_type_ = kNumberTypeInt32;
+  // input0->tensors_[0]->format_ = Format_NHWC;
+  input0->tensors_[1] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+  input0->tensors_[1]->shape_size_ = 3;
+  input0->tensors_[1]->shape_[0] = 3;
+  input0->tensors_[1]->shape_[1] = 4;
+  input0->tensors_[1]->shape_[2] = 5;
+  input0->tensors_[1]->data_type_ = kNumberTypeInt32;
+  // input0->tensors_[1]->format_ = Format_NHWC;
+  input0->tensors_[2] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+  input0->tensors_[2]->shape_size_ = 4;
+  input0->tensors_[2]->shape_[0] = 6;
+  input0->tensors_[2]->shape_[1] = 7;
+  input0->tensors_[2]->shape_[2] = 8;
+  input0->tensors_[2]->shape_[3] = 9;
+  input0->tensors_[2]->data_type_ = kNumberTypeInt32;
+  // input0->tensors_[2]->format_ = Format_NHWC;
+  inputs[0] = reinterpret_cast<TensorC *>(input0);
+  inputs[0]->data_type_ = kObjectTypeTensorType;
+
+  inputs[1] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+  inputs[1]->shape_size_ = 1;
+  inputs[1]->shape_[0] = 1;
+  std::vector<int> inputs1_data = {2};
+  inputs[1]->data_ = inputs1_data.data();
+
+  inputs[2] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = reinterpret_cast<TensorC *>(calloc(1, sizeof(TensorC)));
+  OpParameter *parameter = new OpParameter;
+  parameter->infer_flag_ = true;
+  int ret = TensorListGetItemInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                        reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 4);
+  ASSERT_EQ(outputs[0]->shape_[0], 6);
+  ASSERT_EQ(outputs[0]->shape_[1], 7);
+  ASSERT_EQ(outputs[0]->shape_[2], 8);
+  ASSERT_EQ(outputs[0]->shape_[3], 9);
+  ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32);
+  // ASSERT_EQ(outputs[0]->format_, Format_NHWC);
+
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    if (inputs[i]->data_type_ == kObjectTypeTensorType) {
+      TensorListC *tensorListC = reinterpret_cast<TensorListC *>(inputs[i]);
+      lite::FreeTensorListC(tensorListC);
+    } else {
+      free(inputs[i]);
+    }
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    free(outputs[i]);
+  }
+}
+
+// retest mergeshape
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc
new file mode 100644
index 0000000000..0389a81078
--- /dev/null
+++
b/mindspore/lite/test/ut/nnacl/infer/tensorlist_reserve_infer_test.cc @@ -0,0 +1,71 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/tensorlist_reserve_infer.h" + +namespace mindspore { + +class TensorlistReserveInferTest : public mindspore::CommonTest { + public: + TensorlistReserveInferTest() {} +}; + +TEST_F(TensorlistReserveInferTest, TensorlistReserveInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 3; + std::vector<int> inputs0 = {2, 3, 4}; + inputs[0]->data_ = inputs0.data(); + inputs[0]->data_type_ = kNumberTypeInt32; + // inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<int> inputs1 = {5}; + inputs[1]->data_ = inputs1.data(); + inputs[1]->data_type_ = kNumberTypeInt32; + + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = reinterpret_cast<TensorC *>(new TensorListC); + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = TensorListReserveInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + TensorListC *out = reinterpret_cast<TensorListC *>(outputs[0]); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(out->element_num_, 5); + ASSERT_EQ(out->data_type_, kObjectTypeTensorType); + ASSERT_EQ(out->element_shape_size_, 3); + ASSERT_EQ(out->element_shape_[0], 2); + ASSERT_EQ(out->element_shape_[1], 3); + ASSERT_EQ(out->element_shape_[2], 4); + ASSERT_EQ(out->tensors_data_type_, kTypeUnknown); + // ASSERT_EQ(outputs[0]->format_, Format_NHWC); + for (size_t i = 0; i < out->element_num_; i++) { + ASSERT_EQ(out->tensors_[i]->shape_size_, 0); + } + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc new file mode 100644 index 0000000000..09d25da9b5 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_setitem_infer_test.cc @@ -0,0 +1,108 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/tensorlist_setitem_infer.h" + +namespace mindspore { + +class TensorlistSetItemInferTest : public mindspore::CommonTest { + public: + TensorlistSetItemInferTest() {} +}; + +// [[1, 2], [3, 4, 5], [6, 7, 8, 9]], 3-> [6, 7, 8, 9] +TEST_F(TensorlistSetItemInferTest, TensorlistSetItemInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + TensorListC *input0 = new TensorListC; + input0->element_num_ = 3; + input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *))); + input0->element_shape_size_ = 2; + input0->element_shape_[0] = 2; + input0->element_shape_[1] = 4; + input0->tensors_data_type_ = kNumberTypeInt32; + input0->data_type_ = kObjectTypeTensorType; + input0->tensors_[0] = new TensorC; + input0->tensors_[0]->shape_size_ = 2; + input0->tensors_[0]->shape_[0] = 2; + input0->tensors_[0]->shape_[1] = 4; + input0->tensors_[0]->data_type_ = kNumberTypeInt32; + // input0->tensors_[0]->format_ = Format_NHWC; + input0->tensors_[1] = new TensorC; + input0->tensors_[1]->shape_size_ = 2; + input0->tensors_[1]->shape_[0] = 2; + input0->tensors_[1]->shape_[1] = 4; + input0->tensors_[1]->data_type_ = kNumberTypeInt32; + // input0->tensors_[1]->format_ = Format_NHWC; + input0->tensors_[2] = new TensorC; + input0->tensors_[2]->shape_size_ = 2; + input0->tensors_[2]->shape_[0] = 2; + input0->tensors_[2]->shape_[1] = 4; + input0->tensors_[2]->data_type_ = kNumberTypeInt32; + // input0->tensors_[2]->format_ = Format_NHWC; + inputs[0] = reinterpret_cast<TensorC *>(input0); + + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 1; + std::vector<int> inputs1_data = {2}; + inputs[1]->data_ = inputs1_data.data(); + inputs[1]->data_type_ = kNumberTypeInt32; + + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 2; + inputs[2]->shape_[0] = 5; + inputs[2]->shape_[1] = 6; + inputs[2]->data_type_ = kNumberTypeInt32; + + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = reinterpret_cast<TensorC *>(new TensorListC); + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = TensorListSetItemInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + TensorListC *res = reinterpret_cast<TensorListC *>(outputs[0]); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(res->element_num_, 3); + ASSERT_EQ(res->element_shape_size_, 2); + ASSERT_EQ(res->element_shape_[0], 2); + ASSERT_EQ(res->element_shape_[1], 4); + ASSERT_EQ(res->tensors_data_type_, kNumberTypeInt32); + ASSERT_EQ(res->data_type_, kObjectTypeTensorType); + ASSERT_EQ(res->tensors_[0]->shape_size_, 2); + ASSERT_EQ(res->tensors_[0]->shape_[0], 2); + ASSERT_EQ(res->tensors_[0]->shape_[1], 4); + ASSERT_EQ(res->tensors_[1]->shape_size_, 2); + ASSERT_EQ(res->tensors_[1]->shape_[0], 2); + ASSERT_EQ(res->tensors_[1]->shape_[1], 4); + ASSERT_EQ(res->tensors_[2]->shape_size_, 2); + ASSERT_EQ(res->tensors_[2]->shape_[0], 5); + ASSERT_EQ(res->tensors_[2]->shape_[1], 6); + + // ASSERT_EQ(outputs[0]->format_, Format_NHWC); + + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +// retest mergeshape + +} // namespace mindspore diff --git 
a/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc new file mode 100644 index 0000000000..ec03bdc827 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/tensorlist_stack_infer_test.cc @@ -0,0 +1,89 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/tensorlist_stack_infer.h" + +namespace mindspore { + +class TensorlistStackInferTest : public mindspore::CommonTest { + public: + TensorlistStackInferTest() {} +}; + +// TensorList[[2, 4], [2, 4], [2, 4]] -> size(3, 2, 4) +TEST_F(TensorlistStackInferTest, TensorlistStackInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + TensorListC *input0 = new TensorListC; + input0->element_num_ = 3; + input0->tensors_ = reinterpret_cast<TensorC **>(malloc(input0->element_num_ * sizeof(TensorC *))); + input0->element_shape_size_ = 2; + input0->element_shape_[0] = 2; + input0->element_shape_[1] = 4; + input0->tensors_data_type_ = kNumberTypeInt32; + input0->tensors_[0] = new TensorC; + input0->tensors_[0]->shape_size_ = 2; + input0->tensors_[0]->shape_[0] = 2; + input0->tensors_[0]->shape_[1] = 4; + input0->tensors_[0]->data_type_ = kNumberTypeInt32; + // input0->tensors_[0]->format_ = Format_NHWC; + input0->tensors_[1] = new TensorC; + input0->tensors_[1]->shape_size_ = 2; + input0->tensors_[1]->shape_[0] = 2; + input0->tensors_[1]->shape_[1] = 4; + input0->tensors_[1]->data_type_ = kNumberTypeInt32; + // input0->tensors_[1]->format_ = Format_NHWC; + input0->tensors_[2] = new TensorC; + input0->tensors_[2]->shape_size_ = 2; + input0->tensors_[2]->shape_[0] = 2; + input0->tensors_[2]->shape_[1] = 4; + input0->tensors_[2]->data_type_ = kNumberTypeInt32; + // input0->tensors_[2]->format_ = Format_NHWC; + inputs[0] = reinterpret_cast<TensorC *>(input0); + inputs[0]->data_type_ = kObjectTypeTensorType; + + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 2; + std::vector<int> inputs1_data = {-1, 4}; + inputs[1]->data_ = inputs1_data.data(); + + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = TensorListStackInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 3); + ASSERT_EQ(outputs[0]->shape_[1], 2); + ASSERT_EQ(outputs[0]->shape_[2], 4); + ASSERT_EQ(outputs[0]->data_type_, kNumberTypeInt32); + // ASSERT_EQ(outputs[0]->format_, Format_NHWC); + + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +// retest mergeshape + +} // namespace mindspore diff --git 
a/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc new file mode 100644 index 0000000000..8ef8976844 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/tile_infer_test.cc @@ -0,0 +1,95 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/tile_infer.h" +#include "nnacl/base/tile_base.h" + +namespace mindspore { + +class TileInferTest : public mindspore::CommonTest { + public: + TileInferTest() {} +}; + +TEST_F(TileInferTest, TileInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + TileParameter *parameter = new TileParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->multiples_size_ = 2; + parameter->multiples_[0] = 4; + parameter->multiples_[1] = 5; + parameter->dims_size_ = 2; + parameter->dims_[0] = 0; + parameter->dims_[1] = 1; + int ret = TileInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2 * 4); + ASSERT_EQ(outputs[0]->shape_[1], 3 * 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(TileInferTest, TileInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 6; + inputs[0]->shape_[3] = 7; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + TileParameter *parameter = new TileParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->multiples_size_ = 2; + parameter->multiples_[0] = 4; + parameter->multiples_[1] = 5; + parameter->dims_size_ = 2; + parameter->dims_[0] = 1; + parameter->dims_[1] = 2; + int ret = TileInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3 * 4); + ASSERT_EQ(outputs[0]->shape_[2], 6 * 5); + ASSERT_EQ(outputs[0]->shape_[3], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc new file mode 100644 index 
0000000000..3db894cd97 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/topk_infer_test.cc @@ -0,0 +1,99 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/topk_infer.h" + +namespace mindspore { + +class TopKInferTest : public mindspore::CommonTest { + public: + TopKInferTest() {} +}; + +TEST_F(TopKInferTest, TopKInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + inputs[0]->format_ = Format_NHWC; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + TopkParameter *parameter = new TopkParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->k_ = 6; + int ret = TopKInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 6); + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->shape_[1], 3); + ASSERT_EQ(outputs[1]->shape_[2], 6); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(TopKInferTest, TopKInferInputsSize2) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + inputs[0]->format_ = Format_NHWC; + inputs[1] = new TensorC; + std::vector<int> tmp = {7}; + inputs[1]->data_ = tmp.data(); + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + TopkParameter *parameter = new TopkParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->k_ = 6; + int ret = TopKInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 3); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[0]->shape_[2], 7); + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->shape_[1], 3); + ASSERT_EQ(outputs[1]->shape_[2], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc new 
file mode 100644 index 0000000000..666c0615d1 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/transpose_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/transpose_infer.h" + +namespace mindspore { + +class TransposeInferTest : public mindspore::CommonTest { + public: + TransposeInferTest() {} +}; + +TEST_F(TransposeInferTest, TransposeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->shape_[3] = 7; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + TransposeParameter *parameter = new TransposeParameter; + parameter->op_parameter_.infer_flag_ = true; + // parameter->conjugate_ = false; + parameter->perm_size_ = 4; + parameter->perm_[0] = 2; + parameter->perm_[1] = 1; + parameter->perm_[2] = 3; + parameter->perm_[3] = 0; + int ret = TransposeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 6); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[0]->shape_[2], 7); + ASSERT_EQ(outputs[0]->shape_[3], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc new file mode 100644 index 0000000000..4c3204c121 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/unique_infer_test.cc @@ -0,0 +1,56 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/unique_infer.h" + +namespace mindspore { + +class UniqueInferTest : public mindspore::CommonTest { + public: + UniqueInferTest() {} +}; + +TEST_F(UniqueInferTest, UniqueInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = UniqueInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc new file mode 100644 index 0000000000..b04cf7c735 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/unsorted_segment_sum_infer_test.cc @@ -0,0 +1,61 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/unsorted_segment_sum_infer.h" + +namespace mindspore { + +class UnsortedSegmentSumInferTest : public mindspore::CommonTest { + public: + UnsortedSegmentSumInferTest() {} +}; + +TEST_F(UnsortedSegmentSumInferTest, UnsortedSegmentSumInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 5; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->shape_[3] = 7; + inputs[0]->shape_[4] = 8; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[2] = new TensorC; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + UnsortedSegmentSumParameter *parameter = new UnsortedSegmentSumParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->segments_num_ = 10; + int ret = UnsortedSegmentSumInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 4); + ASSERT_EQ(outputs[0]->shape_[0], 10); + ASSERT_EQ(outputs[0]->shape_[1], 6); + ASSERT_EQ(outputs[0]->shape_[2], 7); + ASSERT_EQ(outputs[0]->shape_[3], 8); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc new file mode 100644 index 0000000000..643f2efdde --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/unsqueeze_infer_test.cc @@ -0,0 +1,205 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/unsqueeze_infer.h" +#include "nnacl/unsqueeze_parameter.h" + +namespace mindspore { + +class UnsqueezeInferTest : public mindspore::CommonTest { + public: + UnsqueezeInferTest() {} +}; + +TEST_F(UnsqueezeInferTest, UnsqueezeInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + UnSqueezeParameter *parameter = new UnSqueezeParameter; + parameter->num_dim_ = 1; + parameter->dims_[0] = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = UnsqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 1); + ASSERT_EQ(outputs[0]->shape_[1], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(UnsqueezeInferTest, UnsqueezeInferTest1) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + UnSqueezeParameter *parameter = new UnSqueezeParameter; + parameter->num_dim_ = 1; + parameter->dims_[0] = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = UnsqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 1); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(UnsqueezeInferTest, UnsqueezeInferTest2) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 4; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + UnSqueezeParameter *parameter = new UnSqueezeParameter; + parameter->num_dim_ = 0; + parameter->op_parameter_.infer_flag_ = true; + int ret = UnsqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(UnsqueezeInferTest, UnsqueezeInferTest3) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 4; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 5; + inputs[0]->shape_[2] = 6; + inputs[0]->shape_[3] = 7; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + UnSqueezeParameter *parameter = new UnSqueezeParameter; + parameter->num_dim_ = 1; + parameter->dims_[0] = 1; + parameter->op_parameter_.infer_flag_ = true; + int ret = UnsqueezeInferShape((const TensorC **)inputs.data(), 
inputs.size(), outputs.data(), outputs.size(),
+                                reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 5);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 1);
+  ASSERT_EQ(outputs[0]->shape_[2], 5);
+  ASSERT_EQ(outputs[0]->shape_[3], 6);
+  ASSERT_EQ(outputs[0]->shape_[4], 7);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+TEST_F(UnsqueezeInferTest, UnsqueezeInferTest5) {
+  size_t inputs_size = 1;
+  std::vector<TensorC *> inputs(inputs_size, NULL);
+  inputs[0] = new TensorC;
+  inputs[0]->shape_size_ = 4;
+  inputs[0]->shape_[0] = 4;
+  inputs[0]->shape_[1] = 5;
+  inputs[0]->shape_[2] = 6;
+  inputs[0]->shape_[3] = 7;
+  std::vector<TensorC *> outputs(1, NULL);
+  outputs[0] = new TensorC;
+  UnSqueezeParameter *parameter = new UnSqueezeParameter;
+  parameter->num_dim_ = 1;
+  parameter->dims_[0] = 3;
+  parameter->op_parameter_.infer_flag_ = true;
+  int ret = UnsqueezeInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(),
+                                reinterpret_cast<OpParameter *>(parameter));
+  ASSERT_EQ(ret, NNACL_OK);
+  ASSERT_EQ(outputs[0]->shape_size_, 5);
+  ASSERT_EQ(outputs[0]->shape_[0], 4);
+  ASSERT_EQ(outputs[0]->shape_[1], 5);
+  ASSERT_EQ(outputs[0]->shape_[2], 6);
+  ASSERT_EQ(outputs[0]->shape_[3], 1);
+  ASSERT_EQ(outputs[0]->shape_[4], 7);
+  delete parameter;
+  for (size_t i = 0; i < inputs_size; i++) {
+    delete inputs[i];
+  }
+  for (size_t i = 0; i < outputs.size(); i++) {
+    delete outputs[i];
+  }
+}
+
+}  // namespace mindspore
diff --git a/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc
new file mode 100644
index 0000000000..682b4a7849
--- /dev/null
+++ b/mindspore/lite/test/ut/nnacl/infer/unstack_infer_test.cc
@@ -0,0 +1,58 @@
+/**
+ * Copyright 2020 Huawei Technologies Co., Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/unstack_infer.h" + +namespace mindspore { + +class UnstackInferTest : public mindspore::CommonTest { + public: + UnstackInferTest() {} +}; + +TEST_F(UnstackInferTest, UnstackInferTest0) { + size_t inputs_size = 1; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 3; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[0]->shape_[2] = 5; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + UnstackParameter *parameter = new UnstackParameter; + parameter->op_parameter_.infer_flag_ = true; + parameter->axis_ = 1; + int ret = UnstackInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 5); + ASSERT_EQ(outputs[1]->shape_size_, 2); + ASSERT_EQ(outputs[1]->shape_[0], 4); + ASSERT_EQ(outputs[1]->shape_[1], 5); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc new file mode 100644 index 0000000000..7e0fb716d3 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/where_infer_test.cc @@ -0,0 +1,89 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/where_infer.h" + +namespace mindspore { + +class WhereInferTest : public mindspore::CommonTest { + public: + WhereInferTest() {} +}; + +TEST_F(WhereInferTest, WhereInferTest0) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 2; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 2; + inputs[1]->shape_[0] = 2; + inputs[1]->shape_[1] = 3; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 2; + inputs[2]->shape_[0] = 2; + inputs[2]->shape_[1] = 3; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = WhereInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 2); + ASSERT_EQ(outputs[0]->shape_[1], 3); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +TEST_F(WhereInferTest, WhereInferTest1) { + size_t inputs_size = 3; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 1; + inputs[0]->shape_[0] = 1; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 1; + inputs[1]->shape_[0] = 4; + inputs[2] = new TensorC; + inputs[2]->shape_size_ = 1; + inputs[2]->shape_[0] = 1; + std::vector<TensorC *> outputs(1, NULL); + outputs[0] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = WhereInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 1); + ASSERT_EQ(outputs[0]->shape_[0], 4); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} +} // namespace mindspore diff --git a/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc b/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc new file mode 100644 index 0000000000..32bd9668e2 --- /dev/null +++ b/mindspore/lite/test/ut/nnacl/infer/while_infer_test.cc @@ -0,0 +1,62 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "common/common_test.h" +#include "mindspore/lite/nnacl/infer/while_infer.h" + +namespace mindspore { + +class WhileInferTest : public mindspore::CommonTest { + public: + WhileInferTest() {} +}; + +TEST_F(WhileInferTest, WhileInferTest0) { + size_t inputs_size = 2; + std::vector<TensorC *> inputs(inputs_size, NULL); + inputs[0] = new TensorC; + inputs[0]->shape_size_ = 2; + inputs[0]->shape_[0] = 4; + inputs[0]->shape_[1] = 3; + inputs[1] = new TensorC; + inputs[1]->shape_size_ = 3; + inputs[1]->shape_[0] = 6; + inputs[1]->shape_[1] = 5; + inputs[1]->shape_[2] = 7; + std::vector<TensorC *> outputs(2, NULL); + outputs[0] = new TensorC; + outputs[1] = new TensorC; + OpParameter *parameter = new OpParameter; + parameter->infer_flag_ = true; + int ret = WhileInferShape((const TensorC **)inputs.data(), inputs.size(), outputs.data(), outputs.size(), + reinterpret_cast<OpParameter *>(parameter)); + ASSERT_EQ(ret, NNACL_OK); + ASSERT_EQ(outputs[0]->shape_size_, 2); + ASSERT_EQ(outputs[0]->shape_[0], 4); + ASSERT_EQ(outputs[0]->shape_[1], 3); + ASSERT_EQ(outputs[1]->shape_size_, 3); + ASSERT_EQ(outputs[1]->shape_[0], 6); + ASSERT_EQ(outputs[1]->shape_[1], 5); + ASSERT_EQ(outputs[1]->shape_[2], 7); + delete parameter; + for (size_t i = 0; i < inputs_size; i++) { + delete inputs[i]; + } + for (size_t i = 0; i < outputs.size(); i++) { + delete outputs[i]; + } +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/src/infer_test.cc b/mindspore/lite/test/ut/src/infer_test.cc index 0d9073545d..263d76c800 100644 --- a/mindspore/lite/test/ut/src/infer_test.cc +++ b/mindspore/lite/test/ut/src/infer_test.cc @@ -40,18 +40,15 @@ TEST_F(InferTest, TestConvNode) { node->inputIndex = {0, 1}; node->outputIndex = {2}; node->primitive = std::make_unique<schema::PrimitiveT>(); - node->primitive->value.type = schema::PrimitiveType_Conv2D; - auto primitive = new schema::Conv2DT; - primitive->padMode = schema::PadMode_SAME_UPPER; - primitive->channelIn = 3; - primitive->channelOut = 32; + node->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto primitive = new schema::Conv2DFusionT; + primitive->pad_mode = schema::PadMode_SAME; + primitive->in_channel = 3; + primitive->out_channel = 32; primitive->format = schema::Format_NHWC; - primitive->strideH = 1; - primitive->strideW = 1; - primitive->kernelH = 3; - primitive->kernelW = 3; - primitive->dilateH = 1; - primitive->dilateW = 1; + primitive->stride = std::vector<int64_t>{1, 1}; + primitive->kernel_size = std::vector<int64_t>{3, 3}; + primitive->dilation = std::vector<int64_t>{1, 1}; node->primitive->value.value = primitive; node->name = "Conv2D"; meta_graph->nodes.emplace_back(std::move(node)); @@ -163,8 +160,8 @@ TEST_F(InferTest, TestAddNode) { node->inputIndex = {0, 1}; node->outputIndex = {2}; node->primitive = std::make_unique<schema::PrimitiveT>(); - node->primitive->value.type = schema::PrimitiveType_Add; - auto primitive = new schema::AddT; + node->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive = new schema::AddFusionT; node->primitive->value.value = primitive; node->name = "Add"; meta_graph->nodes.emplace_back(std::move(node)); @@ -254,8 +251,8 @@ TEST_F(InferTest, TestParallelExecutor) { node->inputIndex = {0, 1}; node->outputIndex = {2}; node->primitive = std::make_unique<schema::PrimitiveT>(); - node->primitive->value.type = schema::PrimitiveType_Add; - auto primitive = new schema::AddT; + node->primitive->value.type = schema::PrimitiveType_AddFusion; + auto primitive = new 
schema::AddFusionT; node->primitive->value.value = primitive; node->name = "Add"; meta_graph->nodes.emplace_back(std::move(node)); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc index 1322c88811..7126a2ad57 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/common/strided_slice_tests.cc @@ -64,7 +64,7 @@ TEST_F(TestStridedSlice, StridedSlice) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -110,7 +110,7 @@ TEST_F(TestStridedSlice, StridedSliceInt8) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc index 013b297950..bd3e5a637f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16/reduce_fp16_tests.cc @@ -36,7 +36,7 @@ class TestReduceFp16 : public mindspore::CommonTest { std::vector<lite::Tensor *> inputs_{&in_tensor_}; std::vector<lite::Tensor *> outputs_{&out_tensor_}; ReduceParameter param_ = {{}}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat16, schema::PrimitiveType_Reduce}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat16, schema::PrimitiveType_ReduceFusion}; lite::InnerContext ctx_ = lite::InnerContext(); kernel::KernelCreator creator_ = nullptr; kernel::LiteKernel *kernel_ = nullptr; @@ -68,13 +68,13 @@ void TestReduceFp16::Prepare(const std::vector<int> &input_shape, const std::vec param_.num_axes_ = num_axis; param_.mode_ = mode; - desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat16, schema::PrimitiveType_Reduce}; + desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat16, schema::PrimitiveType_ReduceFusion}; ctx_ = lite::InnerContext(); ctx_.thread_num_ = thread_num; ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc); ASSERT_NE(kernel_, nullptr); } TEST_F(TestReduceFp16, Mean) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc index 73ff167f68..0eec475d2b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/activation_grad_fp16_test.cc @@ -142,58 +142,4 @@ TEST_F(TestActGradFp16, SigmoidGradFp16) { MS_LOG(INFO) << "SigmoidGradFp16 passed"; } 
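+// The LogGradFp16 test moved to fp16_grad/arithmetic_fp16_self_grad_tests.cc.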
-TEST_F(TestActGradFp16, LogGradFp16) { - size_t output_data_size = 50; - size_t input_size; - std::string input_path = "./test_data/activationGrad/log_x_50.bin"; - auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); - ASSERT_NE(input_data, nullptr); - - std::string yt_path = "./test_data/activationGrad/log_yt_50.bin"; - auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size)); - ASSERT_NE(yt_data, nullptr); - - std::string output_path = "./test_data/activationGrad/log_out_50.bin"; - auto ref_data = reinterpret_cast<const float *>(mindspore::lite::ReadFile(output_path.c_str(), &input_size)); - ASSERT_NE(ref_data, nullptr); - EXPECT_EQ(input_size, output_data_size * sizeof(float)); - - auto yt_buf = new float16_t[output_data_size]; - auto input_buf = new float16_t[output_data_size]; - auto output_buf = new float16_t[output_data_size]; - - for (int i = 0; i < output_data_size; i++) { - yt_buf[i] = (float16_t)yt_data[i]; - input_buf[i] = (float16_t)input_data[i]; - } - - Fp16LogGrad(yt_buf, input_buf, 50, output_buf); - - int res = 0; - float error = 0; - std::cout << "======Compare with reference data======" << std::endl; - for (int i = 0; i < output_data_size; i++) { - float diff = std::fabs(static_cast<float>(output_buf[i]) - ref_data[i]); - if (diff > 0.00001) { - error += diff; - } - } - error /= static_cast<float>(output_data_size); - if (error > error_bound) { - printf("error%f while error_bound=%f\n", error, error_bound); - res = 1; - } - - EXPECT_EQ(res, 0); - - delete[] output_buf; - delete[] yt_buf; - delete[] input_buf; - delete[] ref_data; - delete[] yt_data; - delete[] input_data; - - MS_LOG(INFO) << "LogGradFp16 passed"; -} - } // namespace mindspore diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc new file mode 100644 index 0000000000..40d7a4163a --- /dev/null +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp16_grad/arithmetic_fp16_self_grad_tests.cc @@ -0,0 +1,87 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <iostream> +#include <vector> +#ifdef ENABLE_NEON +#include <arm_neon.h> +#endif +#include "src/common/log_adapter.h" +#include "common/common_test.h" +#include "src/common/file_utils.h" +#include "nnacl/fp16_grad/arithmetic_self_grad.h" + +namespace mindspore { +class TestArithmeticSelfGradFp16 : public mindspore::CommonTest { + public: + TestArithmeticSelfGradFp16() {} + float error_bound = 1e-3; +}; + +TEST_F(TestArithmeticSelfGradFp16, LogGradFp16) { + size_t output_data_size = 50; + size_t input_size; + std::string input_path = "./test_data/activationGrad/log_x_50.bin"; + auto input_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(input_path.c_str(), &input_size)); + ASSERT_NE(input_data, nullptr); + + std::string yt_path = "./test_data/activationGrad/log_yt_50.bin"; + auto yt_data = reinterpret_cast<float *>(mindspore::lite::ReadFile(yt_path.c_str(), &input_size)); + ASSERT_NE(yt_data, nullptr); + + std::string output_path = "./test_data/activationGrad/log_out_50.bin"; + auto ref_data = reinterpret_cast<const float *>(mindspore::lite::ReadFile(output_path.c_str(), &input_size)); + ASSERT_NE(ref_data, nullptr); + EXPECT_EQ(input_size, output_data_size * sizeof(float)); + + auto yt_buf = new float16_t[output_data_size]; + auto input_buf = new float16_t[output_data_size]; + auto output_buf = new float16_t[output_data_size]; + + for (int i = 0; i < output_data_size; i++) { + yt_buf[i] = (float16_t)yt_data[i]; + input_buf[i] = (float16_t)input_data[i]; + } + + Fp16LogGrad(yt_buf, input_buf, 50, output_buf); + + int res = 0; + float error = 0; + std::cout << "======Compare with reference data======" << std::endl; + for (int i = 0; i < output_data_size; i++) { + float diff = std::fabs(static_cast<float>(output_buf[i]) - ref_data[i]); + if (diff > 0.00001) { + error += diff; + } + } + error /= static_cast<float>(output_data_size); + if (error > error_bound) { + printf("error%f while error_bound=%f\n", error, error_bound); + res = 1; + } + + EXPECT_EQ(res, 0); + + delete[] output_buf; + delete[] yt_buf; + delete[] input_buf; + delete[] ref_data; + delete[] yt_data; + delete[] input_data; + + MS_LOG(INFO) << "LogGradFp16 passed"; +} + +} // namespace mindspore diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc index 7ac5752430..6786dbe45d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/activation_fp32_test.cc @@ -126,7 +126,7 @@ TEST_F(TestActivationFp32, HSwishFp32) { ctx.thread_num_ = 7; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); @@ -170,7 +170,7 @@ TEST_F(TestActivationFp32, HardTanh1) { ctx.thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); @@ -214,7 +214,7 @@ TEST_F(TestActivationFp32, HardTanh2) { ctx.thread_num_ = 
2; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc index 442b0c03a4..04b9c467c5 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/batchnorm_fp32_tests.cc @@ -59,7 +59,7 @@ TEST_F(TestBatchnormFp32, BNTest) { ctx.thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); @@ -116,7 +116,7 @@ TEST_F(TestBatchnormFp32, FusedBNTest) { ctx.thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); @@ -167,7 +167,7 @@ TEST_F(TestBatchnormFp32, easyTest) { ctx.thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc index f84a7e5ede..bf9430e450 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/constant_of_shape_fp32_test.cc @@ -57,7 +57,7 @@ TEST_F(TestConstantOfShapeFp32, Simple) { ctx->thread_num_ = 4; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::ConstantOfShapeCPUKernel *op = - new kernel::ConstantOfShapeCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr); + new kernel::ConstantOfShapeCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx); op->Init(); op->Run(); float correct[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc index fc61ba3eea..dc9afdfa4d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/convolution_depthwise_fp32_tests.cc @@ -113,11 +113,10 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Accuracy) { InitConvDwCreator(&inputs, &outputs, conv_param); // register op - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D}; + kernel::KernelKey desc = 
{kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Conv2DFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = - creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc, nullptr); + kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc); ASSERT_NE(kernel, nullptr); // op run kernel->Run(); @@ -165,11 +164,10 @@ TEST_F(TestConvolutionDwFp32, ConvDwFp32Performance) { InitConvDwCreator(&inputs, &outputs, conv_param); // register op - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_DepthwiseConv2D}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Conv2DFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = - creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc, nullptr); + kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), ctx, desc); ASSERT_NE(kernel, nullptr); /* running warm up */ diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc index 605dc7cc49..f26e6e535f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/crop_fp32_test.cc @@ -273,7 +273,7 @@ TEST_F(CropTestFp32, CropTest11) { crop_param.axis_ = 2; crop_param.offset_[0] = 0; crop_param.offset_[1] = 0; - auto kernel = new kernel::CropCPUKernel(reinterpret_cast<OpParameter *>(&crop_param), inputs, outputs, ctx, nullptr); + auto kernel = new kernel::CropCPUKernel(reinterpret_cast<OpParameter *>(&crop_param), inputs, outputs, ctx); kernel->Init(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc index 716931bbcd..b4cfb75286 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/deconvolution_fp32_tests.cc @@ -482,7 +482,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) { float *correct; int total_size = DeConvTestInit1(&inputs_, &outputs_, deconv_param, &correct); auto *deconv = - new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr); + new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx); deconv->Init(); deconv->Run(); @@ -550,7 +550,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); auto *deconv = - new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr); + new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx); deconv->Init(); deconv->Run(); @@ -628,7 +628,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); auto *deconv = - new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr); + new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx); deconv->Init(); 
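// The direct-construction call sites in these tests change the same way as
// the registry creators: the trailing primitive argument disappears, and a
// CPU kernel is built from just (parameter, inputs, outputs, ctx). A sketch
// of the new four-argument form, with names taken from the deconvolution
// hunks in this file:
//
//   auto *deconv = new kernel::DeConvolutionCPUKernel(
//       reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx);
//   deconv->Init();
//   deconv->Run();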
deconv->Run(); @@ -697,7 +697,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest4) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); auto *deconv = - new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx, nullptr); + new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx); deconv->Init(); deconv->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc index 2c8c0ca8a2..bd3ef00e34 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/detection_post_process_test.cc @@ -125,7 +125,7 @@ TEST_F(TestDetectionPostProcessFp32, Fast) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::DetectionPostProcessCPUKernel *op = - new kernel::DetectionPostProcessCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr); + new kernel::DetectionPostProcessCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx); op->Init(); op->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc index 52255efd4c..6be70d436b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/elu_fp32_test.cc @@ -54,7 +54,7 @@ TEST_F(TestEluFp32, EluTest) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::EluCPUKernel *elu = - new kernel::EluCPUKernel(reinterpret_cast<OpParameter *>(elu_param_), inputs_, outputs_, ctx, nullptr); + new kernel::EluCPUKernel(reinterpret_cast<OpParameter *>(elu_param_), inputs_, outputs_, ctx); elu->Init(); elu->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc index a96d6df352..2987e1f69b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/embedding_lookup_fp32_test.cc @@ -69,7 +69,7 @@ TEST_F(TestEmbeddingLookupFp32, ElTest) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::EmbeddingLookupCPUKernel *el = new kernel::EmbeddingLookupCPUKernel( - reinterpret_cast<OpParameter *>(embedding_lookup_param_), inputs_, outputs_, ctx, nullptr); + reinterpret_cast<OpParameter *>(embedding_lookup_param_), inputs_, outputs_, ctx); el->Init(); el->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc index 4e80dfa2de..4c703da6d6 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/fullconnection_fp32_tests.cc @@ -22,6 +22,7 @@ #include "src/common/file_utils.h" #include "src/common/log_adapter.h" #include "src/runtime/kernel/arm/fp32/fullconnection_fp32.h" +#include "src/runtime/infer_manager.h" namespace mindspore { using mindspore::lite::Tensor; @@ -67,6 +68,9 @@ int FcTestInit1(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor * matmal_param->a_transpose_ = false; matmal_param->has_bias_ = true; matmal_param->act_type_ = ActType_No; + 
matmal_param->op_parameter_.type_ = 67; + matmal_param->op_parameter_.infer_flag_ = true; + KernelInferShape(*inputs_, outputs_, reinterpret_cast<OpParameter *>(matmal_param)); return out_t->ElementsNum(); } @@ -79,8 +83,7 @@ TEST_F(TestFcFp32, FcTest1) { auto *ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *fc = - new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto *fc = new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); fc->Init(); fc->Run(); @@ -125,6 +128,9 @@ int FcTestInit2(std::vector<lite::Tensor *> *inputs_, std::vector<lite::Tensor * matmal_param->a_transpose_ = false; matmal_param->has_bias_ = true; matmal_param->act_type_ = ActType_No; + matmal_param->op_parameter_.type_ = 67; + matmal_param->op_parameter_.infer_flag_ = true; + KernelInferShape(*inputs_, outputs_, reinterpret_cast<OpParameter *>(matmal_param)); return out_t->ElementsNum(); } @@ -137,8 +143,7 @@ TEST_F(TestFcFp32, FcTest2) { auto *ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *fc = - new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto *fc = new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); fc->Init(); fc->Run(); @@ -187,8 +192,7 @@ TEST_F(TestFcFp32, FcTest3) { auto *ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *fc = - new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto *fc = new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); fc->Init(); struct timeval start, end; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc index 1c45310307..f90f293e8f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/l2norm_fp32_test.cc @@ -63,13 +63,13 @@ void TestL2NormFp32::Init(const std::vector<int> &input_shape, const std::vector param_.epsilon_ = 1e-6; param_.act_type_ = activation_type; - desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_L2Norm}; + desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_L2NormalizeFusion}; ctx_ = lite::InnerContext(); ctx_.thread_num_ = thread_num; ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc); ASSERT_NE(kernel_, nullptr); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc index eb1c8d788c..111fda99d0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lsh_projection_fp32_tests.cc @@ -63,7 +63,7 @@ TEST_F(TestLshProjectionFp32, Dense1DInputs) { auto ctx = std::make_shared<lite::InnerContext>(); ctx->thread_num_ = 3; 
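// With the primitive objects gone, FcTestInit1/FcTestInit2 above can no
// longer lean on a PrimitiveC to infer output shapes, so the tests now call
// KernelInferShape() from the newly included src/runtime/infer_manager.h
// themselves. A sketch of that setup (the type id 67 is copied from the hunk
// above; it is whatever the generated schema assigns to the FullConnection
// primitive in this revision, not a stable constant):
//
//   matmal_param->op_parameter_.type_ = 67;          // schema primitive type id
//   matmal_param->op_parameter_.infer_flag_ = true;  // request static shape inference
//   KernelInferShape(*inputs_, outputs_, reinterpret_cast<OpParameter *>(matmal_param));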
ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -103,7 +103,7 @@ TEST_F(TestLshProjectionFp32, Sparse1DInputs) { auto ctx = std::make_shared<lite::InnerContext>(); ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -147,7 +147,7 @@ TEST_F(TestLshProjectionFp32, Sparse3DInputs) { auto ctx = std::make_shared<lite::InnerContext>(); ctx->thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc index 954d64b704..9929a697a3 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/lstm_fp32_tests.cc @@ -150,11 +150,10 @@ TEST_F(LstmFp32, LstmForwardFp32Accuracy) { InitLstmForwardCreator(&inputs, &outputs, lstm_param); // register op - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_Lstm}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_LSTM}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = - creator(inputs, outputs, reinterpret_cast<OpParameter *>(lstm_param), ctx, desc, nullptr); + kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(lstm_param), ctx, desc); ASSERT_NE(kernel, nullptr); // op run kernel->Run(); @@ -299,11 +298,10 @@ TEST_F(LstmFp32, LstmBackwardFp32Accuracy) { InitLstmBackwardCreator(&inputs, &outputs, lstm_param); // register op - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_Lstm}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, mindspore::schema::PrimitiveType_LSTM}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - kernel::LiteKernel *kernel = - creator(inputs, outputs, reinterpret_cast<OpParameter *>(lstm_param), ctx, desc, nullptr); + kernel::LiteKernel *kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(lstm_param), ctx, desc); ASSERT_NE(kernel, nullptr); // op run kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc index 5de567f478..8cc9d9fbda 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/matmul_fp32_tests.cc @@ -135,7 +135,7 @@ TEST_F(TestMatMulFp32, simple) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, 
ctx->Init()); - auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); mm->Init(); mm->Run(); float correct[] = {-0.1256939023733139, -0.07744802534580231, 0.07410638779401779, @@ -168,7 +168,7 @@ TEST_F(TestMatMulFp32, simple_bias) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); mm->Init(); mm->Run(); float correct[] = {-0.1256939023733139 + 1, -0.07744802534580231 + 2, 0.07410638779401779 + 3, @@ -220,7 +220,7 @@ TEST_F(TestMatMulFp32, simple2) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); mm->Init(); mm->Run(); float correct[] = { @@ -290,7 +290,7 @@ TEST_F(TestMatMulFp32, simple_transb) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); mm->Init(); mm->Run(); float correct[] = {0.00533547, 0.002545945, 0.062974121, -0.445441471, -0.246223617, -0.142070031}; @@ -340,7 +340,7 @@ TEST_F(TestMatMulFp32, batch) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx, nullptr); + auto mm = new kernel::MatmulCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx); mm->Init(); mm->Run(); float correct[] = {21.38518524169922, -14.514888763427734, -11.040614128112793, 16.91403579711914, diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc index cac9120e0b..6cdecae50c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/non_max_suppression_fp32_tests.cc @@ -86,7 +86,7 @@ void TestNMSFp32::Init(const std::vector<int> &box_tensor_shape, float *box_data ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc_); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); ASSERT_NE(kernel_, nullptr); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc index d7c7a240fa..5ceef9686d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/one_hot_fp32_test.cc @@ -74,7 +74,7 @@ void 
TestOneHotFp32::Prepare(const std::vector<int> &indices_shape, int *indices ctx_.thread_num_ = thread_num; ctx_.Init(); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(param_), &ctx_, desc); } // 3 3 axis -1 -> 3 3 4 diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc index 99cb437480..a9a5be9202 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/pad_fp32_test.cc @@ -45,7 +45,7 @@ class TestPadFp32 : public mindspore::CommonTest { PadParameter param_; std::vector<lite::Tensor *> inputs_{&in_tensor_}; std::vector<lite::Tensor *> outputs_{&out_tensor_}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Pad}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_PadFusion}; lite::InnerContext ctx_ = lite::InnerContext(); kernel::KernelCreator creator_ = nullptr; kernel::LiteKernel *kernel_ = nullptr; @@ -82,13 +82,13 @@ void TestPadFp32::Prepare(const std::vector<int> &input_shape, const std::vector inputs_.emplace_back(&paddings_tensor_); } - desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Pad}; + desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_PadFusion}; ctx_ = lite::InnerContext(); ctx_.thread_num_ = thread_num; ctx_.Init(); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc); ASSERT_NE(kernel_, nullptr); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc index 81aa537599..c2a1beffc0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/power_fp32_tests.cc @@ -75,7 +75,7 @@ TEST_F(TestPowerFp32, Simple) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr); + auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx); op->Init(); op->Run(); float correct[] = {1, 64, 2187, 65536}; @@ -99,7 +99,7 @@ TEST_F(TestPowerFp32, Broadcast) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr); + auto *op = new kernel::PowerCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx); op->Init(); op->Run(); float correct[] = {1, 4, 9, 16}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc index bb1a294d0b..7da0c81eda 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reduce_fp32_tests.cc @@ 
-53,7 +53,7 @@ class TestReduceFp32 : public mindspore::CommonTest { Tensor out_tensor_; std::vector<Tensor *> inputs{&in_tensor_}; std::vector<Tensor *> outputs{&out_tensor_}; - kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Reduce}; + kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_ReduceFusion}; kernel::KernelCreator creator_ = nullptr; lite::InnerContext *ctx_ = nullptr; kernel::LiteKernel *kernel_ = nullptr; @@ -89,7 +89,7 @@ void TestReduceFp32::Prepare(const std::vector<int> &in_shape, const std::vector ctx_->allocator = Allocator::Create(); } ctx_->thread_num_ = thread_num_; - kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx_, desc_, nullptr); + kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx_, desc_); } TEST_F(TestReduceFp32, Mean1) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc index b6b9b216b1..bc589e7c24 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_bilinear_fp32_tests.cc @@ -69,7 +69,7 @@ void TestResizeBilinearFp32::Prepare(const std::vector<int> &input_shape, const ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc); ASSERT_NE(kernel_, nullptr); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc index d7e4a2eadc..0ee0cba94f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/resize_nearest_neighbor_fp32_tests.cc @@ -64,7 +64,7 @@ void TestResizeNearestNeighborFp32::Prepare(const std::vector<int> &input_shape, ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc); ASSERT_NE(kernel_, nullptr); } // 1*1 -> 1*1 diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc index cab70b2c5e..4d7fff18a3 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/reverse_sequence_fp32_tests.cc @@ -51,7 +51,7 @@ TEST_F(TestReverseSequenceFp32, BatchLessSeq) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -95,7 +95,7 @@ TEST_F(TestReverseSequenceFp32, BatchGreaterSeq) { 
auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -139,7 +139,7 @@ TEST_F(TestReverseSequenceFp32, BatchSeqNotAdjacent) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc index d018ec9818..45f0495e96 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/roi_pooling_fp32_tests.cc @@ -62,7 +62,7 @@ TEST_F(TestROIPoolingFp32, Simple) { auto ctx = new lite::InnerContext; ctx->thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto *op = new kernel::ROIPoolingCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx, nullptr); + auto *op = new kernel::ROIPoolingCPUKernel(reinterpret_cast<OpParameter *>(param), inputs_, outputs_, ctx); op->Init(); op->Run(); float correct[] = {25, 31, 34, 35, 25, 31, 34, 35}; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc index f66464beca..4f9927237e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/scale_fp32_tests.cc @@ -48,7 +48,7 @@ class TestScaleFp32 : public mindspore::CommonTest { ScaleParameter param_; std::vector<lite::Tensor *> inputs_{&in_tensor_, &scale_tensor_, &offset_tensor_}; std::vector<lite::Tensor *> outputs_{&out_tensor_}; - kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Scale}; + kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_ScaleFusion}; lite::InnerContext ctx_ = lite::InnerContext(); kernel::KernelCreator creator_ = nullptr; kernel::LiteKernel *kernel_ = nullptr; @@ -89,7 +89,7 @@ void TestScaleFp32::Prepare(const std::vector<int> &input_shape, const std::vect ctx_.Init(); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc_); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); ASSERT_NE(kernel_, nullptr); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc index 4778dea26c..245426f51b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/skip_gram_fp32.cc @@ -61,7 +61,7 @@ TEST_F(TestSkipGramFp32, ElTest) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::SkipGramCPUKernel *el = - new kernel::SkipGramCPUKernel(reinterpret_cast<OpParameter *>(skip_gram_param_), 
inputs_, outputs_, ctx, nullptr); + new kernel::SkipGramCPUKernel(reinterpret_cast<OpParameter *>(skip_gram_param_), inputs_, outputs_, ctx); el->Init(); el->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc index 856e61ce5f..c2b1ab5396 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/softmax_tests.cc @@ -36,14 +36,14 @@ TEST_F(TestSoftmaxFp32, 001) { std::vector<lite::Tensor *> outputs = {&out_tensor}; SoftmaxParameter parameter = {{}, -1, {2, 1, 1, 5}, 10, 4}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_SoftMax}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Softmax}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc index d3bc8f4047..82c53c6568 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/space_to_depth_fp32_tests.cc @@ -82,7 +82,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc index aabf787a41..8a985af705 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/sparse_to_dense_fp32_tests.cc @@ -88,7 +88,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test1) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -172,7 +172,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = 
output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -256,7 +256,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test3) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -338,7 +338,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test4) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -420,7 +420,7 @@ TEST_F(TestSparseToDenseFp32, SparseToDense_test5) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc index f313878ceb..6b2345468b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/strided_slice_fp32_tests.cc @@ -157,7 +157,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice3) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -207,7 +207,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice4) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -264,7 +264,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice5) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -321,7 +321,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice6) { auto creator = 
lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -370,7 +370,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice7) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -427,7 +427,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice8) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; @@ -577,7 +577,7 @@ TEST_F(TestStridedSliceFp32, StridedSlice9) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(strided_slice_param), ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); delete ctx; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc index 167d2cbff0..3e9e33c783 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/tile_fp32_tests.cc @@ -46,14 +46,14 @@ TEST_F(TestTileFp32, Tile) { parameter.out_strides_[0] = 6; parameter.out_strides_[1] = 1; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Tile}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TileFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); EXPECT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -89,7 +89,7 @@ TEST_F(TestTileFp32, SimpleTile1) { parameter.out_strides_[0] = 2; parameter.out_strides_[1] = 1; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Tile}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TileFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); EXPECT_NE(creator, nullptr); @@ -98,7 +98,7 @@ TEST_F(TestTileFp32, SimpleTile1) { ASSERT_EQ(lite::RET_OK, ctx->Init()); auto context = ctx.get(); context->thread_num_ = 2; - auto kernel = creator(inputs, outputs, 
reinterpret_cast<OpParameter *>(&parameter), context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), context, desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -134,7 +134,7 @@ TEST_F(TestTileFp32, SimpleTile2) { parameter.out_strides_[0] = 4; parameter.out_strides_[1] = 1; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Tile}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TileFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); EXPECT_NE(creator, nullptr); @@ -143,7 +143,7 @@ TEST_F(TestTileFp32, SimpleTile2) { ASSERT_EQ(lite::RET_OK, ctx->Init()); auto context = ctx.get(); context->thread_num_ = 2; - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), context, desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc index d73a8998b8..705b017a2f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/topk_fp32_tests.cc @@ -41,14 +41,14 @@ TEST_F(TestTopKFp32, TopK) { std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1}; TopkParameter parameter = {{}, 2, true, 3, 4}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TopK}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_TopKFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc index a409819f7e..c0c63da22c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/transpose_fp32_tests.cc @@ -202,7 +202,7 @@ TEST_F(TestTransposeFp32, TransposeFp32_test5) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc index 0246336a73..cb2582d07d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unique_fp32_tests.cc @@ -47,7 +47,7 @@ TEST_F(TestUniqueFp32, Unique) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto 
kernel = creator(inputs, outputs, &parameter, ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, &parameter, ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc index c77e05eeda..0eb1ca0099 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/unstack_fp32_tests.cc @@ -53,7 +53,7 @@ TEST_F(TestUnstackFp32, Unstack) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -101,7 +101,7 @@ TEST_F(TestUnstackFp32, Unstack2) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); EXPECT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/upsample_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/upsample_fp32_tests.cc deleted file mode 100644 index 4145bcfb45..0000000000 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/upsample_fp32_tests.cc +++ /dev/null @@ -1,247 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include <vector> -#include "common/common_test.h" -#include "mindspore/lite/src/kernel_registry.h" -#include "mindspore/lite/src/lite_kernel.h" -#include "mindspore/lite/src/tensor.h" -#include "nnacl/upsample_parameter.h" -#include "schema/ops_generated.h" -#include "src/ops/upsample.h" -using mindspore::schema::Format_NHWC; - -namespace mindspore { - -class TestUpsampleFp32 : public mindspore::CommonTest { - public: - TestUpsampleFp32() = default; - void Prepare(const std::vector<int> &input_shape, float *input_data, float *scale_data, float *output_data, - schema::ResizeMethod method, const int thread_num); - - void TearDown() override; - - public: - float err_tol = 1e-5; - lite::Tensor in_tensor_; - lite::Tensor scale_tensor_; - lite::Tensor out_tensor_; - std::vector<lite::Tensor *> inputs_{&in_tensor_, &scale_tensor_}; - std::vector<lite::Tensor *> outputs_{&out_tensor_}; - UpsampleParameter *param_ = nullptr; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Upsample}; - lite::InnerContext ctx_ = lite::InnerContext(); - kernel::KernelCreator creator_ = nullptr; - kernel::LiteKernel *kernel_ = nullptr; - lite::Upsample *upsample_ = nullptr; -}; - -void TestUpsampleFp32::TearDown() { - in_tensor_.set_data(nullptr); - scale_tensor_.set_data(nullptr); - out_tensor_.set_data(nullptr); - delete upsample_; - delete kernel_; -} - -void TestUpsampleFp32::Prepare(const std::vector<int> &input_shape, float *input_data, float *scale_data, - float *output_data, schema::ResizeMethod method, const int thread_num) { - in_tensor_.set_data_type(kNumberTypeFloat32); - in_tensor_.set_format(Format_NHWC); - in_tensor_.set_shape(input_shape); - in_tensor_.set_data(input_data); - scale_tensor_.set_data_type(kNumberTypeFloat32); - scale_tensor_.set_data(scale_data); - scale_tensor_.set_shape({4}); - out_tensor_.set_data_type(kNumberTypeFloat32); - out_tensor_.set_data(output_data); - upsample_ = new (std::nothrow) lite::Upsample; - upsample_->InferShape(inputs_, outputs_); - param_ = reinterpret_cast<UpsampleParameter *>(malloc(sizeof(UpsampleParameter))); - param_->method_ = static_cast<int>(method); - desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_Upsample}; - ctx_ = lite::InnerContext(); - ctx_.thread_num_ = thread_num; - - ASSERT_EQ(lite::RET_OK, ctx_.Init()); - creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc); - ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, reinterpret_cast<OpParameter *>(param_), &ctx_, desc, nullptr); - ASSERT_NE(kernel_, nullptr); -} - -// 2*2 -> 4*4 1thread -TEST_F(TestUpsampleFp32, test1) { - float input_data[] = {0.0, 1.0, 2.0, 3.0}; - float output_data[16] = {0.0f}; - std::vector<int> input_shape = {1, 2, 2, 1}; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - std::vector<float> expect = {0.0, 0.5, 1.0, 1.0, 1.0, 1.5, 2.0, 2.0, 2.0, 2.5, 3.0, 3.0, 2.0, 2.5, 3.0, 3.0}; - - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 1); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - auto output_size = 16; - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 2*2 -> 4*4 2thread -TEST_F(TestUpsampleFp32, test2) { - float input_data[] = {0.0, 1.0, 2.0, 3.0}; - float output_data[16] = {0.0f}; - std::vector<int> input_shape = {1, 2, 2, 1}; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - std::vector<float> expect = {0.0, 0.5, 1.0, 1.0, 1.0, 1.5, 2.0, 2.0, 2.0, 2.5, 3.0, 3.0, 2.0, 2.5, 
3.0, 3.0}; - - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 2); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - auto output_size = 16; - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 2*2*2*5 -> 2*4*4*5 thread num 1 -TEST_F(TestUpsampleFp32, test3) { - float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, - 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, - 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - float output_data[160] = {0}; - std::vector<int> input_shape = {2, 2, 2, 5}; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - std::vector<float> expect = { - 0.0, 1.0, 2.0, 3.0, 4.0, 2.5, 3.5, 4.5, 5.5, 6.5, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, 6.0, 7.0, - 8.0, 9.0, 5.0, 6.0, 7.0, 8.0, 9.0, 7.5, 8.5, 9.5, 10.5, 11.5, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, - 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, 17.0, 18.0, - 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, - 17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 22.5, 23.5, 24.5, 25.5, 26.5, - 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 27.5, 28.5, 29.5, - 30.5, 31.5, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 32.5, - 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0, - 34.0, 32.5, 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - auto output_size = 160; - - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 1); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 2*2*2*5 -> 2*4*4*5 thread_num 2 -TEST_F(TestUpsampleFp32, test4) { - float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, - 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, - 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - float output_data[160] = {0}; - std::vector<int> input_shape = {2, 2, 2, 5}; - std::vector<float> expect = { - 0.0, 1.0, 2.0, 3.0, 4.0, 2.5, 3.5, 4.5, 5.5, 6.5, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, 6.0, 7.0, - 8.0, 9.0, 5.0, 6.0, 7.0, 8.0, 9.0, 7.5, 8.5, 9.5, 10.5, 11.5, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, - 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, 17.0, 18.0, - 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 12.5, 13.5, 14.5, 15.5, 16.5, 15.0, 16.0, - 17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 22.5, 23.5, 24.5, 25.5, 26.5, - 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 27.5, 28.5, 29.5, - 30.5, 31.5, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 32.5, - 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0, - 34.0, 32.5, 33.5, 34.5, 35.5, 36.5, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - auto output_size = 160; - std::vector<float> output(output_size, 0.0); - Prepare(input_shape, input_data, scale_data, output_data, 
schema::ResizeMethod_LINEAR, 2); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 1 5 5 5 -> 1 2 2 5 thread num 1 -TEST_F(TestUpsampleFp32, test5) { - float input_data[] = { - 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, - 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, - 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, - 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, - 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, - 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, - 96.0, 97.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0, - 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0}; - float output_data[20] = {0}; - std::vector<int> input_shape = {1, 5, 5, 5}; - std::vector<float> expect = {0.0, 1.0, 2.0, 3.0, 4.0, 12.5, 13.5, 14.5, 15.5, 16.5, - 62.5, 63.5, 64.5, 65.5, 66.5, 75.0, 76.0, 77.0, 78.0, 79.0}; - float scale_data[] = {1.0f, 0.4f, 0.4f, 1.0f}; - auto output_size = 20; - - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_LINEAR, 2); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 2 2 2 5 -> 2 4 4 5 thread num 1 -TEST_F(TestUpsampleFp32, test6) { - float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, - 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, - 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - float output_data[160] = {0}; - std::vector<int> input_shape = {2, 2, 2, 5}; - std::vector<int> output_shape = {2, 4, 4, 5}; - std::vector<float> expect = { - 0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, 6.0, 7.0, - 8.0, 9.0, 0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, - 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, 23.0, 24.0, - 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, - 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, - 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0, - 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - size_t output_size = 160; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_NEAREST, 1); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} - -// 2 2 2 5 -> 2 4 4 5 thread num 2 -TEST_F(TestUpsampleFp32, test7) { - float input_data[] = {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, - 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 
22.0, 23.0, 24.0, 25.0, 26.0, 27.0, - 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - float output_data[160] = {0}; - std::vector<int> input_shape = {2, 2, 2, 5}; - std::vector<int> output_shape = {2, 4, 4, 5}; - std::vector<float> expect = { - 0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, 6.0, 7.0, - 8.0, 9.0, 0.0, 1.0, 2.0, 3.0, 4.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 5.0, - 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, - 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 10.0, 11.0, 12.0, 13.0, 14.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, - 17.0, 18.0, 19.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, 23.0, 24.0, - 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 20.0, 21.0, 22.0, 23.0, 24.0, 20.0, 21.0, 22.0, - 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 30.0, - 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0, 30.0, 31.0, 32.0, 33.0, - 34.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 35.0, 36.0, 37.0, 38.0, 39.0}; - size_t output_size = 160; - float scale_data[] = {1.0f, 2.0f, 2.0f, 1.0f}; - Prepare(input_shape, input_data, scale_data, output_data, schema::ResizeMethod_NEAREST, 2); - auto ret = kernel_->Run(); - EXPECT_EQ(0, ret); - - ASSERT_EQ(0, CompareOutputData(output_data, expect.data(), output_size, err_tol)); -} -} // namespace mindspore diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc index f6f47086aa..ce5982ff4f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/arithmetic_grad_fp32_tests.cc @@ -16,13 +16,14 @@ #include <iostream> #include <memory> #include <vector> + +#include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" #include "common/common_test.h" #include "src/common/file_utils.h" #include "nnacl/fp32/reduce_fp32.h" #include "src/runtime/kernel/arm/fp32_grad/arithmetic_grad.h" #include "src/kernel_registry.h" -#include "src/ops/arithmetic_grad.h" namespace mindspore { @@ -44,13 +45,6 @@ ArithmeticParameter *PopulateArithmeticParameter(mindspore::schema::PrimitiveTyp } prim->value.type = type; - auto agrad = mindspore::lite::ArithmeticGrad(prim); - agrad.InferShape(inputs, outputs); - - arithmetic_param->ndim_ = agrad.NDims(); - for (size_t i = 0; i < agrad.dyShape().size(); i++) arithmetic_param->out_shape_[i] = (agrad.dyShape())[i]; - for (size_t i = 0; i < agrad.x1Shape().size(); i++) arithmetic_param->in_shape0_[i] = (agrad.x1Shape())[i]; - for (size_t i = 0; i < agrad.x2Shape().size(); i++) arithmetic_param->in_shape1_[i] = (agrad.x2Shape())[i]; return arithmetic_param; } @@ -216,7 +210,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGradFp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AddGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -258,7 +252,7 @@ 
TEST_F(TestArithmeticGradFp32, TestAddGrad2Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AddGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -302,7 +296,7 @@ TEST_F(TestArithmeticGradFp32, TestAddGrad3Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AddGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -347,7 +341,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGradFp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_SubGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -392,7 +386,7 @@ TEST_F(TestArithmeticGradFp32, TestSubGrad2Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_SubGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -435,7 +429,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGradFp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MulGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); int loop_count = 1000; auto time_start = mindspore::lite::GetTimeUs(); @@ -487,7 +481,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad2Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MulGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -531,7 +525,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad3Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MulGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, 
nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -575,7 +569,7 @@ TEST_F(TestArithmeticGradFp32, TestMulGrad4Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MulGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -619,7 +613,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGradFp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DivGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -663,7 +657,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad2Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DivGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -708,7 +702,7 @@ TEST_F(TestArithmeticGradFp32, TestDivGrad3Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DivGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -752,7 +746,7 @@ TEST_F(TestArithmeticGradFp32, Test3DDivGrad2Fp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DivGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -834,7 +828,7 @@ TEST_F(TestArithmeticGradFp32, TestMaximumGradBroadcastFp32) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MaximumGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc index b0d94b9d2c..b7c130c7db 100644 --- 
a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bias_grad_fp32_tests.cc @@ -54,10 +54,10 @@ TEST_F(TestBiasGradFp32, BiasGradFp32) { ctx.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BiasGrad}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BiasAddGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bias_param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bias_param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -103,10 +103,10 @@ TEST_F(TestBiasGradFp32, BiasGrad2DFp32) { ctx.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BiasGrad}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BiasAddGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bias_param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bias_param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc index 29fb911a23..ada6620a1a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/bn_grad_fp32_test.cc @@ -50,7 +50,6 @@ TEST_F(TestBNGradFp32, BNGradFp32) { ASSERT_NE(bn_param, nullptr); bn_param->epsilon_ = 1e-2; - bn_param->momentum_ = 0.1; const int batch = 2; const int channels = 3; const int height = 4; @@ -82,10 +81,10 @@ TEST_F(TestBNGradFp32, BNGradFp32) { ctx.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BNGrad}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_BatchNormGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bn_param), &ctx, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bn_param), &ctx, desc); ASSERT_NE(kernel_obj, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel_obj->workspace_size()); @@ -178,7 +177,7 @@ TEST_F(TestBNGradFp32, BNTtrainFp32) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bn_param), &context, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(bn_param), &context, desc); ASSERT_NE(kernel_obj, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel_obj->workspace_size()); float *save_mean = reinterpret_cast<float *>(save_mean_tensor.MutableData()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc index 7ed5805484..b49f1368dd 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/convolution_grad_fp32_tests.cc @@ -114,10 +114,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32FilterGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropFilterFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); // warm up loop @@ -191,10 +191,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32InputGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradInput}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropInputFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -267,10 +267,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupFilterGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropFilterFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); kernel->Run(); @@ -340,10 +340,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupInputGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradInput}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropInputFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); // warm up loop @@ -415,10 +415,10 @@ 
TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationFilterGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropFilterFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -491,10 +491,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32GroupDilationInputGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradInput}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropInputFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -563,7 +563,7 @@ TEST_F(TestConvolutionGradFp32, ConvGroupDilation) { ASSERT_EQ(lite::RET_OK, context.Init()); auto *kernel = new mindspore::kernel::ConvolutionTrainCPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs, - outputs, &context, 0); + outputs, &context); ASSERT_NE(kernel, nullptr); kernel->Init(); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -668,10 +668,10 @@ TEST_F(TestConvolutionGradFp32, ConvFp32Dilation2Group2Stride2FilterGrad) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradFilter}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropFilterFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -775,10 +775,10 @@ TEST_F(TestConvolutionGradFp32, ConvGroup2Dilation2Stride2) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DGradInput}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_Conv2DBackpropInputFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); 
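[Note] The creator-signature change repeated in the hunks above and below amounts to dropping the trailing primitive pointer from the kernel-creator call. A minimal sketch of the resulting test boilerplate (names taken from these tests; it assumes the MindSpore Lite UT headers such as src/kernel_registry.h and common/common_test.h, with conv_param a populated ConvParameter and inputs/outputs the tensors built earlier in the test):

    // Post-change registry lookup, as used by the grad tests in this file.
    kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32,
                              schema::PrimitiveType_Conv2DBackpropFilterFusion};
    auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
    ASSERT_NE(creator, nullptr);
    // The sixth argument (a primitive pointer, previously passed as nullptr)
    // is gone: with the unified IR the OpParameter carries all op info.
    auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param),
                          &context, desc);
    ASSERT_NE(kernel, nullptr);

The same five-argument form is applied mechanically across the fp32_grad and int8 tests throughout this patch.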
mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc index 66b16b5567..74367174b7 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/deconvolution_grad_fp32_tests.cc @@ -96,7 +96,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -202,7 +202,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); for (int i = 0; i < 3; i++) { @@ -308,7 +308,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -411,7 +411,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group3Stride1FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -517,7 +517,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group2Stride2FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); 
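[Note] The network_test.cc hunks a little further below also track the flatbuffer-schema renames that came with IR unification: camelCase attributes become snake_case (transposeA -> transpose_a) and several primitive types are renamed (BiasGrad -> BiasAddGrad, SoftmaxCrossEntropy -> SoftmaxCrossEntropyWithLogits). A minimal sketch of building a MatMul node under the renamed schema (assuming schema/inner/model_generated.h, with node a schema::CNodeT being filled in as in tuning_layer):

    // Hypothetical minimal node setup under the unified-IR schema names.
    node->primitive = std::make_unique<schema::PrimitiveT>();
    node->primitive->value.type = schema::PrimitiveType_MatMul;
    auto primitive = new schema::MatMulT;
    ASSERT_NE(primitive, nullptr);
    primitive->transpose_a = false;  // formerly transposeA
    primitive->transpose_b = true;   // formerly transposeB
    node->primitive->value.value = primitive;
    node->name = "MatMul1";

Only the attribute and type names change; node construction and graph wiring are otherwise untouched by this patch.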
mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); @@ -626,7 +626,7 @@ TEST_F(TestDeConvolutionGradFp32, DeConvFp32Dilation2Group12Stride2FilterGrad) { kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_DeConv2DGradFilter}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(conv_param), &context, desc); ASSERT_NE(kernel, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel->workspace_size()); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc index 4da792359a..cf1ef8b732 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/network_test.cc @@ -90,7 +90,7 @@ TEST_F(NetworkTest, tuning_layer) { node->primitive->value.type = schema::PrimitiveType_Activation; auto primitive = new schema::ActivationT; ASSERT_NE(primitive, nullptr); - primitive->type = schema::ActivationType_RELU; + primitive->activation_type = schema::ActivationType_RELU; node->primitive->value.value = primitive; node->name = "ReLU"; meta_graph->nodes.emplace_back(std::move(node)); @@ -103,8 +103,8 @@ TEST_F(NetworkTest, tuning_layer) { node->primitive->value.type = schema::PrimitiveType_MatMul; auto primitive = new schema::MatMulT; ASSERT_NE(primitive, nullptr); - primitive->transposeA = false; - primitive->transposeB = true; + primitive->transpose_a = false; + primitive->transpose_b = true; node->primitive->value.value = primitive; node->name = "MatMul1"; meta_graph->nodes.emplace_back(std::move(node)); @@ -117,7 +117,6 @@ TEST_F(NetworkTest, tuning_layer) { node->primitive->value.type = schema::PrimitiveType_BiasAdd; auto primitive = new schema::BiasAddT; ASSERT_NE(primitive, nullptr); - primitive->axis.push_back(0); node->primitive->value.value = primitive; node->name = "BiasAdd"; meta_graph->nodes.emplace_back(std::move(node)); @@ -127,11 +126,11 @@ TEST_F(NetworkTest, tuning_layer) { node->inputIndex = {5, 6}; node->outputIndex = {14, 7}; node->primitive = std::make_unique<schema::PrimitiveT>(); - node->primitive->value.type = schema::PrimitiveType_SoftmaxCrossEntropy; - auto primitive = new schema::SoftmaxCrossEntropyT; + node->primitive->value.type = schema::PrimitiveType_SoftmaxCrossEntropyWithLogits; + auto primitive = new schema::SoftmaxCrossEntropyWithLogitsT; ASSERT_NE(primitive, nullptr); node->primitive->value.value = primitive; - node->name = "SoftmaxCrossEntropy"; + node->name = "SoftmaxCrossEntropyWithLogits"; meta_graph->nodes.emplace_back(std::move(node)); } { @@ -139,8 +138,8 @@ TEST_F(NetworkTest, tuning_layer) { node->inputIndex = {7}; node->outputIndex = {8}; node->primitive = std::make_unique<schema::PrimitiveT>(); - node->primitive->value.type = schema::PrimitiveType_BiasGrad; - auto primitive = new schema::BiasGradT; + node->primitive->value.type = schema::PrimitiveType_BiasAddGrad; + auto primitive = new schema::BiasAddGradT; ASSERT_NE(primitive, nullptr); node->primitive->value.value = primitive; node->name = "BiasGrad"; @@ -154,8 +153,8 @@ TEST_F(NetworkTest, tuning_layer) { node->primitive->value.type = schema::PrimitiveType_MatMul; auto primitive = new schema::MatMulT; ASSERT_NE(primitive, nullptr); - 
primitive->transposeA = true; - primitive->transposeB = false; + primitive->transpose_a = true; + primitive->transpose_b = false; node->primitive->value.value = primitive; node->name = "MatMul2"; meta_graph->nodes.emplace_back(std::move(node)); @@ -393,7 +392,7 @@ TEST_F(NetworkTest, tuning_layer) { auto ret = session->RunGraph(); ASSERT_EQ(lite::RET_OK, ret); - auto outputs = session->GetOutputsByNodeName("SoftmaxCrossEntropy"); + auto outputs = session->GetOutputsByNodeName("SoftmaxCrossEntropyWithLogits"); ASSERT_EQ(outputs.size(), 1); auto outTensor = (outputs.at(0)); ASSERT_NE(nullptr, outTensor); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc index 0af6e5a66c..2e425d3361 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/pooling_grad_fp32_tests.cc @@ -16,7 +16,6 @@ #include <iostream> #include <memory> -#include "src/ops/primitive_c.h" #include "mindspore/lite/include/context.h" #include "src/common/log_adapter.h" #include "common/common_test.h" @@ -155,10 +154,10 @@ TEST_F(TestPoolingGradFp32, AvgPoolingKernelGradFp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AvgPoolGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(pooling_param), &context, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(pooling_param), &context, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -224,10 +223,10 @@ TEST_F(TestPoolingGradFp32, AvgPoolingBatchGradFp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AvgPoolGrad}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(pooling_param), &context, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(pooling_param), &context, desc); ASSERT_NE(kernel_obj, nullptr); kernel_obj->Run(); @@ -292,10 +291,10 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride2Fp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AvgPoolGrad}; auto pool_creator = lite::KernelRegistry::GetInstance()->GetCreator(pool_desc); ASSERT_NE(pool_creator, nullptr); - auto kernel = pool_creator(inputs, outputs, reinterpret_cast<OpParameter *>(pool), &context, pool_desc, nullptr); + auto kernel = pool_creator(inputs, outputs, reinterpret_cast<OpParameter *>(pool), &context, pool_desc); ASSERT_NE(kernel, nullptr); kernel->Init(); @@ -359,10 +358,10 @@ TEST_F(TestPoolingGradFp32, AvgPoolGradStride3Fp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, 
context.Init()); - kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey pool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_AvgPoolGrad}; auto pool_creator = lite::KernelRegistry::GetInstance()->GetCreator(pool_desc); ASSERT_NE(pool_creator, nullptr); - auto kernel = pool_creator(inputs, outputs, reinterpret_cast<OpParameter *>(pool), &context, pool_desc, nullptr); + auto kernel = pool_creator(inputs, outputs, reinterpret_cast<OpParameter *>(pool), &context, pool_desc); ASSERT_NE(kernel, nullptr); kernel->Init(); @@ -492,11 +491,11 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradBatchFp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MaxPoolGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); ASSERT_NE(maxpool_creator, nullptr); - auto kernel = maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, - maxpool_desc, nullptr); + auto kernel = + maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, maxpool_desc); ASSERT_NE(kernel, nullptr); kernel->Init(); @@ -570,11 +569,11 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride2Fp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MaxPoolGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); ASSERT_NE(maxpool_creator, nullptr); - auto kernel = maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, - maxpool_desc, nullptr); + auto kernel = + maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, maxpool_desc); ASSERT_NE(kernel, nullptr); kernel->Init(); @@ -648,11 +647,11 @@ TEST_F(TestPoolingGradFp32, MaxPoolGradStride3Fp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_PoolingGrad}; + kernel::KernelKey maxpool_desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_MaxPoolGrad}; auto maxpool_creator = lite::KernelRegistry::GetInstance()->GetCreator(maxpool_desc); ASSERT_NE(maxpool_creator, nullptr); - auto kernel = maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, - maxpool_desc, nullptr); + auto kernel = + maxpool_creator(maxpool_inputs, maxpool_outputs, reinterpret_cast<OpParameter *>(maxpool), &context, maxpool_desc); ASSERT_NE(kernel, nullptr); kernel->Init(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc index a58994bf39..a8f5dd82d3 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_crossentropy_fp32_tests.cc @@ -70,10 +70,11 @@ TEST_F(TestSoftmaxCrossEntropyFp32, 
SoftmaxCrossEntropyFp32) { context.thread_num_ = 1; ASSERT_EQ(lite::RET_OK, context.Init()); - kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, schema::PrimitiveType_SoftmaxCrossEntropy}; + kernel::KernelKey desc = {kernel::kCPU, TypeId::kNumberTypeFloat32, + schema::PrimitiveType_SoftmaxCrossEntropyWithLogits}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(sce_param), &context, desc, nullptr); + auto kernel_obj = creator(inputs, outputs, reinterpret_cast<OpParameter *>(sce_param), &context, desc); ASSERT_NE(kernel_obj, nullptr); mindspore::kernel::LiteKernel::AllocWorkspace(kernel_obj->workspace_size()); kernel_obj->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc index 3300a6974a..39963e33fe 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32_grad/softmax_grad_fp32_tests.cc @@ -17,7 +17,6 @@ #include <iostream> #include <memory> #include <vector> -#include "src/ops/primitive_c.h" #include "mindspore/lite/include/context.h" #include "src/common/log_adapter.h" #include "common/common_test.h" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc index f4e6fbbf6a..367cc22506 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/add_int8_tests.cc @@ -50,14 +50,14 @@ TEST_F(TestQuantizedAdd, Add) { std::vector<lite::Tensor *> outputs = {&out_tensor}; OpParameter parameter = {}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Add}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_AddFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc index 7112a34320..9cb0edeccc 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/arithmetic_self_int8_tests.cc @@ -71,7 +71,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -131,7 +131,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) { auto creator = 
lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -191,7 +191,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -251,7 +251,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -311,7 +311,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -371,7 +371,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -431,7 +431,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant0_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -491,7 +491,7 @@ TEST_F(TestArithmeticSelfInt8, abs_quant1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape 
= output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -551,7 +551,7 @@ TEST_F(TestArithmeticSelfInt8, sin_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -611,7 +611,7 @@ TEST_F(TestArithmeticSelfInt8, cos_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -671,7 +671,7 @@ TEST_F(TestArithmeticSelfInt8, log_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -731,7 +731,7 @@ TEST_F(TestArithmeticSelfInt8, sqrt_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -791,7 +791,7 @@ TEST_F(TestArithmeticSelfInt8, rsqrt_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -851,7 +851,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -911,7 +911,7 @@ TEST_F(TestArithmeticSelfInt8, square_quant1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, 
desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -971,7 +971,7 @@ TEST_F(TestArithmeticSelfInt8, logical_not_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc index ee567e4aea..f059a3917b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/batchnorm_int8_test.cc @@ -105,7 +105,7 @@ TEST_F(TestBatchnormInt8, FusedTest) { ctx.thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); @@ -186,7 +186,7 @@ TEST_F(TestBatchnormInt8, BNTest) { ctx.thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc index 312867cf21..f6a49bcdb2 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/concat_int8_tests.cc @@ -84,7 +84,7 @@ TEST_F(TestConcatInt8, Concat1_axis0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -156,7 +156,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -229,7 +229,7 @@ TEST_F(TestConcatInt8, Concat1_axis1_thread2_quant1) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, 
nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc index 7910a435a7..182543d34a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/conv_1x1_int8_tests.cc @@ -125,8 +125,8 @@ TEST_F(TestConv1x1Int8, Conv1x1TestPerChannel) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); int total_size = Conv1x1Int8TestInit1_perchannel(&inputs_, &outputs_, conv_param, &correct); - kernel::Convolution1x1Int8CPUKernel *conv1x1 = new kernel::Convolution1x1Int8CPUKernel( - reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx, nullptr); + kernel::Convolution1x1Int8CPUKernel *conv1x1 = + new kernel::Convolution1x1Int8CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx); conv1x1->Init(); conv1x1->Run(); @@ -194,8 +194,8 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test1) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); int total_size = Conv1x1Int8TestInit1(&inputs_, &outputs_, conv_param, &correct); - kernel::Convolution1x1Int8CPUKernel *conv1x1 = new kernel::Convolution1x1Int8CPUKernel( - reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx, nullptr); + kernel::Convolution1x1Int8CPUKernel *conv1x1 = + new kernel::Convolution1x1Int8CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx); conv1x1->Init(); conv1x1->Run(); @@ -271,8 +271,8 @@ TEST_F(TestConv1x1Int8, Conv1x1Int8Test2) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); int total_size = Conv1x1Int8TestInit2(&inputs_, &outputs_, conv_param, &correct); - auto *conv1x1 = new kernel::Convolution1x1Int8CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, - outputs_, ctx, nullptr); + auto *conv1x1 = + new kernel::Convolution1x1Int8CPUKernel(reinterpret_cast<OpParameter *>(conv_param), inputs_, outputs_, ctx); conv1x1->Init(); conv1x1->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc index 73a33292c2..5ac676a46d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/crop_int8_tests.cc @@ -76,7 +76,7 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -140,7 +140,7 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter 
*>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -204,7 +204,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -269,7 +269,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -333,7 +333,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -397,7 +397,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -464,7 +464,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -531,7 +531,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -597,7 +597,7 @@ TEST_F(TestCropInt8, 
crop_4d_axis0_offset0_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -663,7 +663,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc index 8224faee7b..b7250da49c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/deconv_int8_tests.cc @@ -323,8 +323,8 @@ TEST_F(TestDeconvInt8, DeConvInt8Test1) { ASSERT_EQ(lite::RET_OK, ctx->Init()); int8_t *correct; int total_size = DeConvInt8TestInit1(&inputs_, &outputs_, deconv_param, &correct); - auto *deconv = new mindspore::kernel::DeConvInt8CPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, - outputs_, ctx, nullptr); + auto *deconv = + new mindspore::kernel::DeConvInt8CPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx); deconv->Init(); deconv->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc index 1330b43f7d..ef695fb188 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/fullconnection_int8_tests.cc @@ -138,7 +138,7 @@ TEST_F(TestFcInt8, fctest1) { ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::FullconnectionInt8CPUKernel *fc = - new kernel::FullconnectionInt8CPUKernel(reinterpret_cast<OpParameter *>(fc_param), inputs, outputs, ctx, nullptr); + new kernel::FullconnectionInt8CPUKernel(reinterpret_cast<OpParameter *>(fc_param), inputs, outputs, ctx); fc->Init(); fc->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc index 8a1cbaa0f6..fe5c1dc21d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gatherNd_int8_test.cc @@ -81,7 +81,7 @@ TEST_F(TestGatherNdInt8, GatherNdTest) { ctx.thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc index 4190b723ba..80ab86929e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/gather_int8_test.cc @@ -36,7 +36,6 @@ TEST_F(TestGatherInt8, GatherTest) { GatherParameter op_param; op_param.op_parameter_.type_ = schema::PrimitiveType_Gather; op_param.axis_ = 0; - op_param.batchDims_ = 1; std::vector<int> shape = {2, 1, 3, 2}; lite::QuantArg input_quant_arg; @@ -80,7 +79,7 @@ TEST_F(TestGatherInt8, GatherTest) { ctx.thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx.Init()); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc index 4d6668ebd9..8b95aeed1e 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/hswish_int8_tests.cc @@ -58,7 +58,7 @@ TEST_F(TestHSwishInt8, HSwish) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc index f15631ed0d..cf6339502a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/l2_norm_int8_tests.cc @@ -50,14 +50,14 @@ TEST_F(TestL2NormInt8, norm) { param_.epsilon_ = 1e-6; param_.act_type_ = ActType_No; param_.shape_ = nullptr; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_L2Norm}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_L2NormalizeFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -94,14 +94,14 @@ TEST_F(TestL2NormInt8, norm2) { param_.epsilon_ = 1e-6; param_.act_type_ = ActType_No; param_.shape_ = nullptr; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_L2Norm}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_L2NormalizeFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx.get(), desc, nullptr); + auto kernel = 
creator(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc index 4130948a5f..c5ef9807a3 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/matmul_int8_tests.cc @@ -126,7 +126,7 @@ TEST_F(TestMatmulInt8, mmtest1) { ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::MatmulInt8CPUKernel *mm = - new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs, outputs, ctx, nullptr); + new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs, outputs, ctx); mm->Init(); mm->Run(); @@ -243,7 +243,7 @@ TEST_F(TestMatmulInt8, mmtest2) { ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); kernel::MatmulInt8CPUKernel *mm = - new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs, outputs, ctx, nullptr); + new kernel::MatmulInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs, outputs, ctx); mm->Init(); mm->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc index 4465782e31..8496a77419 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/mul_int8_tests.cc @@ -75,15 +75,15 @@ TEST_F(TestMulInt8, Mul_quant0) { outputs_tensor[0] = output0_tensor; MulParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Mul; + op_param.op_parameter_.type_ = schema::PrimitiveType_MulFusion; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Mul}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_MulFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -146,15 +146,15 @@ TEST_F(TestMulInt8, Mul_quant0_thread0) { outputs_tensor[0] = output0_tensor; MulParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Mul; + op_param.op_parameter_.type_ = schema::PrimitiveType_MulFusion; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Mul}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_MulFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto 
output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -217,15 +217,15 @@ TEST_F(TestMulInt8, Mul_quant1) { outputs_tensor[0] = output0_tensor; MulParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Mul; + op_param.op_parameter_.type_ = schema::PrimitiveType_MulFusion; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Mul}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_MulFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -288,15 +288,15 @@ TEST_F(TestMulInt8, Mul_quant1_thread1) { outputs_tensor[0] = output0_tensor; MulParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Mul; + op_param.op_parameter_.type_ = schema::PrimitiveType_MulFusion; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 3; ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Mul}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_MulFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -359,15 +359,15 @@ TEST_F(TestMulInt8, test) { outputs_tensor[0] = output0_tensor; MulParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Mul; + op_param.op_parameter_.type_ = schema::PrimitiveType_MulFusion; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Mul}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_MulFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc index 25d642fd7e..63fffccb13 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/pad_int8_tests.cc @@ -70,7 +70,7 @@ TEST_F(TestPadInt8, PadInt8Test1) { int8_t *correct; int 
total_size = PadInt8TestInit1(&inputs_, &outputs_, pad_param, &correct); kernel::PadInt8CPUKernel *pad = - new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx, nullptr); + new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx); pad->Init(); pad->Run(); @@ -123,7 +123,7 @@ TEST_F(TestPadInt8, PadInt8Test2) { int8_t *correct; int total_size = PadInt8TestInit2(&inputs_, &outputs_, pad_param, &correct); kernel::PadInt8CPUKernel *pad = - new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx, nullptr); + new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx); pad->Init(); pad->Run(); @@ -191,7 +191,7 @@ TEST_F(TestPadInt8, PadInt8TestInit4) { int8_t *correct; int total_size = PadInt8TestInit2(&inputs_, &outputs_, pad_param, &correct); kernel::PadInt8CPUKernel *pad = - new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx, nullptr); + new kernel::PadInt8CPUKernel(reinterpret_cast<OpParameter *>(pad_param), inputs_, outputs_, ctx); pad->Init(); pad->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc index 264e54d467..b90cce8318 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/power_int8_tests.cc @@ -34,7 +34,7 @@ TEST_F(TestPowerInt8, PowerInt8) { std::vector<lite::Tensor *> outputs_tensor; PowerParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Power; + op_param.op_parameter_.type_ = schema::PrimitiveType_PowFusion; op_param.power_ = 2; op_param.scale_ = 1; op_param.shift_ = 0; @@ -68,12 +68,12 @@ TEST_F(TestPowerInt8, PowerInt8) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Power}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_PowFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); @@ -90,7 +90,7 @@ TEST_F(TestPowerInt8, normal) { std::vector<lite::Tensor *> outputs_tensor; PowerParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_Power; + op_param.op_parameter_.type_ = schema::PrimitiveType_PowFusion; op_param.scale_ = 1; op_param.shift_ = 0; @@ -137,12 +137,12 @@ TEST_F(TestPowerInt8, normal) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Power}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_PowFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc, nullptr); + creator(inputs_tensor, outputs_tensor, 
reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc index e1b2c75465..c59167be69 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/prelu_int8_tests.cc @@ -65,18 +65,18 @@ TEST_F(TestPreluInt8, prelu_1) { outputs_tensor[0] = output0_tensor; LeakyReluQuantArg op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_LeakyReLU; + op_param.op_parameter_.type_ = schema::PrimitiveType_LeakyRelu; op_param.slope_ = 0.25; lite::InnerContext *ctx = new lite::InnerContext; ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); op_param.axis_ = 0; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_LeakyReLU}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_LeakyRelu}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc index f631a5df9e..5589d44e76 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/quant_dtype_cast_tests.cc @@ -69,7 +69,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); @@ -116,7 +116,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&param), &ctx, desc); ASSERT_NE(kernel, nullptr); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc index d83e1705f9..9811946cf0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reduce_int8_tests.cc @@ -47,7 +47,7 @@ class TestReduceInt8 : public mindspore::CommonTest { Tensor out_tensor_; std::vector<Tensor *> inputs{&in_tensor_}; std::vector<Tensor *> outputs{&out_tensor_}; - kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Reduce}; + kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, 
schema::PrimitiveType_ReduceFusion}; kernel::KernelCreator creator_ = nullptr; lite::InnerContext ctx_ = lite::InnerContext(); kernel::LiteKernel *kernel_ = nullptr; @@ -81,7 +81,7 @@ void TestReduceInt8::Prepare(const std::vector<int> &in_shape, const std::vector ctx_.thread_num_ = thread_num_; ASSERT_EQ(lite::RET_OK, ctx_.Init()); - kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); } TEST_F(TestReduceInt8, Mean) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc index 98e0188aaa..167013bced 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/relux_int8_tests.cc @@ -56,7 +56,7 @@ TEST_F(TestReluXInt8, Relu) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -100,7 +100,7 @@ TEST_F(TestReluXInt8, Relu6) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc index 25ca86ceae..9fc8743ece 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/reshape_int8_tests.cc @@ -72,7 +72,7 @@ TEST_F(TestReshapeInt8, reshape_quant0) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); @@ -132,7 +132,7 @@ TEST_F(TestReshapeInt8, reshape_quant1_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc index cb358f041d..0b7f38a91d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_bilinear_int8_tests.cc @@ -79,7 +79,7 @@ void 
TestResizeBilinearInt8::Prepare(const std::vector<int> &in_shape, const std ctx_.thread_num_ = thread_num; ASSERT_EQ(lite::RET_OK, ctx_.Init()); - kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); } TEST_F(TestResizeBilinearInt8, Bilinear0) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc index a2dff79171..f9e8926137 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/resize_nearest_neighbor_int8_tests.cc @@ -74,7 +74,7 @@ void TestResizeNearestNeighborInt8::Prepare(const std::vector<int> &in_shape, co ctx_.thread_num_ = thread_num; ASSERT_EQ(lite::RET_OK, ctx_.Init()); - kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); } void TestResizeNearestNeighborInt8::TearDown() { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc index fe400d754d..2052f53a70 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/scale_int8.cc @@ -43,7 +43,7 @@ class TestScaleInt8 : public mindspore::CommonTest { Tensor out_tensor_; std::vector<Tensor *> inputs; std::vector<Tensor *> outputs = {&out_tensor_}; - kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Scale}; + kernel::KernelKey desc_ = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_ScaleFusion}; kernel::KernelCreator creator_ = nullptr; lite::InnerContext ctx_ = lite::InnerContext(); kernel::LiteKernel *kernel_ = nullptr; @@ -94,7 +94,7 @@ void TestScaleInt8::Prepare(const std::vector<int> &in_shape, int8_t *input_data ctx_.thread_num_ = thread_num_; ASSERT_EQ(lite::RET_OK, ctx_.Init()); - kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_, nullptr); + kernel_ = creator_(inputs, outputs, reinterpret_cast<OpParameter *>(&param_), &ctx_, desc_); } TEST_F(TestScaleInt8, scale1) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc index 9a74a06daa..574b742774 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sigmoid_int8_tests.cc @@ -55,7 +55,7 @@ TEST_F(TestSigmoidInt8, Sigmoid) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc index 0801d945fd..19276d70aa 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc +++ 
b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/slice_int8_tests.cc @@ -54,14 +54,14 @@ TEST_F(TestSliceInt8, SliceInt8) { parameter.size_[2] = -1; parameter.param_length_ = 3; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Slice}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_SliceFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); EXPECT_EQ(0, ret); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc index 5d8c181945..48f0cb6755 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/softmax_int8_tests.cc @@ -34,7 +34,7 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) { std::vector<lite::Tensor *> outputs_tensor; SoftmaxParameter op_param; - op_param.op_parameter_.type_ = schema::PrimitiveType_SoftMax; + op_param.op_parameter_.type_ = schema::PrimitiveType_Softmax; op_param.axis_ = 2; op_param.element_size_ = 24; op_param.input_shape_[0] = 1; @@ -72,12 +72,12 @@ TEST_F(TestSoftmaxInt8, SoftmaxInt8) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_SoftMax}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Softmax}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor.shape(); kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc index 535ed559bf..9bfd0de740 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/space_to_batch_int8_tests.cc @@ -42,7 +42,7 @@ TEST_F(SpaceToBatchTestInt8, test1) { auto ctx = std::make_shared<lite::InnerContext>(); ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc index 1d882bdf3a..542f140e20 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/split_int8_tests.cc @@ -86,7 +86,7 @@ TEST_F(TestSplitInt8, 
Split_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output1_tensor_shape = output1_tensor->shape(); auto output2_tensor_shape = output2_tensor->shape(); @@ -175,7 +175,7 @@ TEST_F(TestSplitInt8, Split_quant0_thread2_num) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output1_tensor_shape = output1_tensor->shape(); auto output2_tensor_shape = output2_tensor->shape(); @@ -272,7 +272,7 @@ TEST_F(TestSplitInt8, Split_quant1_thread2_num) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output1_tensor_shape = output1_tensor->shape(); auto output2_tensor_shape = output2_tensor->shape(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc index 6092b26cdf..bac156827d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/squeeze_int8_tests.cc @@ -72,7 +72,7 @@ TEST_F(TestSqueezeInt8, Squeeze_1d_axis0_offset0_quant0_thread2) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc index 97f4b601af..4ccd6a976c 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/sub_int_tests.cc @@ -51,7 +51,7 @@ TEST_F(TestSubInt8, SubInt8) { std::vector<lite::Tensor *> outputs = {&out_tensor}; OpParameter parameter = {}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Sub}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_SubFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); @@ -59,7 +59,7 @@ TEST_F(TestSubInt8, SubInt8) { auto ctx = std::make_shared<lite::InnerContext>(); ctx->thread_num_ = 1; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, 
reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); @@ -98,7 +98,7 @@ TEST_F(TestSubInt8, SubInt8T2) { std::vector<lite::Tensor *> outputs = {&out_tensor}; OpParameter parameter = {}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Sub}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_SubFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); @@ -106,7 +106,7 @@ TEST_F(TestSubInt8, SubInt8T2) { auto ctx = std::make_shared<lite::InnerContext>(); ctx->thread_num_ = 2; ASSERT_EQ(lite::RET_OK, ctx->Init()); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), ctx.get(), desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc index 803cb6411a..cb2855535f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/topk_int8_tests.cc @@ -41,12 +41,12 @@ TEST_F(TestTopKInt8, TopK) { std::vector<lite::Tensor *> outputs = {&out_tensor0, &out_tensor1}; TopkParameter parameter = {{}, 2, true, 3, 4}; - kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_TopK}; + kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_TopKFusion}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); - auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), nullptr, desc, nullptr); + auto kernel = creator(inputs, outputs, reinterpret_cast<OpParameter *>(&parameter), nullptr, desc); ASSERT_NE(kernel, nullptr); auto ret = kernel->Run(); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc index 77ec5ee446..38aec8665f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/int8/unsqueeze_int8_tests.cc @@ -74,7 +74,7 @@ TEST_F(TestUnsqueezeInt8, Unsqueeze_1) { auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); ASSERT_NE(creator, nullptr); kernel::LiteKernel *kernel = - creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc, nullptr); + creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), ctx, desc); ASSERT_NE(kernel, nullptr); auto output_tensor_shape = output0_tensor->shape(); ASSERT_EQ(output_tensor_shape, output_shape); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc index ddec37f15a..8f68599842 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/arm/string/normalize.cc @@ -65,7 +65,7 @@ TEST_F(TestNormalize, TestSentence) { ASSERT_EQ(lite::RET_OK, ctx_.Init()); creator_ = lite::KernelRegistry::GetInstance()->GetCreator(desc_); ASSERT_NE(creator_, nullptr); - kernel_ = creator_(inputs_, outputs_, &parameter_, &ctx_, desc_, nullptr); + kernel_ 
= creator_(inputs_, outputs_, &parameter_, &ctx_, desc_); ASSERT_NE(kernel_, nullptr); auto ret = kernel_->Init(); ASSERT_EQ(ret, 0); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/argminmax_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/argminmax_tests.cc index 540eb0d6b1..171aa1562f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/argminmax_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/argminmax_tests.cc @@ -21,8 +21,8 @@ namespace mindspore::lite::opencl::test { class TestOpenCL_ArgMinMax : public CommonTest {}; namespace { -// PrimitiveType_ArgMin: src/ops/populate/argmin_populate.cc -// PrimitiveType_ArgMax: src/ops/populate/argmax_populate.cc +// PrimitiveType_ArgMinFusion: src/ops/populate/argmin_populate.cc +// PrimitiveType_ArgMaxFusion: src/ops/populate/argmax_populate.cc OpParameter *CreateParameter(schema::PrimitiveType type, int axis, int topk, bool out_value, bool keep_dims = false, int axis_type = 0) { auto *param = test::CreateParameter<ArgMinMaxParameter>(type); @@ -36,7 +36,7 @@ OpParameter *CreateParameter(schema::PrimitiveType type, int axis, int topk, boo } // namespace TEST_F(TestOpenCL_ArgMinMax, axis0topk2index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 0; int topk = 2; bool out_value = false; @@ -51,7 +51,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis0topk2index) { } TEST_F(TestOpenCL_ArgMinMax, axis0topk2value) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 0; int topk = 2; bool out_value = true; @@ -66,7 +66,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis0topk2value) { } TEST_F(TestOpenCL_ArgMinMax, axis1topk2index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 1; int topk = 2; bool out_value = false; @@ -82,7 +82,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis1topk2index) { } TEST_F(TestOpenCL_ArgMinMax, axis1topk2value) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 1; int topk = 2; bool out_value = true; @@ -99,7 +99,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis1topk2value) { } TEST_F(TestOpenCL_ArgMinMax, axis2topk1index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 2; int topk = 1; bool out_value = false; @@ -116,7 +116,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis2topk1index) { } TEST_F(TestOpenCL_ArgMinMax, axis2topk2value) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 2; int topk = 2; bool out_value = true; @@ -134,7 +134,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis2topk2value) { } TEST_F(TestOpenCL_ArgMinMax, axis2topk2index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 2; int topk = 2; bool out_value = false; @@ -152,7 +152,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis2topk2index) { } TEST_F(TestOpenCL_ArgMinMax, axis3topk2index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 3; int topk = 2; bool out_value = false; @@ -169,7 +169,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis3topk2index) { } 
TEST_F(TestOpenCL_ArgMinMax, axis3topk2value) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 3; int topk = 2; bool out_value = true; @@ -186,7 +186,7 @@ TEST_F(TestOpenCL_ArgMinMax, axis3topk2value) { } } TEST_F(TestOpenCL_ArgMinMax, dim32axis1topk1index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 1; int topk = 1; bool out_value = false; @@ -201,7 +201,7 @@ TEST_F(TestOpenCL_ArgMinMax, dim32axis1topk1index) { } } TEST_F(TestOpenCL_ArgMinMax, dim43axis2topk1index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 2; int topk = 1; bool out_value = false; @@ -220,7 +220,7 @@ TEST_F(TestOpenCL_ArgMinMax, dim43axis2topk1index) { } } TEST_F(TestOpenCL_ArgMinMax, dim21axis2topk1index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 0; int topk = 1; bool out_value = false; @@ -235,7 +235,7 @@ TEST_F(TestOpenCL_ArgMinMax, dim21axis2topk1index) { } } TEST_F(TestOpenCL_ArgMinMax, dim10axis2topk1index) { - schema::PrimitiveType type = schema::PrimitiveType_ArgMax; + schema::PrimitiveType type = schema::PrimitiveType_ArgMaxFusion; int axis = 0; int topk = 1; bool out_value = false; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc index 703c43aa33..023790828a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/arithmetic_tests.cc @@ -64,7 +64,7 @@ TEST_F(TestOpenCL_Arithmetic, ElementwiseAdd) { float output_data[] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Add, input0_shape, input1_shape); + auto *param = CreateParameter(schema::PrimitiveType_AddFusion, input0_shape, input1_shape); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } @@ -78,7 +78,7 @@ TEST_F(TestOpenCL_Arithmetic, ScalarMul) { float input1_data[] = {2}; float output_data[] = {2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Mul, input0_shape, input1_shape); + auto *param = CreateParameter(schema::PrimitiveType_MulFusion, input0_shape, input1_shape); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } @@ -92,7 +92,8 @@ TEST_F(TestOpenCL_Arithmetic, BroadcastSubReLU6) { float input1_data[] = {1, 2, 3}; float output_data[] = {0, 0, 0, 3, 3, 3, 6, 6, 6, 6, 6, 6}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Sub, input0_shape, input1_shape, schema::ActivationType_RELU6); + auto *param = + CreateParameter(schema::PrimitiveType_SubFusion, input0_shape, input1_shape, schema::ActivationType_RELU6); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } @@ -106,7 +107,7 @@ TEST_F(TestOpenCL_Arithmetic, BroadcastSub2) { float input1_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; float output_data[] 
= {0, 0, 0, -3, -3, -3, -6, -6, -6, -9, -9, -9}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Sub, input0_shape, input1_shape); + auto *param = CreateParameter(schema::PrimitiveType_SubFusion, input0_shape, input1_shape); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } @@ -120,7 +121,7 @@ TEST_F(TestOpenCL_Arithmetic, BroadcastSub3) { float input1_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; float output_data[] = {0, 0, 0, -3, -3, -3, -6, -6, -6, -9, -9, -9, 0, 0, 0, -3, -3, -3, -6, -6, -6, -9, -9, -9}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Sub, input0_shape, input1_shape); + auto *param = CreateParameter(schema::PrimitiveType_SubFusion, input0_shape, input1_shape); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } @@ -202,7 +203,7 @@ TEST_F(TestOpenCL_Arithmetic, ElementwiseDiv) { float input1_data[] = {1, 1, 1, 2, 2, 2, 1, 1, 1, 2, 2, 2}; float output_data[] = {1, 2, 3, 2, 2.5, 3, 7, 8, 9, 5, 5.5, 6}; for (auto fp16_enable : {false, true}) { - auto *param = CreateParameter(schema::PrimitiveType_Div, input0_shape, input1_shape); + auto *param = CreateParameter(schema::PrimitiveType_DivFusion, input0_shape, input1_shape); TestMain({{input0_shape, input0_data, VAR}, {input1_shape, input1_data, CONST_TENSOR}}, {output_shape, output_data}, param, fp16_enable); } diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc index 917a618abe..3ba76fd8cb 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/cast_tests.cc @@ -72,8 +72,8 @@ TEST_F(TestCastSelfOpenCL, Castfp32tofp16) { std::vector<lite::Tensor *> inputs{input_tensor}; std::vector<lite::Tensor *> outputs{output_tensor}; - auto *cast_kernel = new (std::nothrow) - kernel::CastOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr, nullptr); + auto *cast_kernel = + new (std::nothrow) kernel::CastOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr); if (cast_kernel == nullptr) { MS_LOG(INFO) << " new kernel::CastOpenCLKernel failed "; for (auto tensor : inputs) { @@ -158,8 +158,8 @@ TEST_F(TestCastSelfOpenCL, Castfp16tofp32) { std::vector<lite::Tensor *> inputs{input_tensor}; std::vector<lite::Tensor *> outputs{output_tensor}; - auto *cast_kernel = new (std::nothrow) - kernel::CastOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr, nullptr); + auto *cast_kernel = + new (std::nothrow) kernel::CastOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr); if (cast_kernel == nullptr) { MS_LOG(INFO) << " new kernel::CastOpenCLKernel failed "; for (auto tensor : inputs) { diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc index fd554ebc11..79d12609da 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/common.cc @@ -19,6 +19,7 @@ #include "src/kernel_registry.h" #include "src/runtime/kernel/opencl/opencl_subgraph.h" #include "nnacl/conv_parameter.h" +#include "schema/model_v0_generated.h" using 
mindspore::kernel::LiteKernel; using mindspore::kernel::OpenCLSubGraph; @@ -40,9 +41,14 @@ void TestMain(const std::vector<ArgsTuple> &input_infos, const std::vector<ArgsT void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, const std::vector<ArgsTupleOut> &output_info, OpParameter *op_parameter, bool fp16_enable, float atol, float rtol, bool print_data) { auto primitive_type = static_cast<schema::PrimitiveType>(op_parameter->type_); - static std::set<schema::PrimitiveType> packed_op = { - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_MatMul}; +#ifdef ENABLE_V0 + static std::set<int> packed_op = {schema::v0::PrimitiveType_Conv2D, schema::v0::PrimitiveType_DeConv2D, + schema::v0::PrimitiveType_DepthwiseConv2D, + schema::v0::PrimitiveType_DeDepthwiseConv2D, schema::v0::PrimitiveType_MatMul}; +#else + static std::set<int> packed_op = {schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_Conv2dTransposeFusion, + schema::PrimitiveType_MatMul}; +#endif // simulating benchmark: session::LiteSession::CreateSession() -> session->Init() MS_LOG(DEBUG) << "initialize OpenCLRuntime and OpenCLAllocator"; @@ -104,7 +110,7 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, const std::vec free(op_parameter); FAIL(); } - auto *kernel = creator(kernel_inputs, outputs, op_parameter, nullptr, key, nullptr); + auto *kernel = creator(kernel_inputs, outputs, op_parameter, nullptr, key); if (kernel == nullptr) { std::cerr << "call registry function error: " << schema::EnumNamePrimitiveType(primitive_type) << std::endl; free(op_parameter); @@ -180,8 +186,8 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, std::tuple<std OpParameter *op_parameter, bool fp16_enable, float atol, float rtol, bool print_data) { auto primitive_type = static_cast<schema::PrimitiveType>(op_parameter->type_); static std::set<schema::PrimitiveType> packed_op = { - schema::PrimitiveType_Conv2D, schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_MatMul}; + schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_Conv2dTransposeFusion, + schema::PrimitiveType_MatMul}; // simulating benchmark: session::LiteSession::CreateSession() -> session->Init() MS_LOG(DEBUG) << "initialize OpenCLRuntime and OpenCLAllocator"; @@ -237,7 +243,7 @@ void TestMain(const std::vector<ArgsTupleWithDtype> &input_infos, std::tuple<std free(op_parameter); FAIL(); } - auto *kernel = creator(kernel_inputs, {&output}, op_parameter, nullptr, key, nullptr); + auto *kernel = creator(kernel_inputs, {&output}, op_parameter, nullptr, key); if (kernel == nullptr) { std::cerr << "call registry function error: " << schema::EnumNamePrimitiveType(primitive_type) << std::endl; free(op_parameter); diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_tests.cc index 56b5371292..06854f179d 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_tests.cc @@ -23,7 +23,7 @@ class TestOpenCL_Conv2D : public CommonTest {}; namespace { // PrimitiveType_Conv2D: src/ops/populate/conv2d_populate.cc ConvParameter *CreateParameter(const std::string &attr, ActType act_type) { - auto *param =
test::CreateParameter<ConvParameter>(schema::PrimitiveType_Conv2D); + auto *param = test::CreateParameter<ConvParameter>(schema::PrimitiveType_Conv2DFusion); param->act_type_ = act_type; sscanf(attr.c_str(), "inputNHWC_%dx%dx%dx%d_outputNHWC_%dx%dx%dx%d_kernelHW_%dx%d_strideHW_%dx%d_padTopBottomLeftRight_%dx%dx%dx%d_" diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc index 65c4b05937..0377b39e46 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/conv2d_transpose_tests.cc @@ -25,7 +25,7 @@ namespace { OpParameter *CreateParameter(int n, int h, int w, int ci, int co, int kh, int kw, std::vector<int> pad, int oh, int ow, std::vector<int> *input_shape, std::vector<int> *weight_shape, std::vector<int> *bias_shape, std::vector<int> *output_shape) { - auto *param = test::CreateParameter<ConvParameter>(schema::PrimitiveType_DeConv2D); + auto *param = test::CreateParameter<ConvParameter>(schema::PrimitiveType_Conv2dTransposeFusion); param->kernel_h_ = kh; param->kernel_w_ = kw; param->stride_h_ = 2; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc index b9d1344b33..dcb27c114a 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/depthwise_conv2d_tests.cc @@ -24,7 +24,7 @@ namespace { // PrimitiveType_DepthwiseConv2D: src/ops/populate/depthwise_conv2d_populate.cc OpParameter *CreateParameter(int kernel_h, int kernel_w, int stride_h, int stride_w, int pad_u, int pad_d, int pad_l, int pad_r, int dilation_h, int dilation_w, ActType act_type, int input_channel) { - auto *param = test::CreateParameter<ConvParameter>(schema::PrimitiveType_DepthwiseConv2D); + auto *param = test::CreateParameter<ConvParameter>(schema::PrimitiveType_Conv2DFusion); param->kernel_h_ = kernel_h; param->kernel_w_ = kernel_w; param->stride_h_ = stride_h; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc index 4ad5ddd18b..523b3f515b 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/fill_tests.cc @@ -59,8 +59,8 @@ TEST_F(TestFillOpenCLCI, Fp32testfill) { return; } - auto *fill_kernel = new (std::nothrow) - kernel::FillOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr, nullptr); + auto *fill_kernel = + new (std::nothrow) kernel::FillOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr); if (fill_kernel == nullptr) { MS_LOG(INFO) << " new kernel::FillOpenCLKernel failed "; delete param; @@ -115,8 +115,8 @@ TEST_F(TestFillOpenCLCI, Fp32testshape) { return; } - auto *fill_kernel = new (std::nothrow) - kernel::FillOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr, nullptr); + auto *fill_kernel = + new (std::nothrow) kernel::FillOpenCLKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs, nullptr); if (fill_kernel == nullptr) { MS_LOG(INFO) << " new kernel::FillOpenCLKernel failed "; delete param; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc index 634fb84489..88cdbe1317 
100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/layer_norm_tests.cc @@ -23,7 +23,7 @@ class TestOpenCL_LayerNorm : public CommonTest {}; namespace { // PrimitiveType_Stack: src/ops/populate/stack_populate.cc OpParameter *CreateParameter(float epsilon, int begin_norm_axis_, int begin_param_axis_) { - auto *param = test::CreateParameter<LayerNormParameter>(schema::PrimitiveType_LayerNorm); + auto *param = test::CreateParameter<LayerNormParameter>(schema::PrimitiveType_LayerNormFusion); param->epsilon_ = epsilon; param->begin_norm_axis_ = begin_norm_axis_; param->begin_params_axis_ = begin_param_axis_; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/pad_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/pad_tests.cc index f22ba50b70..12d06b34b0 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/pad_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/pad_tests.cc @@ -23,7 +23,7 @@ class TestOpenCL_Pad : public CommonTest {}; namespace { // PrimitiveType_Pad: src/ops/populate/pad_populate.cc OpParameter *CreateParameter(const std::vector<int> &paddings, float constant_value) { - auto *param = test::CreateParameter<PadParameter>(schema::PrimitiveType_Pad); + auto *param = test::CreateParameter<PadParameter>(schema::PrimitiveType_PadFusion); param->pad_mode_ = schema::PaddingMode_CONSTANT; param->constant_value_ = constant_value; param->padding_length = MAX_PAD_SIZE; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/pooling_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/pooling_tests.cc index 9fd3991f6f..2141e086f4 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/pooling_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/pooling_tests.cc @@ -25,7 +25,7 @@ namespace { OpParameter *CreateParameter(PoolMode pool_mode, int window_h, int window_w, int stride_h, int stride_w, int pad_u, int pad_d, int pad_l, int pad_r, RoundMode round_mode = RoundMode_No, ActType act_type = ActType_No) { - auto *param = test::CreateParameter<PoolingParameter>(schema::PrimitiveType_Pooling); + auto *param = test::CreateParameter<PoolingParameter>(schema::PrimitiveType_MaxPoolFusion); param->global_ = false; param->window_w_ = window_w; param->window_h_ = window_h; diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/power_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/power_tests.cc index e4b55a67b3..0e230d8c0f 100644 --- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/power_tests.cc +++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/power_tests.cc @@ -16,7 +16,7 @@ #include "ut/src/runtime/kernel/opencl/common.h" #include "mindspore/lite/src/runtime/kernel/opencl/kernel/power.h" -// PrimitiveType_Power: src/ops/populate/power_populate.cc +// PrimitiveType_PowFusion: src/ops/populate/power_populate.cc using mindspore::lite::Tensor; using mindspore::schema::Format::Format_NHWC; @@ -27,7 +27,7 @@ class TestPowerOpenCLCI : public CommonTest { }; // PrimitiveType_Concat: src/ops/populate/concat_populate.cc OpParameter *CreateParameter(bool broadcast_, float shift_, float scale_, float power_ = 2) { - auto *param = test::CreateParameter<PowerParameter>(schema::PrimitiveType_Power); + auto *param = test::CreateParameter<PowerParameter>(schema::PrimitiveType_PowFusion); param->power_ = power_; param->broadcast_ = broadcast_; param->shift_ = shift_; diff --git 
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc
index 3c612ab027..dd25e8b8a1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/prelu_tests.cc
@@ -23,7 +23,7 @@ class TestOpenCL_PRrelu : public CommonTest {};
 namespace {
 // PrimitiveType_PReLU: src/ops/populate/p_relu_populate.cc
 OpParameter *CreateParameter() {
-  auto *param = test::CreateParameter<PReluParameter>(schema::PrimitiveType_PReLU);
+  auto *param = test::CreateParameter<PReluParameter>(schema::PrimitiveType_PReLUFusion);
   return reinterpret_cast<OpParameter *>(param);
 }
 }  // namespace
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reduce_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reduce_tests.cc
index 05d10ca76f..0831f160b1 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/reduce_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/reduce_tests.cc
@@ -24,7 +24,7 @@ namespace {
 // PrimitiveType_Reduce: src/ops/populate/reduce_populate.cc
 // PrimitiveType_Mean: src/ops/populate/mean_populate.cc
 OpParameter *CreateParameter(const std::vector<int> &axis, schema::ReduceMode mode, bool keep_dims) {
-  auto *param = test::CreateParameter<ReduceParameter>(schema::PrimitiveType_Reduce);
+  auto *param = test::CreateParameter<ReduceParameter>(schema::PrimitiveType_ReduceFusion);
   param->keep_dims_ = keep_dims;
   param->reduce_to_end_ = false;
   param->coeff = 0.f;
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/scale_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/scale_tests.cc
index aeb9ef7b68..dbd10f2a12 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/scale_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/scale_tests.cc
@@ -23,7 +23,7 @@ class TestOpenCL_Scale : public CommonTest {};
 namespace {
 // PrimitiveType_Resize: src/ops/populate/scale_populate.cc
 OpParameter *CreateParameter(int axis, int activation_type = schema::ActivationType_NO_ACTIVATION) {
-  auto *param = test::CreateParameter<ScaleParameter>(schema::PrimitiveType_Scale);
+  auto *param = test::CreateParameter<ScaleParameter>(schema::PrimitiveType_ScaleFusion);
   param->axis_ = axis;
   param->activation_type_ = activation_type;
   return reinterpret_cast<OpParameter *>(param);
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
index d87cc1dbcd..6ce0819597 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/slice_tests.cc
@@ -23,7 +23,7 @@ class TestOpenCL_Slice : public CommonTest {};
 namespace {
 // PrimitiveType_Slice: src/ops/populate/slice_populate.cc
 OpParameter *CreateParameter(const std::vector<int> &begin, const std::vector<int> &size) {
-  auto *param = test::CreateParameter<SliceParameter>(schema::PrimitiveType_Slice);
+  auto *param = test::CreateParameter<SliceParameter>(schema::PrimitiveType_SliceFusion);
   param->param_length_ = begin.size();
   for (int i = 0; i < begin.size(); ++i) {
     param->begin_[i] = begin[i];
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
index b696111e3b..b1ff2d5039 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/softmax_tests.cc
@@ -23,7 +23,7 @@ class TestOpenCL_SoftMax : public CommonTest {};
 namespace {
 // PrimitiveType_SoftMax: src/ops/populate/softmax_populate.cc
 OpParameter *CreateParameter(int axis) {
-  auto *param = test::CreateParameter<SoftmaxParameter>(schema::PrimitiveType_SoftMax);
+  auto *param = test::CreateParameter<SoftmaxParameter>(schema::PrimitiveType_Softmax);
   param->axis_ = axis;
   return reinterpret_cast<OpParameter *>(param);
 }
diff --git a/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc b/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
index d085279637..898ae7b990 100644
--- a/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
+++ b/mindspore/lite/test/ut/src/runtime/kernel/opencl/to_format_tests.cc
@@ -57,7 +57,7 @@ TEST_F(TestToFormatOpenCL, ToFormatNHWC2NCHW) {
   }
   std::vector<lite::Tensor *> inputs{tensor_x};
   std::vector<lite::Tensor *> outputs{tensor_out};
-  auto arith_kernel_ptr = std::make_unique<kernel::ToFormatOpenCLKernel>(nullptr, inputs, outputs, nullptr, nullptr);
+  auto arith_kernel_ptr = std::make_unique<kernel::ToFormatOpenCLKernel>(nullptr, inputs, outputs, nullptr);
   auto arith_kernel = arith_kernel_ptr.get();
   if (arith_kernel == nullptr) {
     MS_LOG(ERROR) << "arith_kernel create error.";
diff --git a/mindspore/lite/test/ut/src/scheduler_test.cc b/mindspore/lite/test/ut/src/scheduler_test.cc
index 7514d2ed1a..f1c499616f 100644
--- a/mindspore/lite/test/ut/src/scheduler_test.cc
+++ b/mindspore/lite/test/ut/src/scheduler_test.cc
@@ -24,7 +24,6 @@ using mindspore::kernel::KernelKey;
 using mindspore::kernel::LiteKernel;
 using mindspore::lite::InnerContext;
 using mindspore::lite::LiteSession;
-using mindspore::lite::PrimitiveC;
 using mindspore::lite::Tensor;
 using mindspore::schema::PrimitiveType_Abs;
 using mindspore::TypeId::kNumberTypeFloat32;
@@ -45,8 +44,8 @@ TEST_F(SchedulerTest, TestConstructSubGraphsTwoBranch) {
   split->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   split->primitive->value.type = mindspore::schema::PrimitiveType_Split;
   auto primitive = new mindspore::schema::SplitT;
-  primitive->numberSplit = 2;
-  primitive->splitDim = 3;
+  primitive->output_num = 2;
+  primitive->axis = 3;
   split->primitive->value.value = primitive;
   split->name = "split";
 
@@ -64,7 +63,7 @@ TEST_F(SchedulerTest, TestConstructSubGraphsTwoBranch) {
   cons1->outputIndex = {4};
   cons1->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   cons1->primitive->value.type = mindspore::schema::PrimitiveType_Cos;
-  auto cons1_primitive = new mindspore::schema::AsinT;
+  auto cons1_primitive = new mindspore::schema::CosT;
   cons1->primitive->value.value = cons1_primitive;
   cons1->name = "cpu1";
 
@@ -82,7 +81,7 @@ TEST_F(SchedulerTest, TestConstructSubGraphsTwoBranch) {
   cons2->outputIndex = {6};
   cons2->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   cons2->primitive->value.type = mindspore::schema::PrimitiveType_Cos;
-  auto cons2_primitive = new mindspore::schema::AsinT;
+  auto cons2_primitive = new mindspore::schema::CosT;
   cons2->primitive->value.value = cons2_primitive;
   cons2->name = "cpu2";
 
@@ -188,8 +187,8 @@ TEST_F(SchedulerTest, TestConstructSubGraphsThreeBranch) {
   split->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   split->primitive->value.type = mindspore::schema::PrimitiveType_Split;
   auto primitive = new mindspore::schema::SplitT;
-  primitive->numberSplit = 3;
-  primitive->splitDim = 3;
+  primitive->output_num = 3;
+  primitive->axis = 3;
   split->primitive->value.value = primitive;
   split->name = "split";
 
@@ -216,7 +215,7 @@ TEST_F(SchedulerTest, TestConstructSubGraphsThreeBranch) {
   cons1->outputIndex = {6};
   cons1->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   cons1->primitive->value.type = mindspore::schema::PrimitiveType_Cos;
-  auto cons1_primitive = new mindspore::schema::AsinT;
+  auto cons1_primitive = new mindspore::schema::CosT;
   cons1->primitive->value.value = cons1_primitive;
   cons1->name = "cpu1";
 
@@ -243,7 +242,7 @@ TEST_F(SchedulerTest, TestConstructSubGraphsThreeBranch) {
   cons2->outputIndex = {9};
   cons2->primitive = std::make_unique<mindspore::schema::PrimitiveT>();
   cons2->primitive->value.type = mindspore::schema::PrimitiveType_Cos;
-  auto cons2_primitive = new mindspore::schema::AsinT;
+  auto cons2_primitive = new mindspore::schema::CosT;
   cons2->primitive->value.value = cons2_primitive;
   cons2->name = "cpu2";
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
index dda48616bf..dd5ee4f817 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_activation_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -34,7 +34,7 @@ TEST_F(TestTfliteParserRelu, OpType) {
 TEST_F(TestTfliteParserRelu, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsActivation(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsActivation();
-  ASSERT_EQ(val->type, schema::ActivationType_RELU);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_RELU);
 }
 
 class TestTfliteParserRelu6 : public TestTfliteParser {
@@ -52,7 +52,7 @@ TEST_F(TestTfliteParserRelu6, OpType) {
 TEST_F(TestTfliteParserRelu6, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsActivation(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsActivation();
-  ASSERT_EQ(val->type, schema::ActivationType_RELU6);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_RELU6);
 }
 
 class TestTfliteParserTanh : public TestTfliteParser {
@@ -70,7 +70,7 @@ TEST_F(TestTfliteParserTanh, OpType) {
 TEST_F(TestTfliteParserTanh, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsActivation(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsActivation();
-  ASSERT_EQ(val->type, schema::ActivationType_TANH);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_TANH);
 }
 
 class TestTfliteParserLogistic : public TestTfliteParser {
@@ -87,7 +87,7 @@ TEST_F(TestTfliteParserLogistic, OpType) {
 TEST_F(TestTfliteParserLogistic, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsActivation(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsActivation();
-  ASSERT_EQ(val->type, schema::ActivationType_SIGMOID);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_SIGMOID);
 }
 
 class TestTfliteParserHardSwish : public TestTfliteParser {
@@ -104,7 +104,7 @@ TEST_F(TestTfliteParserHardSwish, OpType) {
 TEST_F(TestTfliteParserHardSwish, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsActivation(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsActivation();
-  ASSERT_EQ(val->type, schema::ActivationType_SIGMOID);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_SIGMOID);
 }
 
 class TestTfliteParserPrelu : public TestTfliteParser {
@@ -128,14 +128,14 @@ class TestTfliteParserLeakyRelu : public TestTfliteParser {
 TEST_F(TestTfliteParserLeakyRelu, OpType) {
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LeakyReLU) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LeakyRelu) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserLeakyRelu, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsLeakyReLU(), nullptr);
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsLeakyRelu(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value;
-  ASSERT_EQ(val.AsLeakyReLU()->negativeSlope, 0.20000000298023224);
-  ASSERT_EQ(val.type, schema::PrimitiveType_LeakyReLU);
+  ASSERT_EQ(val.AsLeakyRelu()->negative_slope, 0.20000000298023224);
+  ASSERT_EQ(val.type, schema::PrimitiveType_LeakyRelu);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc
index 465930039d..f6d5090c12 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmax_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,17 +28,16 @@ TEST_F(TestTfliteParserArgmax, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ArgMax) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ArgMaxFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserArgmax, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsArgMax(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsArgMax();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsArgMaxFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsArgMaxFusion();
   ASSERT_EQ(val->axis, 1);
-  ASSERT_EQ(val->topK, 1);
-  ASSERT_EQ(val->axisType, 1);
-  ASSERT_EQ(val->keepDims, false);
-  ASSERT_EQ(val->outMaxValue, false);
+  ASSERT_EQ(val->top_k, 1);
+  ASSERT_EQ(val->keep_dims, false);
+  ASSERT_EQ(val->out_max_value, false);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc
index 03f9e1c1bf..0173e72dd8 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_argmin_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,17 +28,16 @@ TEST_F(TestTfliteParserArgmin, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ArgMin) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ArgMinFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserArgmin, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsArgMin(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsArgMin();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsArgMinFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsArgMinFusion();
   ASSERT_EQ(val->axis, 1);
-  ASSERT_EQ(val->topK, 1);
-  ASSERT_EQ(val->axisType, 1);
-  ASSERT_EQ(val->keepDims, false);
-  ASSERT_EQ(val->outMaxValue, false);
+  ASSERT_EQ(val->top_k, 1);
+  ASSERT_EQ(val->keep_dims, false);
+  ASSERT_EQ(val->out_max_value, false);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc
index 70c032f71c..06b8e480f9 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_arithmetic_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@ TEST_F(TestTfliteParserAdd, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Add) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_AddFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserSub : public TestTfliteParser {
@@ -42,7 +42,7 @@ TEST_F(TestTfliteParserSub, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Sub) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_SubFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserMul : public TestTfliteParser {
@@ -55,7 +55,7 @@ TEST_F(TestTfliteParserMul, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Mul) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_MulFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserDiv : public TestTfliteParser {
@@ -68,7 +68,7 @@ TEST_F(TestTfliteParserDiv, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Div) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_DivFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserFloorDiv : public TestTfliteParser {
  public:
@@ -106,7 +106,7 @@ TEST_F(TestTfliteParserRealDiv, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Div) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_DivFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserSquaredDifference : public TestTfliteParser {
@@ -133,15 +133,14 @@ TEST_F(TestTfliteParserPow, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Power) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_PowFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserPow, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPower(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsPower();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPowFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsPowFusion();
   ASSERT_EQ(val->scale, 1.0);
   ASSERT_EQ(val->shift, 0.0);
-  ASSERT_EQ(val->power, 0.0);
 }
 
 class TestTfliteParserMaximum : public TestTfliteParser {
@@ -194,7 +193,7 @@ TEST_F(TestTfliteParserExp, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Exp) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ExpFusion) << "wrong Op Type";
 }
 
 class TestTfliteParserSqrt : public TestTfliteParser {
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc
index 8091bc6598..8180f0a2a6 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_batch_to_space_nd_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -34,10 +34,8 @@ TEST_F(TestTfliteParserBatchToSpaceNd, OpType) {
 TEST_F(TestTfliteParserBatchToSpaceNd, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsBatchToSpace(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsBatchToSpace();
-  const std::vector<int> blockShape = {2, 2};
-  ASSERT_EQ(val->blockShape, blockShape);
-  const std::vector<int> crops = {0, 0, 2, 0};
-  ASSERT_EQ(val->crops, crops);
+  const std::vector<int64_t> blockShape = {2, 2};
+  ASSERT_EQ(val->block_size, blockShape);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc
index 5fc7a4e31d..c56604c15e 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_cast_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -31,11 +31,4 @@ TEST_F(TestTfliteParserCast, OpType) {
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
   ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Cast) << "wrong Op Type";
 }
-
-TEST_F(TestTfliteParserCast, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsCast(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsCast();
-  ASSERT_EQ(val->srcT, 43);
-  ASSERT_EQ(val->dstT, 34);
-}
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
index 919972dc8c..676d56e0b4 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_conv_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,28 +28,22 @@ TEST_F(TestTfliteParserConv, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2D) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2DFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserConv, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2D(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsConv2D();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2DFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsConv2DFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
   ASSERT_EQ(val->group, 1);
-  ASSERT_EQ(val->activationType, schema::ActivationType_NO_ACTIVATION);
-  ASSERT_EQ(val->channelIn, 1);
-  ASSERT_EQ(val->channelOut, 4);
-  ASSERT_EQ(val->kernelH, 3);
-  ASSERT_EQ(val->kernelW, 3);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->dilateH, 1);
-  ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
-  ASSERT_EQ(val->padUp, 1);
-  ASSERT_EQ(val->padDown, 1);
-  ASSERT_EQ(val->padLeft, 1);
-  ASSERT_EQ(val->padRight, 1);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_NO_ACTIVATION);
+  ASSERT_EQ(val->in_channel, 1);
+  ASSERT_EQ(val->out_channel, 4);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{3, 3}));
+  ASSERT_EQ(val->stride, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->dilation, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_SAME);
+  ASSERT_EQ(val->pad_list, (std::vector<int64_t>{1, 1, 1, 1}));
 }
 }  // namespace mindspore
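The conv parser assertions show the attribute-level side of the unification most clearly: per-dimension scalars collapse into int64 vectors and PadMode_SAME_UPPER folds into PadMode_SAME. A sketch of filling the unified table the way these expectations read it back (the Conv2DFusionT spelling is inferred from the AsConv2DFusion() accessor; the snippet is illustrative, not from this patch):

  // Illustrative only: populating the unified Conv2DFusionT attributes.
  auto conv = std::make_unique<mindspore::schema::Conv2DFusionT>();
  conv->format = mindspore::schema::Format_NHWC;
  conv->in_channel = 1;           // was channelIn
  conv->out_channel = 4;          // was channelOut
  conv->kernel_size = {3, 3};     // was kernelH / kernelW
  conv->stride = {1, 1};          // was strideH / strideW
  conv->dilation = {1, 1};        // was dilateH / dilateW
  conv->pad_mode = mindspore::schema::PadMode_SAME;  // was PadMode_SAME_UPPER
  conv->pad_list = {1, 1, 1, 1};  // was padUp / padDown / padLeft / padRight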
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
index 5e34384159..0eaec3aede 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_deconv_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,29 +28,24 @@ TEST_F(TestTfliteParserDeConv, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_DeConv2D) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2dTransposeFusion)
+    << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserDeConv, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsDeConv2D(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsDeConv2D();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2dTransposeFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsConv2dTransposeFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
   ASSERT_EQ(val->group, 1);
-  ASSERT_EQ(val->activationType, schema::ActivationType_NO_ACTIVATION);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_NO_ACTIVATION);
 
-  ASSERT_EQ(val->channelIn, 1);
-  ASSERT_EQ(val->channelOut, 4);
-  ASSERT_EQ(val->kernelH, 3);
-  ASSERT_EQ(val->kernelW, 3);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->dilateH, 1);
-  ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
-  ASSERT_EQ(val->padUp, 1);
-  ASSERT_EQ(val->padDown, 1);
-  ASSERT_EQ(val->padLeft, 1);
-  ASSERT_EQ(val->padRight, 1);
+  ASSERT_EQ(val->in_channel, 1);
+  ASSERT_EQ(val->out_channel, 4);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{3, 3}));
+  ASSERT_EQ(val->stride, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->dilation, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_SAME);
+  ASSERT_EQ(val->pad_list, (std::vector<int64_t>{1, 1, 1, 1}));
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc
index b47b4997b6..9b37aaa9c0 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depth_to_space_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@ TEST_F(TestTfliteParserDepthToSpace, OpType) {
 TEST_F(TestTfliteParserDepthToSpace, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsDepthToSpace(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsDepthToSpace();
-  ASSERT_EQ(val->blockSize, 4);
+  ASSERT_EQ(val->block_size, 4);
   ASSERT_EQ(val->format, schema::Format_NHWC);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
index b6efbb3121..a2c91c98ac 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_depthwise_conv_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,28 +28,22 @@ TEST_F(TestTfliteParserDepthwiseConv1, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2D) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2DFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserDepthwiseConv1, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2D(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsConv2D();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2DFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsConv2DFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
   ASSERT_EQ(val->group, 0);
-  ASSERT_EQ(val->activationType, schema::ActivationType_NO_ACTIVATION);
-  ASSERT_EQ(val->channelIn, 1);
-  ASSERT_EQ(val->channelOut, 4);
-  ASSERT_EQ(val->kernelH, 3);
-  ASSERT_EQ(val->kernelW, 3);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->dilateH, 1);
-  ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
-  ASSERT_EQ(val->padUp, 1);
-  ASSERT_EQ(val->padDown, 1);
-  ASSERT_EQ(val->padLeft, 1);
-  ASSERT_EQ(val->padRight, 1);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_NO_ACTIVATION);
+  ASSERT_EQ(val->in_channel, 1);
+  ASSERT_EQ(val->out_channel, 4);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{3, 3}));
+  ASSERT_EQ(val->stride, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->dilation, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_SAME);
+  ASSERT_EQ(val->pad_list, (std::vector<int64_t>{1, 1, 1, 1}));
 }
 
 class TestTfliteParserDepthwiseConv2 : public TestTfliteParser {
@@ -62,27 +56,20 @@ TEST_F(TestTfliteParserDepthwiseConv2, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_DepthwiseConv2D) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Conv2DFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserDepthwiseConv2, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsDepthwiseConv2D(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsDepthwiseConv2D();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsConv2DFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsConv2DFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
-  ASSERT_EQ(val->activationType, schema::ActivationType_NO_ACTIVATION);
-  ASSERT_EQ(val->channelIn, 2);
-  ASSERT_EQ(val->channelMultiplier, 1);
-  ASSERT_EQ(val->kernelH, 3);
-  ASSERT_EQ(val->kernelW, 3);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->dilateH, 1);
-  ASSERT_EQ(val->dilateW, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
-  ASSERT_EQ(val->padUp, 1);
-  ASSERT_EQ(val->padDown, 1);
-  ASSERT_EQ(val->padLeft, 1);
-  ASSERT_EQ(val->padRight, 1);
+  ASSERT_EQ(val->activation_type, schema::ActivationType_NO_ACTIVATION);
+  ASSERT_EQ(val->in_channel, 2);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{3, 3}));
+  ASSERT_EQ(val->stride, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->dilation, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_SAME);
+  ASSERT_EQ(val->pad_list, (std::vector<int64_t>{1, 1, 1, 1}));
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc
index eae2d770d8..e29bd236d5 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_fill_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -30,11 +30,4 @@ TEST_F(TestTfliteParserFill, OpType) {
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
   ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Fill) << "wrong Op Type";
 }
-
-TEST_F(TestTfliteParserFill, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsFill(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsFill();
-  std::vector<int32_t> dims = {9};
-  ASSERT_EQ(val->dims, dims);
-}
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc
index 071738a15a..aaf3664819 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_gather_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -30,12 +30,4 @@ TEST_F(TestTfliteParserGather, OpType) {
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
   ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Gather) << "wrong Op Type";
 }
-
-TEST_F(TestTfliteParserGather, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsGather(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsGather();
-  ASSERT_EQ(val->axis, 0);
-  ASSERT_EQ(val->batchDims, 0);
-}
-
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc
index 675d8f82a7..bc2fad9bd9 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_l2norm_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,14 +28,15 @@ TEST_F(TestTfliteParserL2Norm, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_L2Norm) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_L2NormalizeFusion)
+    << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserL2Norm, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsL2Norm(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsL2Norm();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsL2NormalizeFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsL2NormalizeFusion();
   ASSERT_EQ(val->epsilon, 0.0);
-  std::vector<int32_t> axis = {0, 1, 2, 3};
+  std::vector<int64_t> axis = {0, 1, 2, 3};
   ASSERT_EQ(val->axis, axis);
 }
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc
index 6451cec50f..101dfc366d 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_lrn_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,13 +28,12 @@ TEST_F(TestTfliteParserLRN, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LocalResponseNormalization)
-    << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_LRN) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserLRN, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsLocalResponseNormalization(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsLocalResponseNormalization();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsLRN(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsLRN();
   ASSERT_EQ(val->alpha, 1);
   ASSERT_EQ(val->beta, 0.5);
   ASSERT_EQ(val->bias, 1);
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc
index 1d33e1a8fc..add7c5823a 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pad_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,14 +28,11 @@ TEST_F(TestTfliteParserPad, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Pad) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_PadFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserPad, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPad(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsPad();
-  std::vector<int32_t> paddings = {1, 1, 2, 2, 3, 3, 4, 4};
-  ASSERT_EQ(val->paddings, paddings);
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPadFusion(), nullptr);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc
index 204b1c550d..e7002674a6 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_parsers_test_utils.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -17,18 +17,11 @@
 #include "ut/tools/converter/parser/tflite/tflite_parsers_test_utils.h"
 #include <string>
 #include "schema/inner/model_generated.h"
-#include "tools/converter/parser/tflite/tflite_model_parser.h"
 
 namespace mindspore {
-schema::MetaGraphT *TestTfliteParser::LoadAndConvert(const string &model_path, const string &weight_path) {
-  lite::TfliteModelParser parser;
-  meta_graph = parser.ParseToFb(model_path, weight_path, schema::QuantType_QUANT_NONE);
-  if (meta_graph == nullptr) {
-    MS_LOG(ERROR) << "Parse to metaGraph return nullptr";
-    return nullptr;
-  }
-  return meta_graph;
+schema::MetaGraphT *TestTfliteParser::LoadAndConvert(const std::string &model_path, const std::string &weight_path) {
+  return nullptr;
 }
 
 void TestTfliteParser::TearDown() { free(meta_graph); }
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
index 337c24f4f8..93e3fa27d6 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_pooling_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,25 +29,19 @@ TEST_F(TestTfliteParserMaxPooling, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Pooling) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_MaxPoolFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserMaxPooling, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPooling(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsPooling();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsMaxPoolFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsMaxPoolFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
-  ASSERT_EQ(val->poolingMode, schema::PoolMode_MAX_POOLING);
   ASSERT_EQ(val->global, false);
-  ASSERT_EQ(val->windowW, 2);
-  ASSERT_EQ(val->windowH, 2);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_VALID);
-  ASSERT_EQ(val->padUp, 0);
-  ASSERT_EQ(val->padDown, 0);
-  ASSERT_EQ(val->padLeft, 0);
-  ASSERT_EQ(val->padRight, 0);
-  ASSERT_EQ(val->roundMode, schema::RoundMode_FLOOR);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{2, 2}));
+  ASSERT_EQ(val->strides, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_VALID);
+  ASSERT_EQ(val->pad, (std::vector<int64_t>{0, 0, 0, 0}));
+  ASSERT_EQ(val->round_mode, schema::RoundMode_FLOOR);
 }
 
 class TestTfliteParserAvgPooling : public TestTfliteParser {
@@ -60,24 +54,18 @@ TEST_F(TestTfliteParserAvgPooling, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Pooling) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_AvgPoolFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserAvgPooling, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsPooling(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsPooling();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsAvgPoolFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsAvgPoolFusion();
   ASSERT_EQ(val->format, schema::Format_NHWC);
-  ASSERT_EQ(val->poolingMode, schema::PoolMode_MEAN_POOLING);
   ASSERT_EQ(val->global, false);
-  ASSERT_EQ(val->windowW, 2);
-  ASSERT_EQ(val->windowH, 2);
-  ASSERT_EQ(val->strideW, 1);
-  ASSERT_EQ(val->strideH, 1);
-  ASSERT_EQ(val->padMode, schema::PadMode_SAME_UPPER);
-  ASSERT_EQ(val->padUp, 0);
-  ASSERT_EQ(val->padDown, 1);
-  ASSERT_EQ(val->padLeft, 0);
-  ASSERT_EQ(val->padRight, 1);
-  ASSERT_EQ(val->roundMode, schema::RoundMode_FLOOR);
+  ASSERT_EQ(val->kernel_size, (std::vector<int64_t>{2, 2}));
+  ASSERT_EQ(val->strides, (std::vector<int64_t>{1, 1}));
+  ASSERT_EQ(val->pad, (std::vector<int64_t>{0, 1, 0, 1}));
+  ASSERT_EQ(val->pad_mode, schema::PadMode_SAME);
+  ASSERT_EQ(val->round_mode, schema::RoundMode_FLOOR);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc
index 86928b867b..b2a518be1f 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reduce_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,16 +28,14 @@ TEST_F(TestTfliteParserReduceMax, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reduce) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReduceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserReduceMax, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduce(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReduce();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduceFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReduceFusion();
   ASSERT_EQ(val->mode, schema::ReduceMode_ReduceMax);
-  ASSERT_EQ(val->keepDims, false);
-  std::vector<int32_t> axes = {2};
-  ASSERT_EQ(val->axes, axes);
+  ASSERT_EQ(val->keep_dims, false);
 }
 
 class TestTfliteParserReduceMin : public TestTfliteParser {
@@ -50,16 +48,14 @@ TEST_F(TestTfliteParserReduceMin, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reduce) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReduceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserReduceMin, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduce(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReduce();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduceFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReduceFusion();
   ASSERT_EQ(val->mode, schema::ReduceMode_ReduceMin);
-  ASSERT_EQ(val->keepDims, false);
-  std::vector<int32_t> axes = {2};
-  ASSERT_EQ(val->axes, axes);
+  ASSERT_EQ(val->keep_dims, false);
 }
 
 class TestTfliteParserReduceProd : public TestTfliteParser {
@@ -72,16 +68,14 @@ TEST_F(TestTfliteParserReduceProd, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reduce) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReduceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserReduceProd, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduce(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReduce();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduceFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReduceFusion();
   ASSERT_EQ(val->mode, schema::ReduceMode_ReduceProd);
-  ASSERT_EQ(val->keepDims, false);
-  std::vector<int32_t> axes = {2};
-  ASSERT_EQ(val->axes, axes);
+  ASSERT_EQ(val->keep_dims, false);
 }
 
 class TestTfliteParserSum : public TestTfliteParser {
@@ -95,16 +89,14 @@ TEST_F(TestTfliteParserSum, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reduce) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReduceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserSum, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduce(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReduce();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduceFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReduceFusion();
   ASSERT_EQ(val->mode, schema::ReduceMode_ReduceSum);
-  ASSERT_EQ(val->keepDims, false);
-  std::vector<int32_t> axes = {2};
-  ASSERT_EQ(val->axes, axes);
+  ASSERT_EQ(val->keep_dims, false);
 }
 
 class TestTfliteParserMean : public TestTfliteParser {
@@ -118,16 +110,14 @@ TEST_F(TestTfliteParserMean, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reduce) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReduceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserMean, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduce(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReduce();
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReduceFusion(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReduceFusion();
   ASSERT_EQ(val->mode, schema::ReduceMode_ReduceMean);
-  ASSERT_EQ(val->keepDims, true);
-  std::vector<int32_t> axes = {2, 3};
-  ASSERT_EQ(val->axes, axes);
+  ASSERT_EQ(val->keep_dims, true);
 }
 
 // reduceAny
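The reduce tests keep mode and keep_dims but drop every axes assertion; presumably the unified IR carries the reduction axes as a tensor input rather than a ReduceFusion attribute. If equivalent coverage were wanted, a hedged check along these lines could substitute (the two-input layout is an assumption, not something this patch asserts):

  // Assumption: data tensor + axes tensor under the unified IR.
  auto &node = meta_graph->nodes.front();
  ASSERT_EQ(node->inputIndex.size(), 2);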
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc
index b3bdaad5ae..249c4bbd31 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reshape_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -31,11 +31,4 @@ TEST_F(TestTfliteParserReshape, OpType) {
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
   ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reshape) << "wrong Op Type";
 }
-
-TEST_F(TestTfliteParserReshape, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReshape(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReshape();
-  std::vector<int64_t> shape = {3, 5, 20};
-  ASSERT_EQ(val->shape, shape);
-}
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc
index 960ea20913..63118737a0 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_resize_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,11 +35,10 @@ TEST_F(TestTfliteParserResizeNN, OpType) {
 TEST_F(TestTfliteParserResizeNN, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsResize(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsResize();
-  ASSERT_EQ(val->alignCorners, false);
-  ASSERT_EQ(val->newHeight, 3);
-  ASSERT_EQ(val->newWidth, 100);
+  ASSERT_EQ(val->new_height, 3);
+  ASSERT_EQ(val->new_width, 100);
   ASSERT_EQ(val->format, schema::Format_NHWC);
-  ASSERT_EQ(val->preserveAspectRatio, false);
+  ASSERT_EQ(val->preserve_aspect_ratio, false);
   ASSERT_EQ(val->method, schema::ResizeMethod_NEAREST);
 }
 
@@ -59,11 +58,10 @@ TEST_F(TestTfliteParserResizeBilinear, OpType) {
 TEST_F(TestTfliteParserResizeBilinear, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsResize(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsResize();
-  ASSERT_EQ(val->alignCorners, false);
-  ASSERT_EQ(val->newHeight, 75);
-  ASSERT_EQ(val->newWidth, 4);
+  ASSERT_EQ(val->new_height, 75);
+  ASSERT_EQ(val->new_width, 4);
   ASSERT_EQ(val->format, schema::Format_NHWC);
-  ASSERT_EQ(val->preserveAspectRatio, false);
+  ASSERT_EQ(val->preserve_aspect_ratio, false);
   ASSERT_EQ(val->method, schema::ResizeMethod_LINEAR);
 }
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc
index e4a03440ba..a92a4eb4f4 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -28,13 +28,13 @@ TEST_F(TestTfliteParserReverse, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Reverse) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_ReverseV2) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserReverse, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReverse(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsReverse();
-  std::vector<int32_t> axis = {3};
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReverseV2(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsReverseV2();
+  std::vector<int64_t> axis = {3};
   ASSERT_EQ(val->axis, axis);
 }
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc
index fe7d37ae02..2324463d99 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_reverse_sequence_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,7 +35,6 @@ TEST_F(TestTfliteParserReverseSequence, OpType) {
 TEST_F(TestTfliteParserReverseSequence, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsReverseSequence(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsReverseSequence();
-  ASSERT_EQ(val->seqAxis, 1);
-  ASSERT_EQ(val->seqAxis, 1);
+  ASSERT_EQ(val->seq_dim, 1);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc
index 655f114eed..3d035a03ba 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_slice_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,17 +29,11 @@ TEST_F(TestTfliteParserSlice, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Slice) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_SliceFusion) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserSlice, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSlice(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsSlice();
-  ASSERT_EQ(val->format, schema::Format_NHWC);
-  std::vector<int32_t> begin = {1, 0, 0};
-  ASSERT_EQ(val->begin, begin);
-  std::vector<int32_t> size = {1, 1, 3};
-  ASSERT_EQ(val->size, size);
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSliceFusion(), nullptr);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc
index a35ebaf8d9..ebe51527b7 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_softmax_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -29,13 +29,13 @@ TEST_F(TestTfliteParserSoftmax, OpType) {
   ASSERT_NE(meta_graph, nullptr);
   ASSERT_GT(meta_graph->nodes.size(), 0);
   ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr);
-  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_SoftMax) << "wrong Op Type";
+  ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Softmax) << "wrong Op Type";
 }
 
 TEST_F(TestTfliteParserSoftmax, AttrValue) {
-  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSoftMax(), nullptr);
-  auto val = meta_graph->nodes.front()->primitive->value.AsSoftMax();
-  ASSERT_EQ(val->axis, -1);
+  ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSoftmax(), nullptr);
+  auto val = meta_graph->nodes.front()->primitive->value.AsSoftmax();
+  ASSERT_EQ(val->axis[0], -1);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc
index cbc0be98ef..9a8b0f92c6 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,9 +35,7 @@ TEST_F(TestTfliteParserSpaceToBatchND, OpType) {
 TEST_F(TestTfliteParserSpaceToBatchND, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSpaceToBatchND(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsSpaceToBatchND();
-  std::vector<int> blockshape = {2, 2};
-  ASSERT_EQ(val->blockShape, blockshape);
-  std::vector<int> padding = {0, 0, 2, 0};
-  ASSERT_EQ(val->paddings, padding);
+  std::vector<int64_t> blockshape = {2, 2};
+  ASSERT_EQ(val->block_shape, blockshape);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc
index 87a040edfe..5a9c63634a 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_space_to_depth_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,7 +35,7 @@ TEST_F(TestTfliteParserSpaceToDepth, OpType) {
 TEST_F(TestTfliteParserSpaceToDepth, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSpaceToDepth(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsSpaceToDepth();
-  ASSERT_EQ(val->blockSize, 2);
+  ASSERT_EQ(val->block_size, 2);
   ASSERT_EQ(val->format, schema::Format_NHWC);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc
index 97cb01d999..395eff9a04 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,10 +35,10 @@ TEST_F(TestTfliteParserSplit, OpType) {
 TEST_F(TestTfliteParserSplit, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSplit(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsSplit();
-  ASSERT_EQ(val->splitDim, 2);
-  ASSERT_EQ(val->numberSplit, 2);
-  const std::vector<int> sizeSplits = {2, 2};
-  ASSERT_EQ(val->sizeSplits, sizeSplits);
+  ASSERT_EQ(val->axis, 2);
+  ASSERT_EQ(val->output_num, 2);
+  const std::vector<int64_t> sizeSplits = {2, 2};
+  ASSERT_EQ(val->size_splits, sizeSplits);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc
index b0c6e78105..2008f3f4ac 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_split_v_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,10 +35,10 @@ TEST_F(TestTfliteParserSplitV, OpType) {
 TEST_F(TestTfliteParserSplitV, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsSplit(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsSplit();
-  ASSERT_EQ(val->splitDim, 0);
-  ASSERT_EQ(val->numberSplit, 2);
-  const std::vector<int> sizeSplits = {1, 3};
-  ASSERT_EQ(val->sizeSplits, sizeSplits);
+  ASSERT_EQ(val->axis, 0);
+  ASSERT_EQ(val->output_num, 2);
+  const std::vector<int64_t> sizeSplits = {1, 3};
+  ASSERT_EQ(val->size_splits, sizeSplits);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc
index ff6d01841a..a1ce8bea8d 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_stack_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -36,9 +36,6 @@ TEST_F(TestTfliteParserStack, AttrValue) {
   ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsStack(), nullptr);
   auto val = meta_graph->nodes.front()->primitive->value.AsStack();
   ASSERT_EQ(val->axis, 1);
-  ASSERT_EQ(val->n, 2);
-  const std::vector<int> isScale = {3, 2, 3};
-  ASSERT_EQ(val->isScale, isScale);
 }
 }  // namespace mindspore
diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc
index c7ad4dc069..3520416d8e 100644
--- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc
+++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_strided_slice_parser_test.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -35,17 +35,7 @@ TEST_F(TestTfliteParserStridedSlice, OpType) { TEST_F(TestTfliteParserStridedSlice, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsStridedSlice(), nullptr); auto val = meta_graph->nodes.front()->primitive->value.AsStridedSlice(); - ASSERT_EQ(val->beginMask, 0); - ASSERT_EQ(val->endMask, 0); - ASSERT_EQ(val->beginMask, 0); - ASSERT_EQ(val->beginMask, 0); - std::vector<int> begin = {1, -1, 0}; - ASSERT_EQ(val->begin, begin); - std::vector<int> end = {2, -3, 3}; - ASSERT_EQ(val->end, end); - std::vector<int> stride = {1, -1, 1}; - ASSERT_EQ(val->stride, stride); - std::vector<int> isscale = {3, 2, 3}; - ASSERT_EQ(val->isScale, isscale); + ASSERT_EQ(val->end_mask, 0); + ASSERT_EQ(val->begin_mask, 0); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc index 1060f2a870..105d8cfa46 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_tile_parser_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,13 +29,13 @@ TEST_F(TestTfliteParserTile, OpType) { ASSERT_NE(meta_graph, nullptr); ASSERT_GT(meta_graph->nodes.size(), 0); ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); - ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Tile) << "wrong Op Type"; + ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_TileFusion) << "wrong Op Type"; } TEST_F(TestTfliteParserTile, AttrValue) { - ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTile(), nullptr); - auto val = meta_graph->nodes.front()->primitive->value.AsTile(); - std::vector<int> multiply = {2, 3, 4}; - ASSERT_EQ(val->multiples, multiply); + ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTileFusion(), nullptr); + auto val = meta_graph->nodes.front()->primitive->value.AsTileFusion(); + std::vector<int64_t> dims = {2, 3, 4}; + ASSERT_EQ(val->dims, dims); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc index 62d42ded26..efe62eee45 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_topk_v2_parser_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -29,13 +29,12 @@ TEST_F(TestTfliteParserTopKV2, OpType) { ASSERT_NE(meta_graph, nullptr); ASSERT_GT(meta_graph->nodes.size(), 0); ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); - ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_TopK) << "wrong Op Type"; + ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_TopKFusion) << "wrong Op Type"; } TEST_F(TestTfliteParserTopKV2, AttrValue) { - ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTopK(), nullptr); - auto val = meta_graph->nodes.front()->primitive->value.AsTopK(); - ASSERT_EQ(val->k, 3); + ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTopKFusion(), nullptr); + auto val = meta_graph->nodes.front()->primitive->value.AsTopKFusion(); ASSERT_EQ(val->sorted, true); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc index 28bb1ba51f..a15abaf694 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_transpose_parser_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,11 +32,4 @@ TEST_F(TestTfliteParserTranspose, OpType) { ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Transpose) << "wrong Op Type"; } -TEST_F(TestTfliteParserTranspose, AttrValue) { - ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsTranspose(), nullptr); - auto val = meta_graph->nodes.front()->primitive->value.AsTranspose(); - std::vector<int32_t> perm = {1, 0}; - ASSERT_EQ(val->perm, perm); -} - } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc index 2b1ef44f5c..133027e6ca 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unique_parser_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -31,9 +31,4 @@ TEST_F(TestTfliteParserUnique, OpType) { ASSERT_NE(meta_graph->nodes.front()->primitive.get(), nullptr); ASSERT_EQ(meta_graph->nodes.front()->primitive->value.type, schema::PrimitiveType_Unique) << "wrong Op Type"; } - -TEST_F(TestTfliteParserUnique, AttrValue) { - ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsUnique(), nullptr); - ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsUnique(), nullptr); -} } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc index 9cb73131e4..481cc730a3 100644 --- a/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc +++ b/mindspore/lite/test/ut/tools/converter/parser/tflite/tflite_unstack_parser_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,7 +35,6 @@ TEST_F(TestTfliteParserUnstack, OpType) { TEST_F(TestTfliteParserUnstack, AttrValue) { ASSERT_NE(meta_graph->nodes.front()->primitive->value.AsUnstack(), nullptr); auto val = meta_graph->nodes.front()->primitive->value.AsUnstack(); - ASSERT_EQ(val->num, 5); ASSERT_EQ(val->axis, 1); } } // namespace mindspore diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc index c63ebb19bc..67e899eb82 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/constant_folding_fusion_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -26,6 +26,7 @@ #include "tools/converter/anf_transform.h" #include "tools/optimizer/fusion/constant_folding_fusion.h" #include "tools/anf_exporter/anf_exporter.h" +#include "test/common/import_from_meta_graphT.h" namespace mindspore { class ConstantFoldingFusionTest : public mindspore::CommonTest { @@ -148,8 +149,8 @@ MetaGraphTptr BuildMixGraph() { add_node->inputIndex = {0, 1}; add_node->outputIndex = {2}; add_node->primitive = std::make_unique<schema::PrimitiveT>(); - add_node->primitive->value.type = schema::PrimitiveType_Add; - add_node->primitive->value.value = new schema::AddT; + add_node->primitive->value.type = schema::PrimitiveType_AddFusion; + add_node->primitive->value.value = new schema::AddFusionT; add_node->name = "add"; meta_graph->nodes.emplace_back(std::move(add_node)); @@ -160,8 +161,8 @@ MetaGraphTptr BuildMixGraph() { mul_node->inputIndex = {2, 3}; mul_node->outputIndex = {4}; mul_node->primitive = std::make_unique<schema::PrimitiveT>(); - mul_node->primitive->value.type = schema::PrimitiveType_Mul; - mul_node->primitive->value.value = new schema::MulT; + mul_node->primitive->value.type = schema::PrimitiveType_MulFusion; + mul_node->primitive->value.value = new schema::MulFusionT; mul_node->name = "mul"; meta_graph->nodes.emplace_back(std::move(mul_node)); @@ -246,8 +247,8 @@ MetaGraphTptr BuildSplitGraph() { split_node->primitive = std::make_unique<schema::PrimitiveT>(); split_node->primitive->value.type = schema::PrimitiveType_Split; std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>(); - attr->numberSplit = 2; - attr->splitDim = 1; + attr->output_num = 2; + attr->axis = 1; split_node->primitive->value.value = attr.release(); split_node->name = "split"; meta_graph->nodes.emplace_back(std::move(split_node)); @@ -259,8 +260,8 @@ MetaGraphTptr BuildSplitGraph() { mul_node1->inputIndex = {1, 3}; mul_node1->outputIndex = {5}; mul_node1->primitive = std::make_unique<schema::PrimitiveT>(); - mul_node1->primitive->value.type = schema::PrimitiveType_Mul; - std::unique_ptr<schema::MulT> mul_attr = std::make_unique<schema::MulT>(); + mul_node1->primitive->value.type = schema::PrimitiveType_MulFusion; + std::unique_ptr<schema::MulFusionT> mul_attr = std::make_unique<schema::MulFusionT>(); mul_node1->primitive->value.value = mul_attr.release(); mul_node1->name = "mul1"; meta_graph->nodes.emplace_back(std::move(mul_node1)); @@ -269,8 +270,8 @@ MetaGraphTptr BuildSplitGraph() { mul_node2->inputIndex = {2, 4}; mul_node2->outputIndex = {6}; mul_node2->primitive = std::make_unique<schema::PrimitiveT>(); - mul_node2->primitive->value.type = schema::PrimitiveType_Mul; - std::unique_ptr<schema::MulT> mul2_attr = std::make_unique<schema::MulT>(); + mul_node2->primitive->value.type = schema::PrimitiveType_MulFusion; + std::unique_ptr<schema::MulFusionT> mul2_attr = std::make_unique<schema::MulFusionT>(); mul_node2->primitive->value.value = mul2_attr.release(); mul_node2->name = "mul2"; meta_graph->nodes.emplace_back(std::move(mul_node2)); @@ -368,8 +369,8 @@ MetaGraphTptr BuildSplitGraph() { } } // namespace TEST_F(ConstantFoldingFusionTest, TestADDConstantFold) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Add, new schema::AddT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_AddFusion, new schema::AddFusionT); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = 
std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -382,7 +383,7 @@ TEST_F(ConstantFoldingFusionTest, TestADDConstantFold) { TEST_F(ConstantFoldingFusionTest, TestMixedConstantFold) { auto meta_graph = BuildMixGraph(); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -394,8 +395,8 @@ TEST_F(ConstantFoldingFusionTest, TestMixedConstantFold) { } TEST_F(ConstantFoldingFusionTest, TestSubConstantFold) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Sub, new schema::SubT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_SubFusion, new schema::SubFusionT); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -407,8 +408,8 @@ TEST_F(ConstantFoldingFusionTest, TestSubConstantFold) { } TEST_F(ConstantFoldingFusionTest, TestMulConstantFold) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Mul, new schema::MulT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_MulFusion, new schema::MulFusionT); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -421,41 +422,8 @@ TEST_F(ConstantFoldingFusionTest, TestMulConstantFold) { TEST_F(ConstantFoldingFusionTest, TestTransposeConstantFold) { auto transposeT = new schema::TransposeT; - transposeT->perm = {3, 0, 1, 2}; auto meta_graph = BuildGraph(schema::PrimitiveType_Transpose, transposeT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); - auto optimizer = std::make_shared<opt::GraphOptimizer>(); - auto pm = std::make_shared<opt::PassManager>(); - pm->AddPass(std::make_shared<opt::ConstFoldPass>()); - optimizer->AddPassManager(pm); - FuncGraphPtr new_graph = optimizer->Optimize(func_graph); - ASSERT_NE(nullptr, new_graph); - auto new_meta_graph = lite::Export(new_graph); - ASSERT_EQ(new_meta_graph->nodes.size(), 0); -} - -TEST_F(ConstantFoldingFusionTest, TestTileConstantFold) { - auto tileT = new schema::TileT; - tileT->multiples = {1, 2, 2, 2}; - auto meta_graph = BuildGraph(schema::PrimitiveType_Tile, tileT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); - auto optimizer = std::make_shared<opt::GraphOptimizer>(); - auto pm = std::make_shared<opt::PassManager>(); - pm->AddPass(std::make_shared<opt::ConstFoldPass>()); - optimizer->AddPassManager(pm); - FuncGraphPtr new_graph = optimizer->Optimize(func_graph); - ASSERT_NE(nullptr, new_graph); - auto new_meta_graph = lite::Export(new_graph); - ASSERT_EQ(new_meta_graph->nodes.size(), 0); -} - -TEST_F(ConstantFoldingFusionTest, TestStridedSliceConstantFold) { - auto stridedSliceT = new schema::StridedSliceT; - stridedSliceT->begin = {1}; - stridedSliceT->end = {3}; - stridedSliceT->stride = {1}; - auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_StridedSlice, stridedSliceT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto 
func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -470,7 +438,7 @@ TEST_F(ConstantFoldingFusionTest, TestStackConstantFold) { auto stackT = new schema::StackT; stackT->axis = 1; auto meta_graph = BuildGraph(schema::PrimitiveType_Stack, stackT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -482,9 +450,9 @@ TEST_F(ConstantFoldingFusionTest, TestStackConstantFold) { } TEST_F(ConstantFoldingFusionTest, TestSliceConstantFold) { - auto sliceT = new schema::SliceT; - auto meta_graph = BuildGraph(schema::PrimitiveType_Slice, sliceT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto sliceT = new schema::SliceFusionT; + auto meta_graph = BuildGraph(schema::PrimitiveType_SliceFusion, sliceT); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -498,7 +466,7 @@ TEST_F(ConstantFoldingFusionTest, TestSliceConstantFold) { TEST_F(ConstantFoldingFusionTest, TestShapeConstantFold) { auto shapeT = new schema::ShapeT; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_Shape, shapeT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -512,7 +480,7 @@ TEST_F(ConstantFoldingFusionTest, TestShapeConstantFold) { TEST_F(ConstantFoldingFusionTest, TestRsqrtConstantFold) { auto rsqrtT = new schema::RsqrtT; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_Rsqrt, rsqrtT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -525,9 +493,8 @@ TEST_F(ConstantFoldingFusionTest, TestRsqrtConstantFold) { TEST_F(ConstantFoldingFusionTest, TestReshapeConstantFold) { auto reshapeT = new schema::ReshapeT; - reshapeT->shape = {2, 6}; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_Reshape, reshapeT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -544,7 +511,7 @@ TEST_F(ConstantFoldingFusionTest, TestRangeConstantFold) { rangeT->start = 1; rangeT->delta = 1; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_Range, rangeT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); 
pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -557,7 +524,7 @@ TEST_F(ConstantFoldingFusionTest, TestRangeConstantFold) { TEST_F(ConstantFoldingFusionTest, TestMatmulConstantFold) { auto matmulT = new schema::MatMulT; auto meta_graph = BuildGraph(schema::PrimitiveType_MatMul, matmulT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -571,7 +538,7 @@ TEST_F(ConstantFoldingFusionTest, TestMatmulConstantFold) { TEST_F(ConstantFoldingFusionTest, TestExpandDimsConstantFold) { auto expandDimsT = new schema::ExpandDimsT; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_ExpandDims, expandDimsT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -585,7 +552,7 @@ TEST_F(ConstantFoldingFusionTest, TestExpandDimsConstantFold) { TEST_F(ConstantFoldingFusionTest, TestConcatDimsConstantFold) { auto concatT = new schema::ConcatT; auto meta_graph = BuildGraph(schema::PrimitiveType_Concat, concatT); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -598,12 +565,10 @@ TEST_F(ConstantFoldingFusionTest, TestConcatDimsConstantFold) { TEST_F(ConstantFoldingFusionTest, TestCastDimsConstantFold) { auto castT = new schema::CastT; - castT->srcT = kNumberTypeUInt8; - castT->dstT = kNumberTypeFloat32; auto meta_graph = BuildGraphForOneInput(schema::PrimitiveType_Cast, castT); auto input_tensor = meta_graph->allTensors.at(0).get(); input_tensor->dataType = kNumberTypeUInt8; - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>(); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); @@ -618,7 +583,7 @@ TEST_F(ConstantFoldingFusionTest, TestSplitConstantFold) { auto meta_graph = BuildSplitGraph(); auto input_tensor = meta_graph->allTensors.at(0).get(); input_tensor->dataType = kNumberTypeFloat32; - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto optimizer = std::make_shared<opt::GraphOptimizer>(); auto pm = std::make_shared<opt::PassManager>("test", false); pm->AddPass(std::make_shared<opt::ConstFoldPass>()); diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc index a171d31ccc..bd881ec986 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_activation_fusion_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the 
"License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ #include "tools/converter/model_parser.h" #include "tools/converter/anf_transform.h" #include "tools/anf_exporter/anf_exporter.h" +#include "test/common/import_from_meta_graphT.h" namespace mindspore { class ConvActivationFusionTest : public mindspore::CommonTest { @@ -40,17 +41,14 @@ CNodeTptr BuildConv2D() { convNode->inputIndex = {0, 1}; convNode->outputIndex = {2}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_Conv2D; - auto prim1 = new schema::Conv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelOut = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->out_channel = 3; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -60,18 +58,14 @@ CNodeTptr BuildDepthwiseConv2D() { convNode->inputIndex = {0, 1}; convNode->outputIndex = {2}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - auto prim1 = new schema::DepthwiseConv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelIn = 1; - prim1->channelMultiplier = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->in_channel = 1; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -82,7 +76,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType meta_graph->name = "graph"; // conv node CNodeTptr convNode; - if (conv_type == schema::PrimitiveType_Conv2D) { + if (conv_type == schema::PrimitiveType_Conv2DFusion) { convNode = BuildConv2D(); } else { convNode = BuildDepthwiseConv2D(); @@ -96,7 +90,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType next_node->primitive = std::make_unique<schema::PrimitiveT>(); next_node->primitive->value.type = schema::PrimitiveType_Activation; auto prim2 = new schema::ActivationT; - prim2->type = activation_type; + prim2->activation_type = activation_type; next_node->primitive->value.value = prim2; next_node->name = "activation"; meta_graph->nodes.emplace_back(std::move(next_node)); @@ -141,42 +135,42 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::ActivationType } } // namespace TEST_F(ConvActivationFusionTest, TestConvReluNode) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2D, schema::ActivationType_RELU); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::ActivationType_RELU); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = 
anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); auto new_meta_graph = lite::Export(new_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 1); for (auto &cnode : new_meta_graph->nodes) { - ASSERT_EQ(cnode->primitive->value.AsConv2D()->activationType, schema::ActivationType_RELU); + ASSERT_EQ(cnode->primitive->value.AsConv2DFusion()->activation_type, schema::ActivationType_RELU); } } TEST_F(ConvActivationFusionTest, TestConvRelu6Node) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2D, schema::ActivationType_RELU6); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::ActivationType_RELU6); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); auto new_meta_graph = lite::Export(new_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 1); for (auto &cnode : new_meta_graph->nodes) { - ASSERT_EQ(cnode->primitive->value.AsConv2D()->activationType, schema::ActivationType_RELU6); + ASSERT_EQ(cnode->primitive->value.AsConv2DFusion()->activation_type, schema::ActivationType_RELU6); } } TEST_F(ConvActivationFusionTest, TestBadCase_ConvRelu) { - auto meta_graph = BuildGraph(schema::PrimitiveType_DepthwiseConv2D, schema::ActivationType_LEAKY_RELU); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::ActivationType_LEAKY_RELU); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); auto new_meta_graph = lite::Export(new_graph); ASSERT_EQ(new_meta_graph->nodes.size(), 2); for (auto &cnode : new_meta_graph->nodes) { - if (cnode->primitive->value.type == schema::PrimitiveType_DepthwiseConv2D) { - ASSERT_EQ(cnode->primitive->value.AsDepthwiseConv2D()->activationType, schema::ActivationType_NO_ACTIVATION); + if (cnode->primitive->value.type == schema::PrimitiveType_Conv2DFusion) { + ASSERT_EQ(cnode->primitive->value.AsConv2DFusion()->activation_type, schema::ActivationType_NO_ACTIVATION); } } } diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc index ddfbd6dd5c..74b30b4904 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_biasadd_fusion_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ #include "tools/converter/model_parser.h" #include "tools/converter/anf_transform.h" #include "tools/anf_exporter/anf_exporter.h" +#include "test/common/import_from_meta_graphT.h" namespace mindspore { class ConvBiasAddFusionTest : public mindspore::CommonTest { @@ -40,17 +41,14 @@ CNodeTptr BuildConv2D() { convNode->inputIndex = {0, 1}; convNode->outputIndex = {2}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_Conv2D; - auto prim1 = new schema::Conv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelOut = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->out_channel = 3; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -60,18 +58,14 @@ CNodeTptr BuildDepthwiseConv2D() { convNode->inputIndex = {0, 1}; convNode->outputIndex = {2}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - auto prim1 = new schema::DepthwiseConv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelIn = 1; - prim1->channelMultiplier = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->in_channel = 1; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -82,7 +76,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType meta_graph->name = "graph"; // conv node CNodeTptr convNode; - if (conv_type == schema::PrimitiveType_Conv2D) { + if (conv_type == schema::PrimitiveType_Conv2DFusion) { convNode = BuildConv2D(); } else { convNode = BuildDepthwiseConv2D(); @@ -150,8 +144,8 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, schema::PrimitiveType } } // namespace TEST_F(ConvBiasAddFusionTest, TestConvAddNode) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2D, schema::PrimitiveType_BiasAdd); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_BiasAdd); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); @@ -161,8 +155,8 @@ TEST_F(ConvBiasAddFusionTest, TestConvAddNode) { } TEST_F(ConvBiasAddFusionTest, TestDeptiwiseConvAddNode) { - auto meta_graph = BuildGraph(schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_Add); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_AddFusion); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new 
lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); @@ -171,8 +165,8 @@ TEST_F(ConvBiasAddFusionTest, TestDeptiwiseConvAddNode) { } TEST_F(ConvBiasAddFusionTest, TestBadCase_ConvAdd) { - auto meta_graph = BuildGraph(schema::PrimitiveType_DepthwiseConv2D, schema::PrimitiveType_MatMul); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, schema::PrimitiveType_MatMul); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc index f8e2973451..3922f13822 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_bn_fusion_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ #include "tools/converter/model_parser.h" #include "tools/converter/anf_transform.h" #include "tools/anf_exporter/anf_exporter.h" +#include "test/common/import_from_meta_graphT.h" namespace mindspore { class ConvBNFusionTest : public mindspore::CommonTest { @@ -40,17 +41,14 @@ CNodeTptr BuildConv2D() { convNode->inputIndex = {0, 1}; convNode->outputIndex = {2}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_Conv2D; - auto prim1 = new schema::Conv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelOut = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->out_channel = 3; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -60,18 +58,14 @@ CNodeTptr BuildDepthwiseConv2D() { convNode->inputIndex = {0, 1, 2}; convNode->outputIndex = {3}; convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - auto prim1 = new schema::DepthwiseConv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelIn = 1; - prim1->channelMultiplier = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->in_channel = 1; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; @@ -83,7 +77,7 @@ MetaGraphTptr BuildCaffeGraph(schema::PrimitiveType conv_type) { meta_graph->name = "graph"; // conv node CNodeTptr convNode; - if (conv_type == schema::PrimitiveType_Conv2D) { 
+ if (conv_type == schema::PrimitiveType_Conv2DFusion) { convNode = BuildConv2D(); } else { convNode = BuildDepthwiseConv2D(); @@ -164,7 +158,7 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { meta_graph->name = "graph"; // conv node CNodeTptr convNode; - if (conv_type == schema::PrimitiveType_Conv2D) { + if (conv_type == schema::PrimitiveType_Conv2DFusion) { convNode = BuildConv2D(); } else { convNode = BuildDepthwiseConv2D(); @@ -267,8 +261,8 @@ MetaGraphTptr BuildTFGraph(schema::PrimitiveType conv_type) { } } // namespace TEST_F(ConvBNFusionTest, TestConvAddNode) { - auto meta_graph = BuildCaffeGraph(schema::PrimitiveType_Conv2D); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildCaffeGraph(schema::PrimitiveType_Conv2DFusion); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); @@ -277,8 +271,8 @@ TEST_F(ConvBNFusionTest, TestConvAddNode) { } TEST_F(ConvBNFusionTest, TestDeptiwiseConvAddNode) { - auto meta_graph = BuildTFGraph(schema::PrimitiveType_DepthwiseConv2D); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildTFGraph(schema::PrimitiveType_Conv2DFusion); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); diff --git a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc index e5ab50e54e..d72650698a 100644 --- a/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc +++ b/mindspore/lite/test/ut/tools/optimizer/fusion/conv_scale_fusion_test.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -25,6 +25,7 @@ #include "tools/converter/model_parser.h" #include "tools/converter/anf_transform.h" #include "tools/anf_exporter/anf_exporter.h" +#include "test/common/import_from_meta_graphT.h" namespace mindspore { class ConvScaleFusionTest : public mindspore::CommonTest { @@ -46,17 +47,14 @@ CNodeTptr BuildConv2D(int with_bias_flag) { convNode->outputIndex = {2}; } convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_Conv2D; - auto prim1 = new schema::Conv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelOut = 3; + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->out_channel = 3; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -72,19 +70,14 @@ CNodeTptr BuildDepthwiseConv2D(int with_bias_flag) { convNode->outputIndex = {2}; } convNode->primitive = std::make_unique<schema::PrimitiveT>(); - convNode->primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - auto prim1 = new schema::DepthwiseConv2DT; - prim1->padMode = schema::PadMode_SAME_UPPER; + convNode->primitive->value.type = schema::PrimitiveType_Conv2DFusion; + auto prim1 = new schema::Conv2DFusionT; + prim1->pad_mode = schema::PadMode_SAME; prim1->format = schema::Format_NHWC; - prim1->strideH = 1; - prim1->strideW = 1; - prim1->kernelH = 3; - prim1->kernelW = 3; - prim1->dilateH = 1; - prim1->dilateW = 1; - prim1->channelIn = 1; - prim1->channelMultiplier = 3; - + prim1->stride = {1, 1}; + prim1->kernel_size = {3, 3}; + prim1->dilation = {1, 1}; + prim1->in_channel = 1; convNode->primitive->value.value = prim1; convNode->name = "Conv2D"; return convNode; @@ -95,7 +88,7 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { meta_graph->name = "graph"; // conv node CNodeTptr convNode; - if (conv_type == schema::PrimitiveType_Conv2D) { + if (conv_type == schema::PrimitiveType_Conv2DFusion) { convNode = BuildConv2D(conv_with_bias); } else { convNode = BuildDepthwiseConv2D(conv_with_bias); @@ -114,8 +107,8 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { } scale_node->primitive = std::make_unique<schema::PrimitiveT>(); - scale_node->primitive->value.type = schema::PrimitiveType_Scale; - auto prim2 = new schema::ScaleT; + scale_node->primitive->value.type = schema::PrimitiveType_ScaleFusion; + auto prim2 = new schema::ScaleFusionT; scale_node->primitive->value.value = prim2; scale_node->name = "scale"; meta_graph->nodes.emplace_back(std::move(scale_node)); @@ -193,8 +186,8 @@ MetaGraphTptr BuildGraph(schema::PrimitiveType conv_type, bool conv_with_bias) { } } // namespace TEST_F(ConvScaleFusionTest, TestConvScaleNode) { - auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2D, true); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, true); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); @@ -204,8 +197,8 @@ TEST_F(ConvScaleFusionTest, 
TestConvScaleNode) { } TEST_F(ConvScaleFusionTest, TestDeptiwiseConvScaleNode) { - auto meta_graph = BuildGraph(schema::PrimitiveType_DepthwiseConv2D, false); - auto func_graph = lite::ModelParser::Fb2Anf(meta_graph.get()); + auto meta_graph = BuildGraph(schema::PrimitiveType_Conv2DFusion, false); + auto func_graph = lite::AnfImporterFromMetaGraphT::Fb2Anf(meta_graph.get()); auto anf_transform = new lite::AnfTransform(); auto new_graph = anf_transform->Transform(func_graph); ASSERT_NE(nullptr, new_graph); diff --git a/mindspore/lite/test/win_models.cfg b/mindspore/lite/test/win_models.cfg index 19eb510cef..059e8dc18d 100644 --- a/mindspore/lite/test/win_models.cfg +++ b/mindspore/lite/test/win_models.cfg @@ -1,9 +1,9 @@ -1 mobilenetv2_438.mindir -1 shufflenetv2.mindir -1 retinaface.mindir -1 mobilefacenet.mindir -1 ocr_mobilenetV2.mindir -2 efficientnet.mindir +# 1 mobilenetv2_438.mindir +# 1 shufflenetv2.mindir +# 1 retinaface.mindir +# 1 mobilefacenet.mindir +# 1 ocr_mobilenetV2.mindir +# 2 efficientnet.mindir 3 gender_res_large_deploy 3 ml_ocr_detect_20200305 3 hiai_cv_focusShootOCRModel_07 diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.cc b/mindspore/lite/tools/anf_exporter/anf_exporter.cc index 6420fd13f3..d617ad3e8c 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.cc +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.cc @@ -15,22 +15,30 @@ */ #include "tools/anf_exporter/anf_exporter.h" - #include <list> #include <memory> #include <string> #include <utility> #include <vector> #include <algorithm> - -#include "src/ops/quant_dtype_cast.h" #include "abstract/abstract_value.h" #include "mindspore/core/ir/primitive.h" +#include "ops/fusion/partial_fusion.h" +#include "ops/control_depend.h" +#include "ops/depend.h" +#include "ops/make_tuple.h" +#include "ops/quant_dtype_cast.h" +#include "ops/tuple_get_item.h" +#include "tools/converter/quant_param_holder.h" +#include "tools/optimizer/common/gllo_utils.h" #include "src/tensor.h" #include "src/param_value_lite.h" #include "src/common/utils.h" -#include "src/ops/partial.h" +#include "ops/partial.h" #include "tools/common/graph_util.h" +#include "src/ops/ops_utils.h" + +using mindspore::ops::PrimitiveC; namespace mindspore::lite { namespace { @@ -85,7 +93,12 @@ void AnfExporter::RemoveIfMakeTuple(const CNodePtr &cnode) { continue; } auto make_tuple_node = utils::cast<CNodePtr>(input_node); - if (IsPrimitiveCNode(make_tuple_node, schema::PrimitiveType_MakeTuple)) { + auto value_node = make_tuple_node->input(0)->cast<ValueNodePtr>(); + if (value_node == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return; + } + if (value_node->value() != nullptr && opt::CheckPrimitiveType(make_tuple_node, opt::kPrimMakeTuple)) { has_make_tuple = true; for (size_t j = 1; j < make_tuple_node->inputs().size(); ++j) { inputs.emplace_back(make_tuple_node->input(j)); @@ -100,7 +113,7 @@ void AnfExporter::RemoveIfMakeTuple(const CNodePtr &cnode) { } void AnfExporter::RemoveIfDepend(const CNodePtr &cnode) { - bool hasDepend = false; + bool has_depend = false; std::vector<AnfNodePtr> inputs; inputs.clear(); @@ -111,16 +124,21 @@ void AnfExporter::RemoveIfDepend(const CNodePtr &cnode) { inputs.emplace_back(cnode->input(i)); continue; } - auto dependNode = utils::cast<CNodePtr>(inputNode); - if (IsPrimitiveCNode(dependNode, schema::PrimitiveType_Depend) || - IsPrimitiveCNode(dependNode, schema::PrimitiveType_ControlDepend)) { - hasDepend = true; - bool maskOut = (dependNode->inputs().size() == 3); - for (size_t j = 1; j < 
dependNode->inputs().size(); ++j) { - AnfNodePtr dependInputNode = dependNode->input(j); - if (dependInputNode->isa<CNode>()) { - inputs.emplace_back(dependInputNode); - if (maskOut) { + auto depend_node = utils::cast<CNodePtr>(inputNode); + auto value_node = depend_node->input(0)->cast<ValueNodePtr>(); + if (value_node == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return; + } + if (value_node->value() != nullptr && (opt::CheckPrimitiveType(depend_node, prim::kPrimDepend) || + opt::CheckPrimitiveType(depend_node, prim::kPrimControlDepend))) { + has_depend = true; + bool mask_out = (depend_node->inputs().size() == 3); + for (size_t j = 1; j < depend_node->inputs().size(); ++j) { + AnfNodePtr depend_input_node = depend_node->input(j); + if (depend_input_node->isa<CNode>()) { + inputs.emplace_back(depend_input_node); + if (mask_out) { break; } } @@ -129,23 +147,35 @@ void AnfExporter::RemoveIfDepend(const CNodePtr &cnode) { inputs.emplace_back(cnode->input(i)); } } - if (hasDepend) { + if (has_depend) { cnode->set_inputs(inputs); } } int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph, - const std::shared_ptr<PrimitiveC> &primitive, + const std::shared_ptr<mindspore::Primitive> &primitive, const std::unique_ptr<schema::CNodeT> &dst_node) { MS_ASSERT(meta_graph != nullptr); MS_ASSERT(primitive != nullptr); MS_ASSERT(dst_node != nullptr); // add quant param - dst_node->quantType = primitive->quant_type(); MS_LOG(DEBUG) << "node: " << dst_node->name << " add QuantParam"; // activation - auto input_quant_params = primitive->input_quant_params(); - auto node_type = (schema::PrimitiveType)primitive->Type(); + QuantParamsVector input_quant_params; + QuantParamsVector output_quant_params; + dst_node->quantType = schema::QuantType_QUANT_NONE; + auto quant_param_valueptr = primitive->GetAttr("quant_params"); + if (quant_param_valueptr != nullptr) { + auto quant_param_holder = quant_param_valueptr->cast<QuantParamHolderPtr>(); + if (quant_param_holder == nullptr) { + MS_LOG(ERROR) << "quant param is invalid."; + return RET_ERROR; + } + input_quant_params = quant_param_holder->input_quant_params(); + output_quant_params = quant_param_holder->output_quant_params(); + dst_node->quantType = quant_param_holder->quant_type(); + } + // add quant param if (!input_quant_params.empty()) { for (size_t i = 0; i < input_quant_params.size(); i++) { if (i >= dst_node->inputIndex.size()) { @@ -170,10 +200,8 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me MS_LOG(DEBUG) << "node: " << dst_node->name << " input quant params is empty"; } // output - - auto output_quant_params = primitive->output_quant_params(); if (output_quant_params.empty()) { - if (node_type != schema::PrimitiveType_QuantDTypeCast) { + if (primitive->name() != mindspore::ops::kNameQuantDTypeCast) { MS_LOG(DEBUG) << "node: " << dst_node->name << " output quant params is empty"; } } else { @@ -202,13 +230,12 @@ int AnfExporter::ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &me auto first_output_index = dst_node->outputIndex[0]; auto first_tensor_output = meta_graph->allTensors[first_output_index].get(); if (dst_node->quantType == schema::QuantType_PostTraining) { - if (node_type != schema::PrimitiveType_QuantDTypeCast) { + if (primitive->name() != mindspore::ops::kNameQuantDTypeCast) { first_tensor_output->dataType = kNumberTypeInt8; } else { - MS_ASSERT(utils::isa<std::shared_ptr<QuantDTypeCast>>(primitive)); - auto primc = 
utils::cast<std::shared_ptr<QuantDTypeCast>>(primitive); + auto primc = primitive->cast<std::shared_ptr<mindspore::ops::QuantDTypeCast>>(); MS_ASSERT(primc != nullptr); - if (primc->GetDstT() != kNumberTypeFloat32) { + if (primc->get_dst_t() != kNumberTypeFloat32) { first_tensor_output->dataType = kNumberTypeInt8; } } @@ -297,23 +324,24 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc int ret = RET_OK; auto cnodes = GetOrderedCNodes(func_graph); for (const auto &cnode : cnodes) { - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { + auto prim = GetValueNode<std::shared_ptr<Primitive>>(cnode->input(0)); + schema::PrimitiveT *primT = nullptr; + if (prim == nullptr) { auto fg = GetValueNode<FuncGraphPtr>(cnode->input(0)); if (fg != nullptr) { auto partial_cnode = CreatePartialCnode(fg, cnode); - primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(partial_cnode->input(0)); - auto primT = primitive_c->primitiveT(); + prim = GetValueNode<std::shared_ptr<Primitive>>(partial_cnode->input(0)); + primT = GetPrimitiveT(partial_cnode->input(0)); MS_ASSERT(primT != nullptr); auto pos = fg_subgraph_map.find(fg); if (pos != fg_subgraph_map.end()) { - MS_ASSERT(primT->value.AsPartial() != nullptr); - primT->value.AsPartial()->subGraphIndex = fg_subgraph_map.at(fg); + MS_ASSERT(primT->value.AsPartialFusion() != nullptr); + primT->value.AsPartialFusion()->sub_graph_index = fg_subgraph_map.at(fg); } else { size_t next_subgraph_index = fg_subgraph_map.size() + 1; fg_subgraph_map.insert(std::pair<FuncGraphPtr, int>{fg, next_subgraph_index}); - MS_ASSERT(primT->value.AsPartial() != nullptr); - primT->value.AsPartial()->subGraphIndex = next_subgraph_index; + MS_ASSERT(primT->value.AsPartialFusion() != nullptr); + primT->value.AsPartialFusion()->sub_graph_index = next_subgraph_index; ret = ExportSubgraph(fg, meta_graphT, next_subgraph_index, keep_graph, copy_primitive, cnode); if (ret != RET_OK) { MS_LOG(ERROR) << "ExportSubgraph failed"; @@ -330,25 +358,22 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc RemoveIfMakeTuple(cnode); if (train_flag) { RemoveIfDepend(cnode); - if (primitive_c->Type() == schema::PrimitiveType_Depend || - primitive_c->Type() == schema::PrimitiveType_ControlDepend) { + if (prim->name() == mindspore::ops::kNameDepend || prim->name() == mindspore::ops::kNameControlDepend) { continue; } } - if ((primitive_c->Type() == schema::PrimitiveType_TupleGetItem) || - (primitive_c->Type() == schema::PrimitiveType_MakeTuple)) { + if (prim->name() == mindspore::ops::kNameTupleGetItem || prim->name() == mindspore::ops::kNameMakeTuple) { continue; } - auto primT = primitive_c->primitiveT(); auto node = std::make_unique<schema::CNodeT>(); if (node == nullptr) { MS_LOG(ERROR) << "object failed to be constructed"; ret = RET_MEMORY_FAILED; break; } - if (primT->value.type == schema::PrimitiveType_Return) { - node->name = "return_node"; + if (opt::CheckPrimitiveType(cnode, opt::kPrimReturn)) { + node->name = mindspore::ops::kNameReturn; ret = SetGraphoutputIndex(cnode, subgraph_index, meta_graphT, sub_graphT, node.get()); if (ret != RET_OK) { MS_LOG(ERROR) << "SetOpOutputN failed"; @@ -356,32 +381,23 @@ int AnfExporter::Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<sc } continue; } - + if (primT == nullptr) { + primT = GetPrimitiveT(cnode->input(0)); + } node->nodeType = schema::NodeType_CNode; node->name = cnode->fullname_with_scope(); - if (copy_primitive) { 
- auto primitive = new (std::nothrow) schema::PrimitiveT(); - if (primitive != nullptr) { - *primitive = *primT; - node->primitive = std::unique_ptr<schema::PrimitiveT>(primitive); - } - } else { - node->primitive = std::unique_ptr<schema::PrimitiveT>(primT); - } + node->primitive = std::unique_ptr<schema::PrimitiveT>(primT); ret = SetOpInputNode(cnode, meta_graphT, node.get()); if (ret != RET_OK) { MS_LOG(ERROR) << "SetOpInputNode failed"; break; } SetOpOutputNode(cnode, meta_graphT, node.get()); - ret = ConvertQuantParam(meta_graphT, primitive_c, node); + ret = ConvertQuantParam(meta_graphT, prim, node); if (ret != RET_OK) { MS_LOG(ERROR) << "ConvertQuantParam failed"; break; } - if (!keep_graph) { - primitive_c->ClearPrimitiveT(); - } meta_graphT->nodes.push_back(std::move(node)); meta_graphT->subGraph.at(subgraph_index)->nodeIndices.push_back(node_idx++); } @@ -437,22 +453,59 @@ schema::MetaGraphT *AnfExporter::Export(const FuncGraphPtr &func_graph, bool kee return meta_graphT.release(); } -int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> &input_anode, schema::CNodeT *output_cnode) { - std::string input_name = input_anode->fullname_with_scope(); - auto input_cnode = utils::cast<CNodePtr>(input_anode); - if (!IsPrimitiveCNode(input_cnode, schema::PrimitiveType_TupleGetItem)) { +int AnfExporter::ConvertInputCNodeCommonOp(const AnfNodePtr &input_anode, schema::CNodeT *output_cnode) { + MS_ASSERT(input_anode != nullptr && output_cnode != nullptr); + auto input_name = input_anode->fullname_with_scope(); + if (this->train_flag) { bool found = false; if (node_id_map_.find(input_name) != node_id_map_.end()) { output_cnode->inputIndex.emplace_back(node_id_map_[input_name]); found = true; } - if (!found) { auto input_index_key = input_name + "_o:" + std::to_string(0); if (node_id_map_.find(input_index_key) != node_id_map_.end()) { output_cnode->inputIndex.emplace_back(node_id_map_[input_index_key]); } } + return RET_OK; + } + if (utils::isa<abstract::AbstractTuple>(input_anode->abstract())) { + auto tuple = std::reinterpret_pointer_cast<abstract::AbstractTuple>(input_anode->abstract()); + if (tuple == nullptr) { + MS_LOG(ERROR) << "tuple is nullptr"; + return RET_ERROR; + } + auto elements = tuple->elements(); + for (size_t i = 0; i < elements.size(); i++) { + if (elements.size() == 1) { + if (node_id_map_.find(input_name) != node_id_map_.end()) { + output_cnode->inputIndex.emplace_back(node_id_map_[input_name]); + } + } else { + std::string name = input_name + "_o:" + std::to_string(i); + if (node_id_map_.find(name) != node_id_map_.end()) { + output_cnode->inputIndex.emplace_back(node_id_map_[name]); + } + } + } + } else { + if (node_id_map_.find(input_name) != node_id_map_.end()) { + output_cnode->inputIndex.emplace_back(node_id_map_[input_name]); + } + } + return RET_OK; +} + +int AnfExporter::ConvertInputCNode(const std::shared_ptr<AnfNode> &input_anode, schema::CNodeT *output_cnode) { + auto input_cnode = utils::cast<CNodePtr>(input_anode); + auto input_value_node = input_cnode->input(0)->cast<ValueNodePtr>(); + if (input_value_node == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return RET_ERROR; + } + if (input_value_node->value() == nullptr || !opt::CheckPrimitiveType(input_cnode, prim::kPrimTupleGetItem)) { + return ConvertInputCNodeCommonOp(input_anode, output_cnode); } else { auto inputs = input_cnode->inputs(); @@ -536,7 +589,11 @@ int AnfExporter::ConvertInputParameter(const std::shared_ptr<AnfNode> &input_ano } paramTensor->name = input_name; - if 
(primitive_c->enable_huffman_code() && paramTensor->dataType == kNumberTypeInt8) { + QuantParamHolderPtr quant_param_holder = primitive_c->GetAttr("quant_params") == nullptr + ? nullptr + : primitive_c->GetAttr("quant_params")->cast<QuantParamHolderPtr>(); + if (quant_param_holder != nullptr && quant_param_holder->enable_huffman_code() && + paramTensor->dataType == kNumberTypeInt8) { paramTensor->enableHuffmanCode = true; } node_id_map_[input_name] = meta_graphT->allTensors.size(); @@ -584,7 +641,7 @@ int AnfExporter::ProcessInt32OrInt64Imm(const ValueNodePtr &valueNode, std::uniq (*paramTensor)->dataType = kNumberTypeInt32; (*paramTensor)->dims = {1}; (*paramTensor)->nodeType = schema::NodeType::NodeType_ValueNode; - int real_data = CastToInt(value).front(); + int real_data = opt::CastToInt(value).front(); (*paramTensor)->data.resize(sizeof(int32_t)); ret = memcpy_s((*paramTensor)->data.data(), sizeof(int32_t), &real_data, sizeof(int32_t)); if (ret != EOK) { @@ -790,9 +847,7 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s std::string name = cnode_name + "_o:" + std::to_string(i); node_id_map_[name] = meta_graphT->allTensors.size(); meta_graphT->allTensors.emplace_back(msTensor); - if (IsPrimitiveCNode(cnode, schema::PrimitiveType_Conv2D) || - IsPrimitiveCNode(cnode, schema::PrimitiveType_DepthwiseConv2D) || - IsPrimitiveCNode(cnode, schema::PrimitiveType_Adam)) + if (opt::CheckPrimitiveType(cnode, prim::kPrimConv2DFusion) || opt::CheckPrimitiveType(cnode, prim::kPrimAdam)) break; } else { if (elements.size() == 1) { @@ -817,10 +872,9 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s } msTensor->dataType = type; meta_graphT->allTensors.emplace_back(msTensor); - if (IsPrimitiveCNode(cnode, schema::PrimitiveType_Conv2D) || - IsPrimitiveCNode(cnode, schema::PrimitiveType_DepthwiseConv2D) || - IsPrimitiveCNode(cnode, schema::PrimitiveType_FusedBatchNorm) || - IsPrimitiveCNode(cnode, schema::PrimitiveType_LayerNorm)) { + if (opt::CheckPrimitiveType(cnode, prim::kPrimConv2DFusion) || + opt::CheckPrimitiveType(cnode, prim::kPrimFusedBatchNorm) || + opt::CheckPrimitiveType(cnode, prim::kPrimLayerNormFusion)) { break; } } @@ -846,49 +900,8 @@ void AnfExporter::SetOpOutputNode(const CNodePtr &cnode, const std::unique_ptr<s } } -bool AnfExporter::HasPrimitiveCNode(const AnfNodePtr &node) { - MS_ASSERT(node != nullptr); - auto cnode = node->cast<CNodePtr>(); - if (cnode == nullptr) { - return false; - } - - auto prim = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (prim == nullptr) { - return false; - } - return true; -} - -bool AnfExporter::IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type) { - MS_ASSERT(node != nullptr); - auto cnode = node->cast<CNodePtr>(); - if (cnode == nullptr) { - return false; - } - - auto prim = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (prim == nullptr) { - return false; - } - return (schema::PrimitiveType)(prim->Type()) == type; -} - ValueNodePtr AnfExporter::GetPartialAnfPrim() { - auto partial_primitiveT = new (std::nothrow) schema::PrimitiveT; - if (partial_primitiveT == nullptr) { - MS_LOG(ERROR) << "new partial_primitiveT failed"; - return nullptr; - } - partial_primitiveT->value.type = schema::PrimitiveType_Partial; - partial_primitiveT->value.value = new (std::nothrow) schema::PartialT; - if (partial_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new PartialT failed"; - delete (partial_primitiveT); - return nullptr; - } - - 
auto partial_prim = std::make_shared<lite::Partial>(partial_primitiveT); + auto partial_prim = std::make_shared<mindspore::ops::PartialFusion>(); ValueNodePtr partial_anf_prim = NewValueNode(partial_prim); return partial_anf_prim; } diff --git a/mindspore/lite/tools/anf_exporter/anf_exporter.h b/mindspore/lite/tools/anf_exporter/anf_exporter.h index 2496ae3c70..2a910eafbc 100644 --- a/mindspore/lite/tools/anf_exporter/anf_exporter.h +++ b/mindspore/lite/tools/anf_exporter/anf_exporter.h @@ -22,10 +22,12 @@ #include <vector> #include <memory> #include "schema/inner/model_generated.h" -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" #include "ir/func_graph.h" #include "tools/converter/converter_context.h" +using mindspore::ops::PrimitiveC; + namespace mindspore::lite { constexpr const int kPartialMinSize = 3; @@ -46,6 +48,7 @@ class AnfExporter { protected: int ConvertInputCNode(const std::shared_ptr<AnfNode> &input_anode, schema::CNodeT *output_cnode); + int ConvertInputCNodeCommonOp(const AnfNodePtr &input_anode, schema::CNodeT *output_cnode); int ConvertInputParameter(const std::shared_ptr<AnfNode> &input_anode, const std::shared_ptr<PrimitiveC> &primitive, const std::unique_ptr<schema::MetaGraphT> &meta_graphT, schema::CNodeT *output_cnode); int ConvertInputValueNode(const std::shared_ptr<AnfNode> &input_anode, @@ -68,13 +71,11 @@ class AnfExporter { const std::shared_ptr<Value> &value, schema::CNodeT *output_cnode, const std::unique_ptr<schema::MetaGraphT> &meta_graphT); int SetGraphInputIndex(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const size_t &subgraph_index); - int SetGraphoutputIndex(const CNodePtr &cnode, const size_t subgraph_index, + int SetGraphoutputIndex(const CNodePtr &cnode, size_t subgraph_index, const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const std::unique_ptr<schema::SubGraphT> &sub_graphT, schema::CNodeT *return_node); - static bool IsPrimitiveCNode(const AnfNodePtr &node, schema::PrimitiveType type); - static bool HasPrimitiveCNode(const AnfNodePtr &node); static int ConvertQuantParam(const std::unique_ptr<schema::MetaGraphT> &meta_graph, - const std::shared_ptr<PrimitiveC> &primitive, + const std::shared_ptr<mindspore::Primitive> &primitive, const std::unique_ptr<schema::CNodeT> &dst_node); int Anf2Fb(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const size_t &subgraph_index, const bool &keep_graph, const bool &copy_primitive, @@ -82,10 +83,10 @@ class AnfExporter { int ExportSubgraph(const FuncGraphPtr &func_graph, const std::unique_ptr<schema::MetaGraphT> &meta_graphT, const size_t &subgraph_index, bool keep_graph, bool copy_primitive, const std::shared_ptr<AnfNode> &partial_anode = nullptr); - ValueNodePtr GetPartialAnfPrim(); - CNodePtr CreatePartialCnode(const FuncGraphPtr &fg, AnfNodePtr cnode); - std::vector<schema::CNodeT *> GetSubgraphNodes(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, - const size_t &subgraph_index); + static ValueNodePtr GetPartialAnfPrim(); + static CNodePtr CreatePartialCnode(const FuncGraphPtr &fg, AnfNodePtr cnode); + static std::vector<schema::CNodeT *> GetSubgraphNodes(const std::unique_ptr<schema::MetaGraphT> &meta_graphT, + const size_t &subgraph_index); private: std::map<std::string, int> node_id_map_; diff --git a/mindspore/lite/tools/anf_importer/CMakeLists.txt b/mindspore/lite/tools/anf_importer/CMakeLists.txt deleted file mode 100644 index 10f4f0d8d8..0000000000 --- a/mindspore/lite/tools/anf_importer/CMakeLists.txt +++ /dev/null @@ 
-1,11 +0,0 @@ -file(GLOB ANF_IMPORTER_SRC_LIST RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - *.cc - ) -set_property(SOURCE ${ANF_IMPORTER_SRC_LIST} PROPERTY COMPILE_DEFINITIONS SUBMODULE_ID=mindspore::SubModuleId::SM_LITE) -add_library(anf_importer_mid OBJECT - ${ANF_IMPORTER_SRC_LIST} - ) -add_dependencies(anf_importer_mid proto_mid) - -add_dependencies(anf_importer_mid fbs_src) -add_dependencies(anf_importer_mid fbs_inner_src) diff --git a/mindspore/lite/tools/anf_importer/anf_importer.cc b/mindspore/lite/tools/anf_importer/anf_importer.cc deleted file mode 100644 index 88a8f7c057..0000000000 --- a/mindspore/lite/tools/anf_importer/anf_importer.cc +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/anf_importer/anf_importer.h" -#include <utility> -#include "schema/model_generated.h" -#include "ir/dtype.h" -#include "include/errorcode.h" -#include "schema/inner/model_generated.h" -namespace mindspore { -namespace lite { -int AnfImporter::Import(const converter::Flags *flag) { - auto ret = ConverterConstTensor(); - if (RET_OK != ret) { - MS_LOG(ERROR) << "ConverterConstTensor failed " << ret; - return ret; - } - ret = ConverterCNode(); - if (RET_OK != ret) { - MS_LOG(ERROR) << "ConverterCNode failed " << ret; - return ret; - } - ret = AddReturnCNode(); - if (RET_OK != ret) { - MS_LOG(ERROR) << "AddReturnCNode failed " << ret; - return ret; - } - return RET_OK; -} - -AnfNodePtr AnfImporter::GetNode(int tensor_id) { - auto n = nodes_.find(tensor_id); - if (n == nodes_.end()) { - return nullptr; - } - return n->second; -} - -void AnfImporter::AddNode(int tensor_id, AnfNodePtr node) { nodes_[tensor_id] = std::move(node); } -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/anf_importer/anf_importer.h b/mindspore/lite/tools/anf_importer/anf_importer.h deleted file mode 100644 index 5d55b665f8..0000000000 --- a/mindspore/lite/tools/anf_importer/anf_importer.h +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ -#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ - -#include <unordered_map> -#include "ir/func_graph.h" -#include "ir/anf.h" -#include "base/base.h" -#include "schema/inner/model_generated.h" -#include "tools/converter/converter_flags.h" - -namespace mindspore::lite { -class AnfImporter { - public: - AnfImporter() = default; - - virtual ~AnfImporter() = default; - - virtual int Import(const converter::Flags *flag = nullptr); - - virtual FuncGraphPtr GetResult() = 0; - - protected: - // convert const tensor into parameter and save in nodes_ - virtual int ConverterConstTensor() = 0; - // convert other node into cnode and save in nodes_ - virtual int ConverterCNode() = 0; - - virtual int AddReturnCNode() = 0; - - AnfNodePtr GetNode(int tensor_id); - - void AddNode(int tensor_id, AnfNodePtr node); - - protected: - std::unordered_map<int, AnfNodePtr> nodes_; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_ANF_IMPORTER_H_ diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc deleted file mode 100644 index 81927416ef..0000000000 --- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.cc +++ /dev/null @@ -1,280 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/anf_importer/import_from_meta_graphT.h" -#include <vector> -#include <algorithm> -#include "schema/inner/model_generated.h" -#include "frontend/operator/ops.h" -#include "src/param_value_lite.h" -#include "src/common/log_adapter.h" -#include "include/errorcode.h" - -namespace mindspore::lite { -int AnfImporterFromMetaGraphT::ConverterConstTensor() { - MS_ASSERT(nullptr != meta_graph_); - MS_ASSERT(nullptr != func_graph_); - for (size_t i = 0; i < meta_graph_->allTensors.size(); i++) { - auto &tensor = meta_graph_->allTensors.at(i); - MS_ASSERT(tensor != nullptr); - if (tensor->nodeType != schema::NodeType::NodeType_ValueNode) { - continue; - } - auto parameter = func_graph_->add_parameter(); - std::vector<int> shape(tensor->dims.size()); - std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin()); - auto type_id = static_cast<TypeId>(tensor->dataType); - auto type_ptr = TypeIdToType(type_id); - std::vector<int64_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - MS_ASSERT(nullptr != abstract_tensor); - parameter->set_abstract(abstract_tensor); - if (!tensor->name.empty()) { - parameter->set_name(tensor->name); - } else { - parameter->set_name("const-" + std::to_string(i)); - } - - ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - MS_ASSERT(nullptr != param_value); - param_value->set_tensor_shape(shape); - param_value->set_tensor_type(type_id); - param_value->set_format(tensor->format); - if (!tensor->data.empty()) { - auto size = tensor->data.size(); - char *tensor_data = new (std::nothrow) char[size]; - if (tensor_data == nullptr) { - MS_LOG(ERROR) << "new char[] failed"; - return RET_MEMORY_FAILED; - } - auto ret = memcpy_s(tensor_data, size, tensor->data.data(), size); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - delete[] tensor_data; - return RET_MEMORY_FAILED; - } - param_value->SetTensorData(tensor_data, size); - parameter->set_default_param(param_value); - } else if (std::find(meta_graph_->inputIndex.begin(), meta_graph_->inputIndex.end(), i) == - meta_graph_->inputIndex.end()) { - parameter->set_default_param(param_value); - } - AddNode(i, parameter); - } - return RET_OK; -} - -ValueNodePtr AnfImporterFromMetaGraphT::ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode) { - MS_ASSERT(nullptr != meta_graph_); - MS_ASSERT(nullptr != cNode); - auto primitiveCValue = PrimitiveC::Create(cNode->primitive.release()); - if (primitiveCValue == nullptr) { - MS_LOG(ERROR) << "fail to convert primitive"; - return nullptr; - } - cNode->primitive = nullptr; - // add quant parameter - for (auto index : cNode->inputIndex) { - if (!meta_graph_->allTensors[index]->quantParams.empty()) { - std::vector<schema::QuantParamT> quant_params(meta_graph_->allTensors[index]->quantParams.size()); - std::transform( - meta_graph_->allTensors[index]->quantParams.begin(), meta_graph_->allTensors[index]->quantParams.end(), - quant_params.begin(), - [](std::unique_ptr<schema::QuantParamT> &quant_param) -> schema::QuantParamT { return *quant_param; }); - primitiveCValue->AddInputQuantParam(quant_params); - } else { - std::vector<schema::QuantParamT> notinited_quant_params(1); - primitiveCValue->AddInputQuantParam(notinited_quant_params); - } - } - for (auto index : cNode->outputIndex) { - if 
(!meta_graph_->allTensors[index]->quantParams.empty()) { - std::vector<schema::QuantParamT> quant_params(meta_graph_->allTensors[index]->quantParams.size()); - std::transform( - meta_graph_->allTensors[index]->quantParams.begin(), meta_graph_->allTensors[index]->quantParams.end(), - quant_params.begin(), - [](std::unique_ptr<schema::QuantParamT> &quant_param) -> schema::QuantParamT { return *quant_param; }); - primitiveCValue->AddOutputQuantParam(quant_params); - } else { - std::vector<schema::QuantParamT> notinited_quant_params(1); - primitiveCValue->AddOutputQuantParam(notinited_quant_params); - } - } - auto value_node = NewValueNode(std::shared_ptr<PrimitiveC>(primitiveCValue)); - return value_node; -} - -abstract::AbstractTensorPtr AnfImporterFromMetaGraphT::ConvertTensorToAbstractTensor( - const std::unique_ptr<schema::TensorT> &tensor) { - MS_ASSERT(nullptr != tensor); - std::vector<int> shape(tensor->dims.size()); - std::copy(tensor->dims.begin(), tensor->dims.end(), shape.begin()); - auto type_id = static_cast<TypeId>(tensor->dataType); - auto type_ptr = TypeIdToType(type_id); - std::vector<int64_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto ptr = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - MS_ASSERT(nullptr != ptr); - return ptr; -} - -int AnfImporterFromMetaGraphT::ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, - const CNodePtr &dst_cnode) { - MS_ASSERT(nullptr != meta_graph_); - MS_ASSERT(nullptr != src_cnode); - MS_ASSERT(nullptr != dst_cnode); - std::vector<uint32_t> out_tensor_ids = src_cnode->outputIndex; - if (out_tensor_ids.size() == 1) { - auto out_tensor_id = out_tensor_ids.front(); - MS_ASSERT(meta_graph_->allTensors.size() > out_tensor_id); - auto &tensor = meta_graph_->allTensors.at(out_tensor_id); - MS_ASSERT(nullptr != tensor); - dst_cnode->set_abstract(ConvertTensorToAbstractTensor(tensor)); - AddNode(out_tensor_id, dst_cnode); - } else { - AbstractBasePtrList abstract_list; - for (size_t i = 0; i < out_tensor_ids.size(); i++) { - auto out_tensor_id = out_tensor_ids.at(i); - MS_ASSERT(meta_graph_->allTensors.size() > out_tensor_id); - auto &tensor = meta_graph_->allTensors.at(out_tensor_id); - MS_ASSERT(nullptr != tensor); - abstract_list.emplace_back(ConvertTensorToAbstractTensor(tensor)); - auto tuple_get_item_prim_ptr = GetTupleGetItemPrim(); - if (tuple_get_item_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; - return RET_NULL_PTR; - } - auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr); - auto get_item_value = NewValueNode(MakeValue<int>(i)); - if (tuple_get_item_prim == nullptr || get_item_value == nullptr) { - MS_LOG(ERROR) << "NewValueNode is nullptr"; - return RET_NULL_PTR; - } - std::vector<AnfNodePtr> inputs{tuple_get_item_prim, dst_cnode, get_item_value}; - CNodePtr get_item_cnode = func_graph_->NewCNode(inputs); - if (get_item_cnode == nullptr) { - MS_LOG(ERROR) << "NewCNode is nullptr"; - return RET_NULL_PTR; - } - get_item_cnode->set_fullname_with_scope(src_cnode->name + "_getitem_" + std::to_string(i)); - AddNode(out_tensor_id, get_item_cnode); - } - dst_cnode->set_abstract(std::make_shared<abstract::AbstractTuple>(abstract_list)); - } - return RET_OK; -} - -int AnfImporterFromMetaGraphT::ConverterCNode() { - MS_ASSERT(nullptr != meta_graph_); - MS_ASSERT(nullptr != func_graph_); - for (const auto &cNode : meta_graph_->nodes) { - 
MS_ASSERT(nullptr != cNode); - auto anf_primitive = ConvertPrimitive(cNode); - if (anf_primitive == nullptr) { - MS_LOG(ERROR) << "cannot obtain anf primitive"; - return RET_NULL_PTR; - } - std::vector<AnfNodePtr> op_inputs = {anf_primitive}; - for (int j : cNode->inputIndex) { - auto node = GetNode(j); - if (nullptr == node) { - MS_LOG(ERROR) << "Can't find input node."; - return RET_NULL_PTR; - } - op_inputs.push_back(node); - } - auto new_cnode = func_graph_->NewCNode(op_inputs); - MS_ASSERT(nullptr != new_cnode); - new_cnode->set_fullname_with_scope(cNode->name); - auto status = ConvertAbstract(cNode, new_cnode); - if (status != RET_OK) { - MS_LOG(ERROR) << "ConvertAbstract failed."; - return status; - } - } - return RET_OK; -} - -int AnfImporterFromMetaGraphT::AddReturnCNode() { - MS_ASSERT(nullptr != meta_graph_); - MS_ASSERT(nullptr != func_graph_); - if (meta_graph_->outputIndex.size() > 1) { - std::vector<AnfNodePtr> make_tuple_inputs; - auto make_tuple_prim_ptr = GetMakeTuplePrim(); - if (make_tuple_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; - return RET_NULL_PTR; - } - auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr); - make_tuple_inputs.emplace_back(make_tuple_prim); - for (auto tensor_id : meta_graph_->outputIndex) { - auto cNode = GetNode(tensor_id); - if (nullptr == cNode) { - MS_LOG(ERROR) << "Can't find input node."; - return RET_ERROR; - } - make_tuple_inputs.emplace_back(cNode); - } - auto make_tuple_cnode = func_graph_->NewCNode(make_tuple_inputs); - if (make_tuple_cnode == nullptr) { - MS_LOG(ERROR) << "NewCNode is nullptr"; - return RET_NULL_PTR; - } - make_tuple_cnode->set_fullname_with_scope("return tuple"); - - std::vector<AnfNodePtr> op_inputs; - auto return_prim_ptr = GetReturnPrim(); - if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; - return RET_NULL_PTR; - } - auto value_node = NewValueNode(return_prim_ptr); - op_inputs.emplace_back(value_node); - op_inputs.emplace_back(make_tuple_cnode); - auto cnode = func_graph_->NewCNode(op_inputs); - MS_ASSERT(nullptr != cnode); - cnode->set_fullname_with_scope("Return"); - func_graph_->set_return(cnode); - } else { - auto return_prim_ptr = GetReturnPrim(); - if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; - return RET_NULL_PTR; - } - auto value_node = NewValueNode(return_prim_ptr); - std::vector<AnfNodePtr> op_inputs{value_node}; - auto cnode = GetNode(meta_graph_->outputIndex.front()); - if (nullptr == cnode) { - MS_LOG(ERROR) << "Can't find input node."; - return RET_ERROR; - } - op_inputs.emplace_back(cnode); - auto return_cnode = func_graph_->NewCNode(op_inputs); - if (return_cnode == nullptr) { - MS_LOG(ERROR) << "NewCNode is nullptr"; - return RET_NULL_PTR; - } - return_cnode->set_fullname_with_scope("Return"); - func_graph_->set_return(return_cnode); - } - return RET_OK; -} - -FuncGraphPtr AnfImporterFromMetaGraphT::GetResult() { return this->func_graph_; } -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h b/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h deleted file mode 100644 index 372f8d3042..0000000000 --- a/mindspore/lite/tools/anf_importer/import_from_meta_graphT.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ -#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ - -#include <utility> -#include <memory> -#include "schema/inner/model_generated.h" -#include "tools/anf_importer/anf_importer.h" -#include "src/ops/primitive_c.h" -#include "abstract/abstract_value.h" - -namespace mindspore::lite { -class AnfImporterFromMetaGraphT : public AnfImporter { - public: - AnfImporterFromMetaGraphT(schema::MetaGraphT *meta_graph, FuncGraphPtr func_graph) - : meta_graph_(meta_graph), func_graph_(std::move(func_graph)) {} - - ~AnfImporterFromMetaGraphT() override = default; - - FuncGraphPtr GetResult() override; - - private: - int ConverterConstTensor() override; - - int ConverterCNode() override; - - ValueNodePtr ConvertPrimitive(const std::unique_ptr<schema::CNodeT> &cNode); - - static abstract::AbstractTensorPtr ConvertTensorToAbstractTensor(const std::unique_ptr<schema::TensorT> &tensor); - - int ConvertAbstract(const std::unique_ptr<schema::CNodeT> &src_cnode, const CNodePtr &dst_cnode); - - int AddReturnCNode() override; - - private: - schema::MetaGraphT *meta_graph_; - FuncGraphPtr func_graph_; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_META_GRAPHT_H_ diff --git a/mindspore/lite/tools/anf_importer/import_from_mindir.cc b/mindspore/lite/tools/anf_importer/import_from_mindir.cc deleted file mode 100644 index 3720ec91bb..0000000000 --- a/mindspore/lite/tools/anf_importer/import_from_mindir.cc +++ /dev/null @@ -1,919 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/anf_importer/import_from_mindir.h" -#include <unistd.h> -#include <map> -#include <memory> -#include <stack> -#include <unordered_map> -#include <vector> -#include <algorithm> - -#include "src/ops/primitive_c.h" -#include "frontend/operator/ops.h" -#include "include/errorcode.h" -#include "ir/anf.h" -#include "ir/func_graph.h" -#include "schema/inner/model_generated.h" -#include "securec/include/securec.h" -#include "src/tensor.h" -#include "src/param_value_lite.h" -#include "proto/onnx.pb.h" -#include "src/common/log_adapter.h" -#include "tools/common/protobuf_utils.h" -#include "tools/common/graph_util.h" -#include "load_mindir/load_model.h" - -using string = std::string; -using int32 = int32_t; -using int64 = int64_t; -using uint64 = uint64_t; - -namespace mindspore::lite { -static constexpr char kConstantValueNode[] = "Constant"; - -enum ParseForm : int { - FORM_PARSE_TYPE = 0, - FORM_PARSE_SCALAR = 1, - FORM_PARSE_TENSOR = 2, -}; - -static std::map<std::string, ParseForm> kParseTypeSwitchMap{ - {"type", FORM_PARSE_TYPE}, {"scalar", FORM_PARSE_SCALAR}, {"tensor", FORM_PARSE_TENSOR}}; - -static std::unordered_map<int, TypeId> kDefaultValueSwitchMap{ - {onnx::TensorProto_DataType_BOOL, kNumberTypeBool}, {onnx::TensorProto_DataType_INT8, kNumberTypeInt8}, - {onnx::TensorProto_DataType_INT16, kNumberTypeInt16}, {onnx::TensorProto_DataType_INT32, kNumberTypeInt32}, - {onnx::TensorProto_DataType_INT64, kNumberTypeInt64}, {onnx::TensorProto_DataType_UINT8, kNumberTypeUInt8}, - {onnx::TensorProto_DataType_UINT16, kNumberTypeUInt16}, {onnx::TensorProto_DataType_UINT32, kNumberTypeUInt32}, - {onnx::TensorProto_DataType_UINT64, kNumberTypeUInt64}, {onnx::TensorProto_DataType_FLOAT16, kNumberTypeFloat16}, - {onnx::TensorProto_DataType_FLOAT, kNumberTypeFloat32}, {onnx::TensorProto_DataType_DOUBLE, kNumberTypeFloat64}, - {onnx::TensorProto_DataType_STRING, kObjectTypeString}, -}; - -std::shared_ptr<ValueTuple> ParserScalarAttrValue(const std::string &attr_name, - const std::unordered_map<string, ValuePtr> &kv) { - std::string str = attr_name; - auto replace = [&](const string &orgStr, const string &newStr) { - std::string::size_type pos(0); - while ((pos = str.find(orgStr)) != std::string::npos) { - str.replace(pos, orgStr.length(), newStr); - } - return str; - }; - // remove "scalar:" - str = replace("scalar:", ""); - // remove "Tuple" - str = replace("Tuple", ""); - // remove "List" - str = replace("List", ""); - std::stack<std::string> rules; - std::stack<ValuePtr> value; - int num = 0, count = 0; - for (size_t i = 0; i < str.length(); i++) { - if (str[i] == '[') { - rules.push("["); - } else if (str[i] == ']') { - // rules - std::vector<ValuePtr> vec; - while (rules.top() != "[") { - rules.pop(); - vec.push_back(value.top()); - value.pop(); - } - // pop "[" - rules.pop(); - // make tuple for names - std::string res = "dummy"; - // make tuple for values - reverse(vec.begin(), vec.end()); - auto vt = std::make_shared<ValueTuple>(vec); - if (rules.empty() && value.empty()) { - return vt; - } - rules.push(res); - value.push(vt); - } else if (str[i] == ',') { - continue; - } else { - count++; - if (str[i + 1] == '[' || str[i + 1] == ']' || str[i + 1] == ',') { - auto value_name = str.substr(i - count + 1, count); - value.push(kv.at(value_name)); - rules.push(value_name); - count = 0; - num++; - } - } - } - return {}; -} - -std::shared_ptr<abstract::AbstractTuple> ParserAttrShape( - const std::string &attr_name, const std::unordered_map<string, abstract::AbstractTensorPtr> 
&kv) { - std::string str = attr_name; - auto replace = [&](const string &orgStr, const string &newStr) { - std::string::size_type pos(0); - while ((pos = str.find(orgStr)) != std::string::npos) { - str.replace(pos, orgStr.length(), newStr); - } - return str; - }; - // remove "scalar:" - str = replace("shape:", ""); - // remove "Tuple" - str = replace("Tuple", ""); - // remove "List" - str = replace("List", ""); - std::stack<std::string> rules; - std::stack<abstract::AbstractBasePtr> value; - int num = 0, count = 0; - for (size_t i = 0; i < str.length(); i++) { - if (str[i] == '[') { - rules.push("["); - } else if (str[i] == ']') { - // rules - std::vector<abstract::AbstractBasePtr> vec; - while (rules.top() != "[") { - rules.pop(); - vec.push_back(value.top()); - value.pop(); - } - // pop "[" - rules.pop(); - // make tuple for names - std::string res = "dummy"; - // make tuple for values - reverse(vec.begin(), vec.end()); - auto vt = std::make_shared<abstract::AbstractTuple>(vec); - if (rules.empty() && value.empty()) { - return vt; - } - rules.push(res); - value.push(vt); - } else if (str[i] == ',') { - continue; - } else { - count++; - if (str[i + 1] == '[' || str[i + 1] == ']' || str[i + 1] == ',') { - auto value_name = str.substr(i - count + 1, count); - value.push(kv.at(value_name)); - rules.push(value_name); - count = 0; - num++; - } - } - } - return {}; -} - -#define PARSE_ONNXATTR_IN_SCALAR_FORM(type, valuetype) \ - ValuePtr ParseAttrInScalar_##type##_##valuetype(const onnx::TensorProto &attr_tensor) { \ - if (attr_tensor.type##_data_size() == 1) { \ - auto value = static_cast<valuetype>(attr_tensor.type##_data(0)); \ - return MakeValue<valuetype>(value); \ - } else { \ - MS_LOG(ERROR) << "size of scalar tensor doesn't equal 1!"; \ - } \ - return {}; \ - } - -PARSE_ONNXATTR_IN_SCALAR_FORM(double, double) -PARSE_ONNXATTR_IN_SCALAR_FORM(float, float) -PARSE_ONNXATTR_IN_SCALAR_FORM(string, string) -PARSE_ONNXATTR_IN_SCALAR_FORM(int32, int32) -PARSE_ONNXATTR_IN_SCALAR_FORM(int32, bool) -PARSE_ONNXATTR_IN_SCALAR_FORM(int64, int64) -PARSE_ONNXATTR_IN_SCALAR_FORM(uint64, uint64) - -int AnfImporterFromMindir::BuildParameterForFuncGraph(const ParameterPtr &node, - const onnx::ValueInfoProto &value_proto) { - if (node == nullptr) { - return RET_NULL_PTR; - } - if (!value_proto.has_type() || !value_proto.has_name()) { - MS_LOG(ERROR) << "onnx ValueInfoProto has no type or name! "; - return RET_PARAM_INVALID; - } - node->set_name(value_proto.name()); - const auto &type_proto = value_proto.type(); - if (!type_proto.has_tensor_type()) { - MS_LOG(ERROR) << "onnx TypeProto has no tensor_type! "; - return RET_PARAM_INVALID; - } - const onnx::TypeProto_Tensor &tensor_typeproto = type_proto.tensor_type(); - if (!tensor_typeproto.has_elem_type() || !tensor_typeproto.has_shape()) { - MS_LOG(ERROR) << "onnx TypeProto_Tensor has no elem_type or shape! 
"; - return RET_INPUT_TENSOR_ERROR; - } - const onnx::TensorShapeProto &tensor_shape = tensor_typeproto.shape(); - std::vector<int> shape; - for (int i = 0; i < tensor_shape.dim_size(); ++i) { - shape.push_back(tensor_shape.dim(i).dim_value()); - } - - if (kDefaultValueSwitchMap.find(tensor_typeproto.elem_type()) == kDefaultValueSwitchMap.end()) { - MS_LOG(ERROR) << "onnx TypeProto_Tensor elem_type is not support yet!"; - return RET_PARAM_INVALID; - } - - auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[tensor_typeproto.elem_type()]); - std::vector<int64_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - node->set_abstract(abstract_tensor); - - if (default_para_map_.find(value_proto.name()) != default_para_map_.end()) { - auto *tensor_info = new (std::nothrow) Tensor(kDefaultValueSwitchMap[tensor_typeproto.elem_type()], shape); - if (tensor_info == nullptr) { - return RET_MEMORY_FAILED; - } - tensor_info->MallocData(); - const onnx::TensorProto initialize_proto = default_para_map_[value_proto.name()]; - std::string initial_data = initialize_proto.raw_data(); - auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->MutableData()); - if (tensor_data_buf == nullptr) { - delete tensor_info; - return RET_MEMORY_FAILED; - } - tensor_info->set_data(nullptr); - auto ret = memcpy_s(tensor_data_buf, tensor_info->Size(), initial_data.data(), initial_data.size()); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - delete tensor_data_buf; - delete tensor_info; - return RET_MEMORY_FAILED; - } - - ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - if (param_value == nullptr) { - delete tensor_info; - return RET_NULL_PTR; - } - param_value->SetTensorData(tensor_data_buf, tensor_info->Size()); - param_value->set_tensor_type(tensor_info->data_type()); - param_value->set_tensor_shape(tensor_info->shape()); - node->set_default_param(param_value); - delete tensor_info; - } - anfnode_build_map_[value_proto.name()] = node; - return RET_OK; -} - -int AnfImporterFromMindir::ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, - const onnx::GraphProto &importProto) { - if (outputFuncGraph == nullptr) { - return RET_NULL_PTR; - } - MS_LOG(INFO) << "Parameters had default paramerer size is: " << importProto.initializer_size(); - - for (int i = 0; i < importProto.initializer_size(); ++i) { - const onnx::TensorProto &initializer_proto = importProto.initializer(i); - if (!initializer_proto.has_name()) { - MS_LOG(ERROR) << "initializer vector of onnx GraphProto has no name at index: " << i; - return RET_PARAM_INVALID; - } - default_para_map_[initializer_proto.name()] = initializer_proto; - } - - int status = RET_OK; - MS_LOG(INFO) << "all parameters size: " << importProto.input_size(); - for (int i = 0; i < importProto.input_size(); ++i) { - const onnx::ValueInfoProto &input_proto = importProto.input(i); - status = BuildParameterForFuncGraph(outputFuncGraph->add_parameter(), input_proto); - if (status != RET_OK) { - MS_LOG(ERROR) << "Build parameter for funcgraph fail at index: " << i; - break; - } - } - return status; -} - -bool AnfImporterFromMindir::ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name, - const onnx::TensorProto &attr_tensor) { - if (prim == nullptr) { - return false; - } - const int attr_tensor_type = 
attr_tensor.data_type(); - if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) { - MS_LOG(ERROR) << "Obtain attr in type-form has not support input type:" << attr_tensor_type; - return false; - } - prim->AddAttr(attr_name, TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type])); - return true; -} - -ValuePtr AnfImporterFromMindir::ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor) { - const int attr_tensor_type = attr_tensor.data_type(); - switch (attr_tensor_type) { - case onnx::TensorProto_DataType_STRING: { - return ParseAttrInScalar_string_string(attr_tensor); - } - case onnx::TensorProto_DataType_INT32: { - return ParseAttrInScalar_int32_int32(attr_tensor); - } - case onnx::TensorProto_DataType_INT64: { - return ParseAttrInScalar_int64_int64(attr_tensor); - } - case onnx::TensorProto_DataType_UINT64: { - return ParseAttrInScalar_uint64_uint64(attr_tensor); - } - case onnx::TensorProto_DataType_FLOAT: { - return ParseAttrInScalar_float_float(attr_tensor); - } - case onnx::TensorProto_DataType_DOUBLE: { - return ParseAttrInScalar_double_double(attr_tensor); - } - case onnx::TensorProto_DataType_BOOL: { - return ParseAttrInScalar_int32_bool(attr_tensor); - } - default: - MS_LOG(ERROR) << "Obtain attr in scalar-form has not support input type: " << attr_tensor_type; - return {}; - } -} - -bool AnfImporterFromMindir::ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name, - const onnx::TensorProto &attr_tensor) { - if (prim == nullptr) { - return false; - } - const int attr_tensor_type = attr_tensor.data_type(); - const std::string &tensor_buf = attr_tensor.raw_data(); - std::vector<int> shape; - auto ret = EOK; - if (attr_tensor.dims_size() != 0) { - for (int i = 0; i < attr_tensor.dims_size(); ++i) { - shape.push_back(attr_tensor.dims(i)); - } - std::vector<int64_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - tensor::TensorPtr tensor_info = - std::make_shared<tensor::Tensor>(kDefaultValueSwitchMap[attr_tensor_type], shape_vector); - auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c()); - ret = memcpy_s(tensor_data_buf, tensor_info->Size(), tensor_buf.data(), tensor_buf.size()); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - return false; - } - prim->set_attr(attr_name, MakeValue(tensor_info)); - } else { - if (attr_tensor_type == onnx::TensorProto_DataType_DOUBLE) { - size_t data_size = sizeof(double); - double attr_value = 0.0; - ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size()); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - return false; - } - prim->set_attr(attr_name, MakeValue<double>(attr_value)); - } else if (attr_tensor_type == onnx::TensorProto_DataType_INT64) { - size_t data_size = sizeof(int64_t); - int64_t attr_value = 0; - ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size()); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - return false; - } - prim->set_attr(attr_name, MakeValue<int64_t>(attr_value)); - } else if (attr_tensor_type == onnx::TensorProto_DataType_BOOL) { - size_t data_size = sizeof(bool); - bool attr_value = false; - ret = memcpy_s(&attr_value, data_size, tensor_buf.data(), tensor_buf.size()); - if (EOK != ret) { - MS_LOG(ERROR) << "memcpy_s error"; - return false; - } - prim->set_attr(attr_name, MakeValue<bool>(attr_value)); - } - } - return ret == EOK; -} - 
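// ---------------------------------------------------------------------------
// Annotation: GetAttrValueForCNode and GetAttrValueForValueNode below key
// their parsing off the "ref_attr_name" prefix of each onnx::AttributeProto.
// A minimal sketch of that dispatch, simplified to a prefix match and assuming
// only the ParseForm enum and kParseTypeSwitchMap defined earlier in this
// file; ParseFormOf is a hypothetical helper, not code from the repository:
//
//   ParseForm ParseFormOf(const std::string &ref_attr_name) {
//     for (const auto &entry : kParseTypeSwitchMap) {  // "type", "scalar", "tensor"
//       if (ref_attr_name.compare(0, entry.first.size() + 1, entry.first + ":") == 0) {
//         return entry.second;  // e.g. "scalar:Tuple[...]" -> FORM_PARSE_SCALAR
//       }
//     }
//     return FORM_PARSE_SCALAR;  // fallback chosen for the sketch only
//   }
//
// FORM_PARSE_SCALAR values are accumulated into a string -> ValuePtr map and
// rebuilt into nested ValueTuples by ParserScalarAttrValue (defined above),
// while FORM_PARSE_TYPE and FORM_PARSE_TENSOR short-circuit to the
// Obtain*InTypeForm and Obtain*InTensorForm helpers.
// ---------------------------------------------------------------------------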
-bool AnfImporterFromMindir::GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto) { - if (prim == nullptr) { - return false; - } - const std::string &attr_name = attr_proto.name(); - if (!attr_proto.has_ref_attr_name()) { - MS_LOG(ERROR) << "CNode parse attr type has no ref_attr_name"; - return false; - } - const std::string &ref_attr_name = attr_proto.ref_attr_name(); - if (ref_attr_name.empty()) { - MS_LOG(ERROR) << "ref_attr_name is empty"; - return false; - } - string type = ""; - std::size_t pos(0); - if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("scalar:").length() - 1); - } else if ((pos = ref_attr_name.find("type:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("type:").length() - 1); - } else if ((pos = ref_attr_name.find("tensor:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("tensor:").length() - 1); - } - std::unordered_map<std::string, ValuePtr> kv; - for (int i = 0; i < attr_proto.tensors_size(); i++) { - const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); - switch (kParseTypeSwitchMap[type]) { - case FORM_PARSE_TYPE: { - return ObtainCNodeAttrInTypeForm(prim, attr_name, attr_tensor); - } - case FORM_PARSE_SCALAR: { - auto res = ObtainCNodeAttrInScalarForm(attr_tensor); - kv.insert(std::pair<string, ValuePtr>(attr_tensor.name(), res)); - break; - } - case FORM_PARSE_TENSOR: { - return ObtainCNodeAttrInTensorForm(prim, attr_name, attr_tensor); - } - default: - MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; - return false; - } - } - if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) { - if (kv.size() == 1) { - auto iter = kv.begin(); - prim->AddAttr(attr_name, iter->second); - } else { - auto res = ParserScalarAttrValue(ref_attr_name, kv); - prim->AddAttr(attr_name, res); - } - } - return true; -} - -bool AnfImporterFromMindir::ObtainValueNodeInTensorForm(const std::string &value_node_name, - const onnx::TensorProto &attr_tensor) { - const int attr_tensor_type = attr_tensor.data_type(); - std::vector<int> shape; - for (int i = 0; i < attr_tensor.dims_size(); ++i) { - shape.push_back(attr_tensor.dims(i)); - } - std::vector<int> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int>(value); }); - ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - param_value->set_tensor_shape(shape_vector); - param_value->set_tensor_type(kDefaultValueSwitchMap[attr_tensor_type]); - const std::string &tensor_buf = attr_tensor.raw_data(); - auto tensor_data = new (std::nothrow) char[tensor_buf.size()]; - if (tensor_data == nullptr) { - MS_LOG(ERROR) << "Tensor_data is nullptr"; - return false; - } - auto ret = memcpy_s(tensor_data, tensor_buf.size(), tensor_buf.data(), tensor_buf.size()); - if (ret != EOK) { - delete[] tensor_data; - MS_LOG(ERROR) << "Memcpy error: " << ret; - return false; - } - param_value->SetTensorData(tensor_data, tensor_buf.size()); - auto new_value_node = NewValueNode(MakeValue(param_value)); - if (new_value_node == nullptr) { - MS_LOG(ERROR) << "Make valuenode fail"; - return false; - } - auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type]); - std::vector<int64_t> shape_vector_int64; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector_int64), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto abstract_tensor = 
std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector_int64); - new_value_node->set_abstract(abstract_tensor); - anfnode_build_map_[value_node_name] = new_value_node; - return true; -} - -bool AnfImporterFromMindir::ObtainValueNodeInTypeForm(const std::string &value_node_name, - const onnx::TensorProto &attr_tensor) { - const int attr_tensor_type = attr_tensor.data_type(); - if (kDefaultValueSwitchMap.find(attr_tensor_type) == kDefaultValueSwitchMap.end()) { - MS_LOG(ERROR) << "Obtain ValueNode attr in type-form has not support input type: " << attr_tensor_type; - return false; - } - auto new_value_node = NewValueNode(TypeIdToType(kDefaultValueSwitchMap[attr_tensor_type])); - abstract::AbstractTypePtr abs_type = std::make_shared<abstract::AbstractType>(std::make_shared<TypeType>()); - new_value_node->set_abstract(abs_type); - anfnode_build_map_[value_node_name] = new_value_node; - return true; -} - -bool AnfImporterFromMindir::GetAttrValueForValueNode(const std::string &value_node_name, - const onnx::AttributeProto &attr_proto) { - if (!attr_proto.has_ref_attr_name()) { - MS_LOG(ERROR) << "CNode parse attr type has no ref_attr_name"; - return false; - } - const std::string &ref_attr_name = attr_proto.ref_attr_name(); - if (ref_attr_name.empty()) { - MS_LOG(ERROR) << "ref_attr_name is empty"; - return false; - } - string type = ""; - std::size_t pos(0); - if ((pos = ref_attr_name.find("scalar:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("scalar:").length() - 1); - } else if ((pos = ref_attr_name.find("type:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("type:").length() - 1); - } else if ((pos = ref_attr_name.find("tensor:")) != std::string::npos) { - type = ref_attr_name.substr(pos, string("tensor:").length() - 1); - } - std::unordered_map<std::string, ValuePtr> kv; - for (int i = 0; i < attr_proto.tensors_size(); i++) { - const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); - switch (kParseTypeSwitchMap[type]) { - case FORM_PARSE_TYPE: { - return ObtainValueNodeInTypeForm(value_node_name, attr_tensor); - } - case FORM_PARSE_SCALAR: { - auto res = ObtainCNodeAttrInScalarForm(attr_tensor); - kv.insert(std::pair<string, ValuePtr>(attr_tensor.name(), res)); - break; - } - case FORM_PARSE_TENSOR: { - return ObtainValueNodeInTensorForm(value_node_name, attr_tensor); - } - default: - MS_LOG(ERROR) << "parse attr type don't support input of ref_attr_name"; - return false; - } - } - - ValueNodePtr new_value_node; - if (kParseTypeSwitchMap[type] == FORM_PARSE_SCALAR) { - if (kv.size() == 1) { - auto iter = kv.begin(); - new_value_node = NewValueNode(iter->second); - new_value_node->set_abstract(iter->second->ToAbstract()); - } else { - auto value_ptr = ParserScalarAttrValue(ref_attr_name, kv); - new_value_node = NewValueNode(value_ptr); - new_value_node->set_abstract(value_ptr->ToAbstract()); - } - anfnode_build_map_[value_node_name] = new_value_node; - } - return true; -} - -bool AnfImporterFromMindir::BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto) { - const std::string &value_node_name = node_proto.output(0); - const onnx::AttributeProto &attr_proto = node_proto.attribute(0); - if (!attr_proto.has_ref_attr_name()) { - MS_LOG(ERROR) << "parse ValueNode don't have ref_attr_name"; - return false; - } - return GetAttrValueForValueNode(value_node_name, attr_proto); -} - -std::unordered_map<std::string, abstract::AbstractTensorPtr> AnfImporterFromMindir::GetAbstractForCNode( - const onnx::AttributeProto &attr_proto) { - 
std::unordered_map<std::string, abstract::AbstractTensorPtr> kv; - for (int i = 0; i < attr_proto.tensors_size(); i++) { - std::vector<int> shape; - const onnx::TensorProto &attr_tensor = attr_proto.tensors(i); - for (int j = 0; j < attr_tensor.dims_size(); ++j) { - shape.push_back(attr_tensor.dims(j)); - } - std::vector<int64_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[attr_tensor.data_type()]); - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - kv.insert(std::pair<string, abstract::AbstractTensorPtr>(attr_tensor.name(), abstract_tensor)); - } - return kv; -} - -CNodePtr AnfImporterFromMindir::BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph, - const onnx::NodeProto &node_proto, - const schema::QuantType &quantType) { - static bool interrupt = false; - if (outputFuncGraph == nullptr) { - MS_LOG(ERROR) << "output funcgraph is nullptr"; - return nullptr; - } - if (!node_proto.has_op_type()) { - MS_LOG(ERROR) << "Get CNode op_type failed!"; - return nullptr; - } - const std::string &node_name = node_proto.output(0); - const std::string &fullname_with_scope = node_proto.domain(); - const std::string &node_type = node_proto.op_type(); - PrimitivePtr prim = std::make_shared<mindspore::Primitive>(node_type); - if (prim == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - prim->set_instance_name(node_type); - std::unordered_map<std::string, abstract::AbstractTensorPtr> kv; - string shape_ref_attr_name; - for (int i = 0; i < node_proto.attribute_size(); ++i) { - const onnx::AttributeProto &attr_proto = node_proto.attribute(i); - if (attr_proto.ref_attr_name().find("shape:") != string::npos) { - shape_ref_attr_name = attr_proto.ref_attr_name(); - kv = GetAbstractForCNode(attr_proto); - continue; - } - if (!GetAttrValueForCNode(prim, attr_proto)) { - MS_LOG(ERROR) << "Get CNode attr failed!"; - return nullptr; - } - } - - std::vector<AnfNodePtr> inputs; - inputs.clear(); - for (int i = 0; i < node_proto.input_size(); ++i) { - const std::string &input_name = node_proto.input(i); - if (anfnode_build_map_.find(input_name) == anfnode_build_map_.end()) { - if (!interrupt) { - MS_LOG(ERROR) << node_name << " input " << i << input_name << "can't find in nodes have parsed"; - interrupt = true; - } - inputs.push_back(nullptr); - } else { - inputs.push_back(anfnode_build_map_[input_name]); - } - } - auto primitivec_ptr = PrimitiveC::Create(*prim, inputs, quantType); - if (primitivec_ptr == nullptr || interrupt) { - interrupt = true; - if (primitivec_ptr == nullptr) { - NoSupportOp::GetInstance()->InsertOp(prim->name()); - } - return nullptr; - } - inputs.insert(inputs.begin(), NewValueNode(primitivec_ptr)); - CNodePtr cnode_ptr = outputFuncGraph->NewCNode(inputs); - if (cnode_ptr == nullptr) { - interrupt = true; - MS_LOG(ERROR) << "funcgraph new cnode failed"; - return nullptr; - } - if (kv.empty()) { - AbstractBasePtrList elem; - for (size_t index = 1; index < cnode_ptr->inputs().size(); ++index) { - elem.push_back(cnode_ptr->input(index)->abstract()); - } - cnode_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem)); - } else if (1 == kv.size()) { - auto iter = kv.begin(); - cnode_ptr->set_abstract(iter->second); - } else { - auto abstract = ParserAttrShape(shape_ref_attr_name, kv); - cnode_ptr->set_abstract(abstract); - } - - 
cnode_ptr->set_fullname_with_scope(fullname_with_scope); - anfnode_build_map_[node_name] = cnode_ptr; - return cnode_ptr; -} - -bool AnfImporterFromMindir::BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph, - const onnx::GraphProto &importProto, const CNodePtr &cnode_ptr) { - if (outputFuncGraph == nullptr || cnode_ptr == nullptr) { - MS_LOG(ERROR) << "output funcgraph or cnode is nullptr"; - return false; - } - std::vector<AnfNodePtr> inputs; - if (importProto.output_size() > 1) { - inputs.clear(); - auto primitiveT = std::make_unique<schema::PrimitiveT>(); - MS_ASSERT(primitiveT != nullptr); - primitiveT->value.type = schema::PrimitiveType_MakeTuple; - std::shared_ptr<PrimitiveC> primitivec_ptr = std::make_shared<PrimitiveC>(primitiveT.release()); - MS_ASSERT(primitivec_ptr != nullptr); - inputs.push_back(NewValueNode(primitivec_ptr)); - AbstractBasePtrList elem; - for (int out_size = 0; out_size < importProto.output_size(); ++out_size) { - const onnx::ValueInfoProto &output_node = importProto.output(out_size); - const std::string &out_tuple = output_node.name(); - inputs.push_back(anfnode_build_map_[out_tuple]); - if (anfnode_build_map_[out_tuple] == nullptr) { - MS_LOG(ERROR) << "AnfNode is nullptr"; - return false; - } - elem.push_back(anfnode_build_map_[out_tuple]->abstract()); - } - auto maketuple_ptr = outputFuncGraph->NewCNode(inputs); - if (maketuple_ptr == nullptr) { - MS_LOG(ERROR) << "maketuple_ptr is nullptr"; - return false; - } - maketuple_ptr->set_abstract(std::make_shared<abstract::AbstractTuple>(elem)); - inputs.clear(); - auto primReturn = std::make_unique<schema::PrimitiveT>(); - MS_ASSERT(primReturn != nullptr); - primReturn->value.type = schema::PrimitiveType_Return; - std::shared_ptr<PrimitiveC> primitive_return_value_ptr = std::make_shared<PrimitiveC>(primReturn.release()); - MS_ASSERT(primitive_return_value_ptr != nullptr); - inputs.push_back(NewValueNode(primitive_return_value_ptr)); - inputs.push_back(maketuple_ptr); - auto return_node = outputFuncGraph->NewCNode(inputs); - if (return_node == nullptr) { - MS_LOG(ERROR) << "funcgraph new cnode failed"; - return false; - } - outputFuncGraph->set_return(return_node); - MS_LOG(INFO) << "Construct funcgraph finined, all success."; - } else { - const onnx::ValueInfoProto &output_node = importProto.output(0); - const onnx::TypeProto &output_typeproto = output_node.type(); - int output_type = output_typeproto.tensor_type().elem_type(); - std::vector<int> output_shape; - for (int i = 0; i < output_typeproto.tensor_type().shape().dim_size(); ++i) { - output_shape.push_back(output_typeproto.tensor_type().shape().dim(i).dim_value()); - } - std::vector<int64_t> shape_vector; - (void)std::transform(output_shape.begin(), output_shape.end(), std::back_inserter(shape_vector), - [](const int32_t &value) { return static_cast<int64_t>(value); }); - auto type_ptr = TypeIdToType(kDefaultValueSwitchMap[output_type]); - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - inputs.clear(); - auto primReturn = std::make_unique<schema::PrimitiveT>(); - MS_ASSERT(primReturn != nullptr); - primReturn->value.type = schema::PrimitiveType_Return; - std::shared_ptr<PrimitiveC> primitiveTReturnValuePtr = std::make_shared<PrimitiveC>(primReturn.release()); - MS_ASSERT(primitiveTReturnValuePtr != nullptr); - inputs.push_back(NewValueNode(primitiveTReturnValuePtr)); - inputs.push_back(cnode_ptr); - auto return_node = outputFuncGraph->NewCNode(inputs); - if (return_node == nullptr) { - MS_LOG(ERROR) 
<< "funcgraph new cnode failed"; - return false; - } - return_node->set_abstract(abstract_tensor); - outputFuncGraph->set_return(return_node); - MS_LOG(INFO) << "Construct funcgraph finined, all success!"; - } - return true; -} - -int AnfImporterFromMindir::ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, - const schema::QuantType &quantType) { - if (outputFuncGraph == nullptr) { - MS_LOG(ERROR) << "funcgraph is nullptr"; - return RET_NULL_PTR; - } - MS_LOG(INFO) << "The CNdoe size : " << importProto.node_size(); - CNodePtr cnode_ptr = nullptr; - CNodePtr last_cnode_ptr = nullptr; - int status = RET_OK; - NoSupportOp::GetInstance()->SetFmkType("MINDIR"); - for (int i = 0; i < importProto.node_size(); ++i) { - const onnx::NodeProto &node_proto = importProto.node(i); - const std::string &node_type = node_proto.op_type(); - if (node_type == kConstantValueNode) { - if (status == RET_OK && !BuildValueNodeForFuncGraph(node_proto)) { - MS_LOG(ERROR) << "Build ValueNode for funcgraph fail at index: : " << i; - status = RET_ERROR; - } - continue; - } - cnode_ptr = BuildCNodeForFuncGraph(outputFuncGraph, node_proto, quantType); - if (cnode_ptr == nullptr) { - MS_LOG(ERROR) << "Build CNode for funcgraph fail at index: : " << i; - return RET_ERROR; - } - - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode_ptr->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; - return RET_ERROR; - } - } - if (status != RET_OK) { - return status; - } - if (!BuildReturnForFuncGraph(outputFuncGraph, importProto, cnode_ptr)) { - MS_LOG(ERROR) << "Build ReturnNode for funcgraph failed"; - status = RET_ERROR; - } - return status; -} - -int AnfImporterFromMindir::BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, - const schema::QuantType &quantType) { - if (outputFuncGraph == nullptr) { - MS_LOG(ERROR) << "fundgraph is nullptr"; - return RET_NULL_PTR; - } - GraphDebugInfoPtr debug_info_ptr = outputFuncGraph->debug_info(); - if (debug_info_ptr == nullptr) { - MS_LOG(ERROR) << "funcgraph's debug info is nullptr"; - return RET_NULL_PTR; - } - if (importProto.has_name()) { - debug_info_ptr->set_name(importProto.name()); - } else { - MS_LOG(INFO) << "FuncGraph under converting has not name!"; - } - - auto status = ImportParametersForGraph(outputFuncGraph, importProto); - if (status != RET_OK) { - return status; - } - return ImportNodesForGraph(outputFuncGraph, importProto, quantType); -} - -int AnfImporterFromMindir::ParseModelConfigureInfo(const onnx::ModelProto &model_proto) { - if (!model_proto.has_producer_name()) { - MS_LOG(ERROR) << "Parse model producer name from pb file failed!"; - return RET_GRAPH_FILE_ERR; - } - producer_name_ = model_proto.producer_name(); - - if (!model_proto.has_model_version()) { - MS_LOG(ERROR) << "Parse model producer version from pb file failed!"; - return RET_GRAPH_FILE_ERR; - } - model_version_ = model_proto.model_version(); - - if (!model_proto.has_ir_version()) { - MS_LOG(ERROR) << "Parse model version from pb file failed!"; - return RET_GRAPH_FILE_ERR; - } - ir_version_ = model_proto.ir_version(); - return RET_OK; -} - -int AnfImporterFromMindir::Import(const converter::Flags *flag) { - if (flag->trainModel) { - func_graph_ = LoadMindIR(flag->modelFile, true); - if (func_graph_ != nullptr) { - return RET_OK; - } else { - MS_LOG(ERROR) << "Parse new mind_ir proto failed, Trying old onnx format"; - } - } - onnx_model_ = ReadOnnxFromBinary(flag->modelFile); 
- if (onnx_model_ == nullptr) { - MS_LOG(DEBUG) << "Parse model failed, which is not an old mindir model"; - func_graph_ = LoadMindIR(flag->modelFile, true); - if (func_graph_ == nullptr) { - MS_LOG(ERROR) << "The mindir model cannot be parsed, which may not match proto file."; - return RET_GRAPH_FILE_ERR; - } - return RET_OK; - } - FuncGraphPtr dstGraph = std::make_shared<mindspore::FuncGraph>(); - if (dstGraph == nullptr) { - MS_LOG(ERROR) << "funcgraph is nullptr"; - return RET_NULL_PTR; - } - int status = ParseModelConfigureInfo(*onnx_model_); - if (status != RET_OK) { - MS_LOG(ERROR) << "Parse configuration info for pb file failed!"; - return status; - } - auto quantType = flag->quantType; - const onnx::GraphProto &graphBuild = onnx_model_->graph(); - status = BuildFuncGraph(dstGraph, graphBuild, quantType); - if (status != RET_OK) { - MS_LOG(ERROR) << "Build funcgraph failed!"; - func_graph_ = nullptr; - return status; - } - func_graph_ = dstGraph; - MS_LOG(INFO) << "Parse pb to build FuncGraph Success!"; - return RET_OK; -} - -onnx::ModelProto *AnfImporterFromMindir::ReadOnnxFromBinary(const std::string &model_path) { - auto onnx_model = new (std::nothrow) onnx::ModelProto; - if (onnx_model == nullptr) { - MS_LOG(ERROR) << "New onnx ModelProto failed!"; - return nullptr; - } - if (RET_OK != ValidateFileStr(model_path, ".mindir")) { - MS_LOG(ERROR) << "INPUT ILLEGAL: modelFile must be *.mindir"; - delete (onnx_model); - return nullptr; - } - if (ReadProtoFromBinaryFile((const char *)model_path.c_str(), onnx_model) != RET_OK) { - MS_LOG(ERROR) << "Read onnx model file failed, which is not a matched onnx model"; - delete (onnx_model); - return nullptr; - } - return onnx_model; -} - -FuncGraphPtr AnfImporterFromMindir::GetResult() { return this->func_graph_; } -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/anf_importer/import_from_mindir.h b/mindspore/lite/tools/anf_importer/import_from_mindir.h deleted file mode 100644 index d47bb23932..0000000000 --- a/mindspore/lite/tools/anf_importer/import_from_mindir.h +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ -#define MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ - -#include <map> -#include <string> -#include <unordered_map> -#include <utility> - -#include "include/errorcode.h" -#include "proto/onnx.pb.h" -#include "tools/converter/converter_context.h" -#include "tools/anf_importer/anf_importer.h" -#include "abstract/abstract_value.h" - -namespace mindspore::lite { -class AnfImporterFromMindir : public AnfImporter { - public: - AnfImporterFromMindir() = default; - - ~AnfImporterFromMindir() override { delete onnx_model_; } - - static onnx::ModelProto *ReadOnnxFromBinary(const std::string &model_path); - - FuncGraphPtr GetResult() override; - - int Import(const converter::Flags *flag) override; - - private: - int ConverterConstTensor() override { return RET_ERROR; }; - int ConverterCNode() override { return RET_ERROR; }; - int AddReturnCNode() override { return RET_ERROR; }; - int ParseModelConfigureInfo(const onnx::ModelProto &model_proto); - int BuildFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, - const schema::QuantType &quantType); - int ImportParametersForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto); - int ImportNodesForGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, - const schema::QuantType &quantType); - int BuildParameterForFuncGraph(const ParameterPtr &node, const onnx::ValueInfoProto &value_proto); - CNodePtr BuildCNodeForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::NodeProto &node_proto, - const schema::QuantType &quantType); - bool BuildReturnForFuncGraph(const FuncGraphPtr &outputFuncGraph, const onnx::GraphProto &importProto, - const CNodePtr &cnode_ptr); - static bool GetAttrValueForCNode(const PrimitivePtr &prim, const onnx::AttributeProto &attr_proto); - static bool ObtainCNodeAttrInTypeForm(const PrimitivePtr &prim, const std::string &attr_name, - const onnx::TensorProto &attr_tensor); - static ValuePtr ObtainCNodeAttrInScalarForm(const onnx::TensorProto &attr_tensor); - static bool ObtainCNodeAttrInTensorForm(const PrimitivePtr &prim, const std::string &attr_name, - const onnx::TensorProto &attr_tensor); - bool BuildValueNodeForFuncGraph(const onnx::NodeProto &node_proto); - bool ObtainValueNodeInTensorForm(const string &value_node_name, const onnx::TensorProto &attr_tensor); - bool GetAttrValueForValueNode(const std::string &value_node_name, const onnx::AttributeProto &attr_proto); - bool ObtainValueNodeInTypeForm(const string &value_node_name, const onnx::TensorProto &attr_tensor); - static std::unordered_map<std::string, abstract::AbstractTensorPtr> GetAbstractForCNode( - const onnx::AttributeProto &attr_proto); - - private: - std::string producer_name_; - int model_version_{}; - int ir_version_{}; - std::unordered_map<std::string, AnfNodePtr> anfnode_build_map_; - std::map<std::string, onnx::TensorProto> default_para_map_; - onnx::ModelProto *onnx_model_ = nullptr; - FuncGraphPtr func_graph_ = nullptr; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_TOOLS_COMMON_ANF_IMPORTER_IMPORTER_FROM_PROTOBUF_H_ diff --git a/mindspore/lite/tools/common/flag_parser.cc b/mindspore/lite/tools/common/flag_parser.cc index fe41d832b4..1c4ed26b79 100644 --- a/mindspore/lite/tools/common/flag_parser.cc +++ b/mindspore/lite/tools/common/flag_parser.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei 
Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -165,7 +165,7 @@ std::string FlagParser::Usage(const Option<std::string> &usgMsg) const { ReplaceAll(&helpInfo, "\n\r", "\n"); usageLine += thisLine + "\n"; } else { - // breif help message + // brief help message helpLine = thisLine + " " + helpInfo + "\n"; } } diff --git a/mindspore/lite/tools/common/flag_parser.h b/mindspore/lite/tools/common/flag_parser.h index dc4c913db5..7a69333ee5 100644 --- a/mindspore/lite/tools/common/flag_parser.h +++ b/mindspore/lite/tools/common/flag_parser.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/graph_util.cc b/mindspore/lite/tools/common/graph_util.cc index 6cf1ed5851..52182389f2 100644 --- a/mindspore/lite/tools/common/graph_util.cc +++ b/mindspore/lite/tools/common/graph_util.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ */ #include "tools/common/graph_util.h" +#include <algorithm> #include <ctime> #include <utility> #include <set> @@ -389,7 +390,8 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_ } NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex, - std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, int *insert_num, + const OpDefCopyer &opDefCopyer) { MS_ASSERT(graphT != nullptr); MS_ASSERT(errorCode != nullptr); if (existNodeIdx >= graphT->nodes.size()) { @@ -399,17 +401,20 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPla auto node_iter = graphT->nodes.begin() + existNodeIdx; MS_ASSERT(node_iter != graphT->nodes.begin()); MS_ASSERT((*node_iter) != nullptr); - return InsertNode(graphT, node_iter, place, inoutIndex, std::move(toAddNode), errorCode); + return InsertNode(graphT, node_iter, place, inoutIndex, std::move(toAddNode), errorCode, insert_num); } NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx, - std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + std::unique_ptr<CNodeT> toAddNode, STATUS *errorCode, int *insert_num, + const OpDefCopyer &opDefCopyer) { MS_ASSERT(graphT != nullptr); MS_ASSERT(errorCode != nullptr); if (place == kBefore) { - return InsertNodeBefore(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, opDefCopyer); + return InsertNodeBefore(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, insert_num, + opDefCopyer); } else if (place == kAfter) { - return InsertNodeAfter(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, opDefCopyer); + return InsertNodeAfter(graphT, existNodeIter, inoutIndexIdx, std::move(toAddNode), errorCode, insert_num, + opDefCopyer); } else { MS_LOG(ERROR) << "Invalid InsertPlace : " << place; return graphT->nodes.end(); @@ -417,7 +422,8 @@ NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter 
existNodeIter, InsertPl } NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx, - std::unique_ptr<CNodeT> toAddNodeIn, STATUS *errorCode, const OpDefCopyer &opDefCopyer) { + std::unique_ptr<CNodeT> toAddNodeIn, STATUS *errorCode, int *insert_num, + const OpDefCopyer &opDefCopyer) { MS_ASSERT(graphT != nullptr); MS_ASSERT(errorCode != nullptr); auto &existNode = *existNodeIter; @@ -428,27 +434,29 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si MS_ASSERT(graphT->allTensors.size() > preTensorIdx); auto preNodeIdxes = GetInputNodeIdx(*graphT, *(existNode), inputIndexIdx); - if (preNodeIdxes.empty()) { + size_t insert_node_num = preNodeIdxes.empty() ? 1 : preNodeIdxes.size(); + std::vector<std::unique_ptr<CNodeT>> toAddNodes; + for (size_t i = 0; i < insert_node_num; ++i) { auto &preTensor = graphT->allTensors.at(preTensorIdx); MS_ASSERT(preTensor != nullptr); auto toAddTensor = CopyTensorDefT(preTensor); if (toAddTensor == nullptr) { - MS_LOG(ERROR) << "Copy TensorT failed"; *errorCode = RET_NULL_PTR; + MS_LOG(ERROR) << "Copy Tensor failed"; return graphT->nodes.end(); } toAddTensor->nodeType = schema::NodeType_CNode; - preTensor->refCount = 0; - preTensor->data.clear(); + toAddTensor->refCount = 0; + toAddTensor->data.clear(); MS_ASSERT(toAddNodeIn->primitive != nullptr); if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); MS_ASSERT(prim != nullptr); - preTensor->dataType = prim->srcT; - toAddTensor->dataType = prim->dstT; - if (prim->srcT == TypeId::kNumberTypeUInt8 && prim->dstT == TypeId::kNumberTypeInt8) { + preTensor->dataType = prim->src_t; + toAddTensor->dataType = prim->dst_t; + if (prim->src_t == TypeId::kNumberTypeUInt8 && prim->dst_t == TypeId::kNumberTypeInt8) { preTensor->quantParams.front()->zeroPoint += 128; - } else if (prim->srcT == TypeId::kNumberTypeInt8 && prim->dstT == TypeId::kNumberTypeUInt8) { + } else if (prim->src_t == TypeId::kNumberTypeInt8 && prim->dst_t == TypeId::kNumberTypeUInt8) { toAddTensor->quantParams.front()->zeroPoint += 128; } } @@ -460,6 +468,9 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si *errorCode = RET_NULL_PTR; return graphT->nodes.end(); } + if (!preNodeIdxes.empty()) { + toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i); + } toAddNode->inputIndex.clear(); toAddNode->inputIndex.push_back(preTensorIdx); toAddNode->outputIndex.clear(); @@ -470,65 +481,19 @@ NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, si break; } } + toAddNodes.emplace_back(std::move(toAddNode)); + } + for (auto &toAddNode : toAddNodes) { existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode)); existNodeIter++; - } else { - std::vector<std::unique_ptr<CNodeT>> toAddNodes; - for (size_t i = 0; i < preNodeIdxes.size(); i++) { - MS_ASSERT(graphT->nodes.size() > preNodeIdxes.at(i)); - auto &preTensor = graphT->allTensors.at(preTensorIdx); - MS_ASSERT(preTensor != nullptr); - auto toAddTensor = CopyTensorDefT(preTensor); - if (toAddTensor == nullptr) { - *errorCode = RET_NULL_PTR; - MS_LOG(ERROR) << "Copy TensorT failed"; - return graphT->nodes.end(); - } - toAddTensor->nodeType = schema::NodeType_CNode; - MS_ASSERT(toAddNodeIn->primitive != nullptr); - if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); - MS_ASSERT(prim != 
nullptr); - preTensor->dataType = prim->srcT; - toAddTensor->dataType = prim->dstT; - if (prim->srcT == TypeId::kNumberTypeUInt8 && prim->dstT == TypeId::kNumberTypeInt8) { - preTensor->quantParams.front()->zeroPoint += 128; - } else if (prim->srcT == TypeId::kNumberTypeInt8 && prim->dstT == TypeId::kNumberTypeUInt8) { - toAddTensor->quantParams.front()->zeroPoint += 128; - } - } - graphT->allTensors.emplace_back(std::move(toAddTensor)); - size_t toAddTensorIdx = graphT->allTensors.size() - 1; - auto toAddNode = opDefCopyer(toAddNodeIn.get()); - if (toAddNode == nullptr) { - MS_LOG(ERROR) << "copy toAddNodeIn failed"; - *errorCode = RET_NULL_PTR; - return graphT->nodes.end(); - } - toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i++); - toAddNode->inputIndex.clear(); - toAddNode->inputIndex.push_back(preTensorIdx); - toAddNode->outputIndex.clear(); - toAddNode->outputIndex.push_back(toAddTensorIdx); - for (auto iter = existNode->inputIndex.begin(); iter != existNode->inputIndex.end(); iter++) { - if (*iter == preTensorIdx) { - *iter = toAddTensorIdx; - break; - } - } - toAddNodes.emplace_back(std::move(toAddNode)); - } - for (auto &toAddNode : toAddNodes) { - existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode)); - existNodeIter++; - } + *insert_num += 1; } *errorCode = RET_OK; return existNodeIter; } NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx, - std::unique_ptr<schema::CNodeT> toAddNodeIn, STATUS *errorCode, + std::unique_ptr<schema::CNodeT> toAddNodeIn, STATUS *errorCode, int *insert_num, const OpDefCopyer &opDefCopyer) { MS_ASSERT(graphT != nullptr); MS_ASSERT(errorCode != nullptr); @@ -538,9 +503,12 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz MS_ASSERT(toAddNodeIn != nullptr); auto postTensorIdx = existNode->outputIndex.at(outputIndexIdx); MS_ASSERT(graphT->allTensors.size() > postTensorIdx); - auto postNodeIdxes = GetOutputNodeIdx(*graphT, *(existNode), outputIndexIdx); - if (postNodeIdxes.empty()) { + bool is_output_index = IsContain(graphT->outputIndex, postTensorIdx); + size_t insert_node_num = (postNodeIdxes.empty() || is_output_index) ? 
postNodeIdxes.size() + 1 : postNodeIdxes.size(); + bool has_insert_for_graph_out = postNodeIdxes.empty() || is_output_index; + std::vector<std::unique_ptr<schema::CNodeT>> toAddNodes; + for (size_t i = 0; i < insert_node_num; ++i) { auto &postTensor = graphT->allTensors.at(postTensorIdx); MS_ASSERT(postTensor != nullptr); auto toAddTensor = CopyTensorDefT(postTensor); @@ -554,11 +522,11 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { auto prim = toAddNodeIn->primitive->value.AsQuantDTypeCast(); MS_ASSERT(prim != nullptr); - postTensor->dataType = prim->srcT; - toAddTensor->dataType = prim->dstT; - if (prim->srcT == TypeId::kNumberTypeInt8 && prim->dstT == TypeId::kNumberTypeUInt8) { + postTensor->dataType = prim->src_t; + toAddTensor->dataType = prim->dst_t; + if (prim->src_t == TypeId::kNumberTypeInt8 && prim->dst_t == TypeId::kNumberTypeUInt8) { toAddTensor->quantParams.front()->zeroPoint += 128; - } else if (prim->srcT == TypeId::kNumberTypeUInt8 && prim->dstT == TypeId::kNumberTypeInt8) { + } else if (prim->src_t == TypeId::kNumberTypeUInt8 && prim->dst_t == TypeId::kNumberTypeInt8) { postTensor->quantParams.front()->zeroPoint += 128; } } @@ -574,92 +542,30 @@ NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, siz toAddNode->inputIndex.push_back(postTensorIdx); toAddNode->outputIndex.clear(); toAddNode->outputIndex.push_back(toAddTensorIdx); - for (auto iter = graphT->outputIndex.begin(); iter != graphT->outputIndex.end(); iter++) { - if (*iter == postTensorIdx) { - *iter = toAddTensorIdx; - break; - } + if (!postNodeIdxes.empty()) { + toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i); } - existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode)); - existNodeIter++; - } else { - std::vector<std::unique_ptr<schema::CNodeT>> toAddNodes; - int i = 0; - for (size_t postNodeIdx : postNodeIdxes) { - MS_ASSERT(graphT->nodes.size() > postNodeIdx); - auto &postNode = graphT->nodes.at(postNodeIdx); - MS_ASSERT(postNode != nullptr); - auto &postTensor = graphT->allTensors.at(postTensorIdx); - MS_ASSERT(postTensor != nullptr); - // for multioutput,when one outpout as other node input,need add one more node - if (IsContain(graphT->outputIndex, postTensorIdx)) { - auto toAddTensor = CopyTensorDefT(postTensor); - if (toAddTensor == nullptr) { - MS_LOG(ERROR) << "Copy TensorT failed"; - *errorCode = RET_NULL_PTR; - return graphT->nodes.end(); - } - toAddTensor->nodeType = schema::NodeType_CNode; - graphT->allTensors.emplace_back(std::move(toAddTensor)); - size_t toAddTensorIdx = graphT->allTensors.size() - 1; - auto toAddNode = opDefCopyer(toAddNodeIn.get()); - toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i++); - toAddNode->inputIndex.clear(); - toAddNode->inputIndex.push_back(postTensorIdx); - toAddNode->outputIndex.clear(); - toAddNode->outputIndex.push_back(toAddTensorIdx); - for (auto iter = graphT->outputIndex.begin(); iter != graphT->outputIndex.end(); iter++) { - if (*iter == postTensorIdx) { - *iter = toAddTensorIdx; - break; - } - } - toAddNodes.emplace_back(std::move(toAddNode)); - } - auto toAddTensor = CopyTensorDefT(postTensor); - if (toAddTensor == nullptr) { - MS_LOG(ERROR) << "Copy TensorT failed"; - *errorCode = RET_NULL_PTR; - return graphT->nodes.end(); - } - MS_ASSERT(toAddNodeIn->primitive != nullptr); - if (toAddNodeIn->primitive->value.type == schema::PrimitiveType_QuantDTypeCast) { - auto prim = 
toAddNodeIn->primitive->value.AsQuantDTypeCast(); - MS_ASSERT(prim != nullptr); - postTensor->dataType = prim->srcT; - toAddTensor->dataType = prim->dstT; - if (prim->dstT == TypeId::kNumberTypeUInt8 && prim->srcT == TypeId::kNumberTypeInt8) { - toAddTensor->quantParams.front()->zeroPoint += 128; - } else if (prim->srcT == TypeId::kNumberTypeUInt8 && prim->dstT == TypeId::kNumberTypeInt8) { - postTensor->quantParams.front()->zeroPoint += 128; + if (has_insert_for_graph_out) { + for (auto iter = graphT->outputIndex.begin(); iter != graphT->outputIndex.end(); iter++) { + if (*iter == postTensorIdx) { + *iter = toAddTensorIdx; } } - graphT->allTensors.emplace_back(std::move(toAddTensor)); - size_t toAddTensorIdx = graphT->allTensors.size() - 1; - auto toAddNode = opDefCopyer(toAddNodeIn.get()); - if (toAddNode == nullptr) { - MS_LOG(ERROR) << "copy toAddNodeIn failed"; - *errorCode = RET_NULL_PTR; - return graphT->nodes.end(); - } - toAddNode->name = toAddNodeIn->name + "_" + std::to_string(i++); - toAddNode->inputIndex.clear(); - toAddNode->inputIndex.push_back(postTensorIdx); - toAddNode->outputIndex.clear(); - toAddNode->outputIndex.push_back(toAddTensorIdx); - MS_ASSERT(IsContain(postNode->inputIndex, postTensorIdx)); + has_insert_for_graph_out = false; + } else { + auto &postNode = graphT->nodes.at(postNodeIdxes[is_output_index ? i - 1 : i]); for (auto iter = postNode->inputIndex.begin(); iter != postNode->inputIndex.end(); iter++) { if (*iter == postTensorIdx) { *iter = toAddTensorIdx; - break; } } - toAddNodes.emplace_back(std::move(toAddNode)); - } - for (auto &toAddNode : toAddNodes) { - existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode)); - existNodeIter++; } + toAddNodes.emplace_back(std::move(toAddNode)); + } + for (auto &toAddNode : toAddNodes) { + existNodeIter = graphT->nodes.insert(existNodeIter, std::move(toAddNode)); + existNodeIter++; + *insert_num += 1; } *errorCode = RET_OK; return existNodeIter; @@ -673,137 +579,6 @@ STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType } } -void TransformAttrByAxes(int *origin_attr, int *axes, int element_size) { - if (origin_attr == nullptr || axes == nullptr || element_size == 0) { - MS_LOG(INFO) << "Attr data is from other nodes."; - return; - } - auto axis_map = GetNc2NhAxisMap(); - std::vector<int> cur_attr; - for (int dim = 0; dim < 4; ++dim) { - for (int index = 0; index < element_size; ++index) { - int nhwc_dim = axis_map[axes[index] < 0 ? axes[index] + 4 : axes[index]]; - if (nhwc_dim == dim || (nhwc_dim + 4) == dim) { - cur_attr.push_back(origin_attr[index]); - } - } - } - for (int index = 0; index < element_size; ++index) { - origin_attr[index] = cur_attr[index]; - } -} - -STATUS ChangeOpAttrForSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node) { - auto type = node->primitive->value.type; - if (type == schema::PrimitiveType_StridedSlice) { - // onnx input size is equal to 5 always. 
- if (node->inputIndex.size() == 5) { - for (int index = 1; index < 5; ++index) { - if (graph->allTensors[node->inputIndex[index]]->data.data() == nullptr) { - MS_LOG(INFO) << "Here don't consider input is from other nodes."; - return RET_NOT_SUPPORT; - } - } - int element_num = graph->allTensors[node->inputIndex[1]]->dims[0]; - auto axes = graph->allTensors[node->inputIndex[3]]->data; - for (int index = 1; index < 5; ++index) { - TransformAttrByAxes(reinterpret_cast<int *>(graph->allTensors[node->inputIndex[index]]->data.data()), - reinterpret_cast<int *>(axes.data()), element_num); - } - } - } - if (type == schema::PrimitiveType_Slice) { - auto attr = node->primitive->value.AsSlice(); - if (attr == nullptr) { - MS_LOG(ERROR) << "node->primitive->value.AsSlice() is nullptr."; - return RET_NULL_PTR; - } - // transform attr - attr->format = schema::Format_NHWC; - if (attr->begin.empty() || attr->size.empty()) { - MS_LOG(INFO) << "Here don't consider these attr are from other nodes."; - return RET_NOT_SUPPORT; - } - int element_num = attr->begin.size(); - if (attr->axes.empty()) { - for (int index = 0; index < element_num; ++index) { - attr->axes.push_back(index); - } - } - TransformAttrByAxes(attr->begin.data(), attr->axes.data(), element_num); - TransformAttrByAxes(attr->size.data(), attr->axes.data(), element_num); - TransformAttrByAxes(attr->axes.data(), attr->axes.data(), element_num); - } - return RET_OK; -} - -STATUS ChangeOpAxis(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node) { - MS_ASSERT(node->primitive != nullptr); - auto type = node->primitive->value.type; - auto input1_ndim = graph->allTensors.at(node->inputIndex[0])->dims.size(); - if (input1_ndim != 4) { - if (node->inputIndex.size() > 1) { - auto input2_ndim = graph->allTensors.at(node->inputIndex[1])->dims.size(); - if (input2_ndim != 4 && input2_ndim != 0) { - MS_LOG(ERROR) << "change op axis only support 4 dims"; - return RET_NOT_SUPPORT; - } - } else { - MS_LOG(ERROR) << "change op axis only support 4 dims"; - return RET_NOT_SUPPORT; - } - } - if (type == schema::PrimitiveType_Concat) { - MS_ASSERT(node->primitive->value.AsConcat() != nullptr); - auto origin_axis = node->primitive->value.AsConcat()->axis; - auto axis_map = GetNc2NhAxisMap(); - if (node->primitive->value.AsConcat() == nullptr) { - MS_LOG(ERROR) << "node->primitive->value.AsConcat() is nullptr"; - return RET_NULL_PTR; - } - node->primitive->value.AsConcat()->axis = axis_map[origin_axis < 0 ? 
origin_axis + 4 : origin_axis]; - } - if (type == schema::PrimitiveType_Split) { - MS_ASSERT(node->primitive->value.AsSplit() != nullptr); - auto origin_axis = node->primitive->value.AsSplit()->splitDim; - auto axis_map = GetNc2NhAxisMap(); - if (node->primitive->value.AsSplit() == nullptr) { - MS_LOG(ERROR) << "node->primitive->value.AsSplit() is nullptr"; - return RET_NULL_PTR; - } - node->primitive->value.AsSplit()->splitDim = axis_map[origin_axis]; - } - if (type == schema::PrimitiveType_Crop) { - MS_ASSERT(node->primitive->value.AsCrop() != nullptr); - auto origin_axis = node->primitive->value.AsCrop()->axis; - auto offsets = node->primitive->value.AsCrop()->offsets; - auto axis_map = GetNc2NhAxisMap(); - if (node->primitive->value.AsCrop() == nullptr) { - MS_LOG(ERROR) << "node->primitive->value.AsCrop() is nullptr"; - return RET_NULL_PTR; - } - // nchw->nhwc, offsets need pad 0; - if (axis_map[origin_axis] == 0) { - offsets = {offsets[0], offsets[2], offsets[3], offsets[1]}; - } else if (axis_map[origin_axis] == 1 || axis_map[origin_axis] == 2) { - // origin_axis = 2 or origin_axis = 3 - offsets.push_back(0); - } else if (axis_map[origin_axis] == -1) { - // origin_axis = 1 - offsets = {offsets[1], offsets[2], offsets[0]}; - } else { - // axis error - MS_LOG(ERROR) << "Crop error"; - return RET_ERROR; - } - node->primitive->value.AsCrop()->offsets = offsets; - } - if (type == schema::PrimitiveType_Slice || type == schema::PrimitiveType_StridedSlice) { - return ChangeOpAttrForSlice(graph, node); - } - return RET_OK; -} - std::string GetModelName(const std::string &modelFile) { std::string modelName = modelFile; modelName = modelName.substr(modelName.find_last_of('/') + 1); @@ -835,5 +610,30 @@ int SetSubgraphTensorIndices(schema::MetaGraphT *meta_graphT) { } return RET_OK; } + +std::vector<int> GetTransposePerm(MetaGraphT *graph, const std::unique_ptr<CNodeT> &cnode) { + MS_ASSERT(graph != nullptr && cnode != nullptr); + std::vector<int> perm; + if (cnode->primitive->value.type != schema::PrimitiveType_Transpose) { + return perm; + } + if (cnode->inputIndex.size() < 2) { + MS_LOG(ERROR) << "transpose node input size is less than 2."; + return perm; + } + MS_ASSERT(cnode->inputIndex.at(1) < graph->allTensors.size()); + auto &perm_tensor = graph->allTensors.at(cnode->inputIndex.at(1)); + if (perm_tensor->data.empty()) { + return perm; + } + MS_ASSERT(perm_tensor->dims.size() != 0); + perm.resize(perm_tensor->dims[0]); + if (memcpy_s(perm.data(), perm_tensor->dims[0] * sizeof(int), perm_tensor->data.data(), + perm_tensor->dims[0] * sizeof(int)) != EOK) { + MS_LOG(ERROR) << "memcpy data failed."; + return {}; + } + return perm; +} } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/common/graph_util.h b/mindspore/lite/tools/common/graph_util.h index 9f746a6124..13cf28d24f 100644 --- a/mindspore/lite/tools/common/graph_util.h +++ b/mindspore/lite/tools/common/graph_util.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
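The ±128 zeroPoint adjustments in the InsertNodeBefore/InsertNodeAfter hunks above follow from the affine quantization relation real = scale * (q - zeroPoint): a uint8 code equals the corresponding int8 code plus 128, so a QuantDTypeCast between the two types keeps the scale and only shifts the zero point. A minimal standalone sketch of that relation, using a hypothetical simplified QuantParam struct rather than the real schema::QuantParamT:

#include <cstdint>

// Hypothetical stand-in for schema::QuantParamT; only scale and zeroPoint matter here.
struct QuantParam {
  float scale;
  int32_t zeroPoint;
};

// real = scale * (q - zeroPoint) holds for both encodings and q_uint8 = q_int8 + 128,
// so int8 -> uint8 keeps the scale and raises the zero point by 128 (e.g. 0 -> 128);
// the uint8 -> int8 direction subtracts 128 instead.
QuantParam Int8ToUint8(const QuantParam &int8_param) {
  QuantParam uint8_param = int8_param;
  uint8_param.zeroPoint = int8_param.zeroPoint + 128;
  return uint8_param;
}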
@@ -71,30 +71,28 @@ STATUS ReplaceTensorOfNode(schema::MetaGraphT *graphT, uint32_t nodeIdx, uint32_ std::unique_ptr<schema::TensorT> tensor); NodeIter InsertNode(schema::MetaGraphT *graphT, uint32_t existNodeIdx, InsertPlace place, size_t inoutIndex, - std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, + std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, int *insert_num, const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer()); NodeIter InsertNode(schema::MetaGraphT *graphT, NodeIter existNodeIter, InsertPlace place, size_t inoutIndexIdx, - std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, + std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, int *insert_num, const OpDefCopyer &opDefCopyer = GetSimpleOpCopyer()); NodeIter InsertNodeBefore(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t inputIndexIdx, - std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer); + std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, int *insert_num, + const OpDefCopyer &opDefCopyer); NodeIter InsertNodeAfter(schema::MetaGraphT *graphT, NodeIter existNodeIter, size_t outputIndexIdx, - std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, const OpDefCopyer &opDefCopyer); + std::unique_ptr<schema::CNodeT> toAddNode, STATUS *errorCode, int *insert_num, + const OpDefCopyer &opDefCopyer); STATUS ValidateFileStr(const std::string &modelFile, const std::string &fileType); -void TransformAttrByAxes(int *origin_attr, int *axes, int element_size); - -STATUS ChangeOpAxis(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); - -STATUS ChangeOpAttrForSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); - STATUS SetSubgraphTensorIndices(schema::MetaGraphT *meta_graphT); std::string GetModelName(const std::string &modelFile); + +std::vector<int> GetTransposePerm(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &cnode); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/common/node_util.cc b/mindspore/lite/tools/common/node_util.cc index c32e585ff0..e2cab08311 100644 --- a/mindspore/lite/tools/common/node_util.cc +++ b/mindspore/lite/tools/common/node_util.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
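The revised InsertNode declarations above add an int *insert_num out-parameter: when the affected tensor has several producer or consumer edges (or also feeds the graph output), one copy of the node is inserted per edge, and insert_num reports how many copies were actually added. A caller-side sketch of the intended pattern, assuming graphT, node_idx, and to_add_node already exist; this is not a complete translation unit:

// Sketch: graphT (schema::MetaGraphT *), node_idx (uint32_t) and
// to_add_node (std::unique_ptr<schema::CNodeT>) are assumed to exist;
// the default OpDefCopyer is used.
int insert_num = 0;
STATUS error_code = RET_OK;
auto iter = InsertNode(graphT, node_idx, kBefore, 0, std::move(to_add_node), &error_code, &insert_num);
if (error_code != RET_OK) {
  MS_LOG(ERROR) << "InsertNode failed: " << error_code;
  return error_code;
}
// iter now points past the inserted copies; a caller iterating by index
// advances by insert_num so it does not revisit the new nodes.
node_idx += insert_num;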
@@ -24,83 +24,85 @@ namespace mindspore { namespace lite { -static const std::vector<schema::PrimitiveType> nhwcOpList = {schema::PrimitiveType_Conv2DGradFilter, - schema::PrimitiveType_Conv2DGradInput, - schema::PrimitiveType_GroupConv2DGradInput, - schema::PrimitiveType_PoolingGrad, - schema::PrimitiveType_BiasGrad, - schema::PrimitiveType_BNGrad, +static const std::vector<schema::PrimitiveType> nhwcOpList = {schema::PrimitiveType_Conv2DBackpropFilterFusion, + schema::PrimitiveType_Conv2DBackpropInputFusion, + schema::PrimitiveType_AvgPoolGrad, + schema::PrimitiveType_MaxPoolGrad, + schema::PrimitiveType_BiasAddGrad, + schema::PrimitiveType_BatchNormGrad, schema::PrimitiveType_ApplyMomentum, - schema::PrimitiveType_Sgd, + schema::PrimitiveType_SGD, schema::PrimitiveType_Adam, - schema::PrimitiveType_Conv2D, - schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_DeDepthwiseConv2D, - schema::PrimitiveType_Pooling, - schema::PrimitiveType_LocalResponseNormalization, + schema::PrimitiveType_AvgPoolFusion, + schema::PrimitiveType_MaxPoolFusion, + schema::PrimitiveType_Conv2DFusion, + schema::PrimitiveType_Conv2dTransposeFusion, + schema::PrimitiveType_LRN, schema::PrimitiveType_Resize, schema::PrimitiveType_BatchNorm, schema::PrimitiveType_FusedBatchNorm, - schema::PrimitiveType_PReLU, + schema::PrimitiveType_PReLUFusion, schema::PrimitiveType_BiasAdd, schema::PrimitiveType_SpaceToDepth, schema::PrimitiveType_DepthToSpace, - schema::PrimitiveType_TopK}; + schema::PrimitiveType_TopKFusion, + schema::PrimitiveType_BatchToSpace, + schema::PrimitiveType_SpaceToBatch, + schema::PrimitiveType_SpaceToBatchND}; static const std::vector<schema::PrimitiveType> nhwcOpAllInputList = { - schema::PrimitiveType_PoolingGrad, schema::PrimitiveType_ActivationGrad, schema::PrimitiveType_Conv2DGradFilter, - schema::PrimitiveType_BNGrad}; + schema::PrimitiveType_AvgPoolGrad, schema::PrimitiveType_MaxPoolGrad, schema::PrimitiveType_ActivationGrad, + schema::PrimitiveType_Conv2DBackpropFilterFusion, schema::PrimitiveType_BatchNormGrad}; // index {} mean all inputs need insert static std::unordered_map<schema::PrimitiveType, std::vector<int>> extNhwcInsertIndex = { - {schema::PrimitiveType_BNGrad, {0, 1}}, + {schema::PrimitiveType_BatchNormGrad, {0, 1}}, + {schema::PrimitiveType_Conv2DBackpropFilterFusion, {0, 1}}, {schema::PrimitiveType_ApplyMomentum, {3}}, - {schema::PrimitiveType_Sgd, {1}}, + {schema::PrimitiveType_SGD, {1}}, {schema::PrimitiveType_Adam, {9}}}; static const std::vector<schema::PrimitiveType> fp32FullOpList = { - schema::PrimitiveType_Concat, schema::PrimitiveType_Add, + schema::PrimitiveType_Concat, schema::PrimitiveType_AddFusion, schema::PrimitiveType_Floor}; // fp32 ops support C4 and nhwc in fp32 static const std::vector<schema::PrimitiveType> int8NeedNhwcOpList = {}; -static const std::vector<schema::PrimitiveType> int8OpList = {schema::PrimitiveType_Conv2D, - schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_Add, +static const std::vector<schema::PrimitiveType> int8OpList = {schema::PrimitiveType_Conv2DFusion, + schema::PrimitiveType_Conv2dTransposeFusion, + schema::PrimitiveType_AddFusion, schema::PrimitiveType_Transpose, - schema::PrimitiveType_Pooling, + schema::PrimitiveType_AvgPoolFusion, + schema::PrimitiveType_MaxPoolFusion, schema::PrimitiveType_Concat, - schema::PrimitiveType_SoftMax, + schema::PrimitiveType_Softmax, schema::PrimitiveType_Reshape, schema::PrimitiveType_Activation, schema::PrimitiveType_Resize, 
schema::PrimitiveType_FullConnection, - schema::PrimitiveType_ArgMax, - schema::PrimitiveType_ArgMin, + schema::PrimitiveType_ArgMaxFusion, + schema::PrimitiveType_ArgMinFusion, schema::PrimitiveType_BatchNorm, schema::PrimitiveType_FusedBatchNorm, schema::PrimitiveType_BiasAdd, - schema::PrimitiveType_Div, - schema::PrimitiveType_Mul, - schema::PrimitiveType_Slice, - schema::PrimitiveType_SoftMax, + schema::PrimitiveType_DivFusion, + schema::PrimitiveType_MulFusion, + schema::PrimitiveType_SliceFusion, schema::PrimitiveType_Split, schema::PrimitiveType_Squeeze, - schema::PrimitiveType_Sub, + schema::PrimitiveType_SubFusion, schema::PrimitiveType_StridedSlice, - schema::PrimitiveType_TopK, + schema::PrimitiveType_TopKFusion, schema::PrimitiveType_Unsqueeze, schema::PrimitiveType_MatMul, - schema::PrimitiveType_Pad, - schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_Scale, + schema::PrimitiveType_PadFusion, + schema::PrimitiveType_ScaleFusion, schema::PrimitiveType_Cast, schema::PrimitiveType_Shape, schema::PrimitiveType_ExpandDims, schema::PrimitiveType_BatchToSpace, schema::PrimitiveType_BatchToSpaceND, - schema::PrimitiveType_Reduce, + schema::PrimitiveType_ReduceFusion, schema::PrimitiveType_Round, schema::PrimitiveType_Floor, schema::PrimitiveType_Ceil, @@ -115,9 +117,9 @@ static const std::vector<schema::PrimitiveType> int8OpList = {schema::PrimitiveT schema::PrimitiveType_SpaceToBatch, schema::PrimitiveType_SpaceToBatchND, schema::PrimitiveType_DepthToSpace, - schema::PrimitiveType_Power, + schema::PrimitiveType_PowFusion, schema::PrimitiveType_GatherNd, - schema::PrimitiveType_LeakyReLU, + schema::PrimitiveType_LeakyRelu, schema::PrimitiveType_Gather, schema::PrimitiveType_Equal, schema::PrimitiveType_NotEqual, @@ -125,19 +127,18 @@ static const std::vector<schema::PrimitiveType> int8OpList = {schema::PrimitiveT schema::PrimitiveType_Greater, schema::PrimitiveType_GreaterEqual, schema::PrimitiveType_Eltwise, - schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_DetectionPostProcess, schema::PrimitiveType_Crop, schema::PrimitiveType_PriorBox, schema::PrimitiveType_QuantDTypeCast, - schema::PrimitiveType_LayerNorm, - schema::PrimitiveType_L2Norm}; + schema::PrimitiveType_LayerNormFusion, + schema::PrimitiveType_L2NormalizeFusion}; static const std::vector<schema::PrimitiveType> needInsertOpList = { - schema::PrimitiveType_Eltwise, schema::PrimitiveType_Activation, schema::PrimitiveType_Concat, - schema::PrimitiveType_Power, schema::PrimitiveType_StridedSlice, schema::PrimitiveType_Add, - schema::PrimitiveType_Split, schema::PrimitiveType_Slice, schema::PrimitiveType_Crop, - schema::PrimitiveType_Mul, schema::PrimitiveType_Maximum, schema::PrimitiveType_ActivationGrad}; + schema::PrimitiveType_Eltwise, schema::PrimitiveType_Activation, schema::PrimitiveType_Concat, + schema::PrimitiveType_PowFusion, schema::PrimitiveType_StridedSlice, schema::PrimitiveType_AddFusion, + schema::PrimitiveType_Split, schema::PrimitiveType_SliceFusion, schema::PrimitiveType_Crop, + schema::PrimitiveType_MulFusion, schema::PrimitiveType_Maximum, schema::PrimitiveType_ActivationGrad}; static const std::unordered_map<int, int> nc2NhAxisMap = {{0, 0}, {1, -1}, {2, 1}, {3, 2}}; @@ -157,6 +158,13 @@ std::vector<schema::PrimitiveType> GetUint8NhwcOpList() { return int8NeedNhwcOpL std::vector<schema::PrimitiveType> GetInt8OpList() { return int8OpList; } +const schema::Primitive *ConvertToPrimitive(schema::PrimitiveT *primitive_t, flatbuffers::FlatBufferBuilder *fbb) { + auto prim_offset = 
schema::CreatePrimitive(*fbb, primitive_t); + fbb->Finish(prim_offset); + auto prim_buf = fbb->GetBufferPointer(); + return flatbuffers::GetRoot<schema::Primitive>(prim_buf); +} + STATUS NodeUtils::ConvertDims(mindspore::schema::Format src_format, const std::vector<int32_t> &src_dims, mindspore::schema::Format dst_format, std::vector<int32_t> *dst_dims) { MS_ASSERT(nullptr != dst_dims); diff --git a/mindspore/lite/tools/common/node_util.h b/mindspore/lite/tools/common/node_util.h index fb50012e88..c630a8e305 100644 --- a/mindspore/lite/tools/common/node_util.h +++ b/mindspore/lite/tools/common/node_util.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -70,6 +70,8 @@ std::vector<schema::PrimitiveType> GetUint8NhwcOpList(); std::vector<schema::PrimitiveType> GetInt8OpList(); +const schema::Primitive *ConvertToPrimitive(schema::PrimitiveT *primitive_t, flatbuffers::FlatBufferBuilder *fbb); + class NodeUtils { public: static STATUS ConvertDims(schema::Format src_format, const std::vector<int32_t> &src_dims, schema::Format dst_format, diff --git a/mindspore/lite/tools/common/option.h b/mindspore/lite/tools/common/option.h index c57063f6bc..4709a6627a 100644 --- a/mindspore/lite/tools/common/option.h +++ b/mindspore/lite/tools/common/option.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/protobuf_utils.cc b/mindspore/lite/tools/common/protobuf_utils.cc index 764ecfdf71..3bfd73b3a7 100644 --- a/mindspore/lite/tools/common/protobuf_utils.cc +++ b/mindspore/lite/tools/common/protobuf_utils.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/protobuf_utils.h b/mindspore/lite/tools/common/protobuf_utils.h index e9419f9059..c188ecdd96 100644 --- a/mindspore/lite/tools/common/protobuf_utils.h +++ b/mindspore/lite/tools/common/protobuf_utils.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/storage.cc b/mindspore/lite/tools/common/storage.cc index 329596d500..9672da74fc 100644 --- a/mindspore/lite/tools/common/storage.cc +++ b/mindspore/lite/tools/common/storage.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
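The ConvertToPrimitive helper added above serializes a schema::PrimitiveT through the caller-supplied FlatBufferBuilder and returns a pointer into that builder's buffer, so the builder must outlive every use of the returned schema::Primitive. A short usage sketch, assuming primitive_t points at a populated schema::PrimitiveT:

// Sketch: primitive_t (schema::PrimitiveT *) is assumed to be valid.
flatbuffers::FlatBufferBuilder fbb(1024);
const schema::Primitive *prim = ConvertToPrimitive(primitive_t, &fbb);
if (prim == nullptr) {
  MS_LOG(ERROR) << "ConvertToPrimitive failed";
  return RET_NULL_PTR;
}
// ... read from prim only while fbb is alive ...
fbb.Clear();  // releasing the builder's buffer invalidates prim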
diff --git a/mindspore/lite/tools/common/storage.h b/mindspore/lite/tools/common/storage.h index 7f1906b0f6..26ae87d981 100644 --- a/mindspore/lite/tools/common/storage.h +++ b/mindspore/lite/tools/common/storage.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/tensor_util.cc b/mindspore/lite/tools/common/tensor_util.cc index b7443ad034..c15b3148dd 100644 --- a/mindspore/lite/tools/common/tensor_util.cc +++ b/mindspore/lite/tools/common/tensor_util.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/common/tensor_util.h b/mindspore/lite/tools/common/tensor_util.h index 71c199b9a4..0232e45c23 100644 --- a/mindspore/lite/tools/common/tensor_util.h +++ b/mindspore/lite/tools/common/tensor_util.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -37,7 +37,6 @@ using schema::QuantParamT; using schema::TensorT; using schema::Format::Format_NCHW; using schema::Format::Format_NHWC; -using STATUS = int; std::unique_ptr<QuantParamT> GetTensorQuantParam(const std::unique_ptr<TensorT> &tensor); diff --git a/mindspore/lite/tools/converter/CMakeLists.txt b/mindspore/lite/tools/converter/CMakeLists.txt index eb7e2f6cae..b94e1aa8d4 100644 --- a/mindspore/lite/tools/converter/CMakeLists.txt +++ b/mindspore/lite/tools/converter/CMakeLists.txt @@ -53,7 +53,7 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ../optimizer/graph/weight_format_hardcode_pass.cc ../optimizer/graph/clip_convert_activation_pass.cc ../optimizer/graph/group_depthwise_op_convert_pass.cc - ../optimizer/graph/tflite_inputs_order_exchange_pass.cc + ../optimizer/graph/tflite_inputs_adjust_pass.cc ../optimizer/graph/update_conv2d_param_pass.cc ../optimizer/graph/unused_cast_node_remove_pass.cc ../optimizer/graph/unused_transpose_node_remove_pass.cc @@ -64,13 +64,12 @@ file(GLOB_RECURSE CONVERTER_SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} ../optimizer/graph/onnx_inputs_adjust_pass.cc ../optimizer/graph/while_pass.cc ../optimizer/graph/if_pass.cc - ../optimizer/graph/mindir_inputs_adjust_pass.cc ../optimizer/graph/functionalize_control_op_pass.cc ../optimizer/graph/functionalize_while.cc ../optimizer/graph/inputs_adjust_pass.cc + ../optimizer/graph/primitive_adjust_pass.cc ) -add_subdirectory(../anf_importer anf_importer) add_subdirectory(../anf_exporter anf_exporter) add_subdirectory(parser/caffe) add_subdirectory(parser/tflite) @@ -84,9 +83,12 @@ set(SRC_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../src) set(LITE_SRC ${SRC_DIR}/common/graph_util.cc ${SRC_DIR}/common/string_util.cc + ${SRC_DIR}/common/prim_util.cc + ${SRC_DIR}/common/tensor_util.cc ${SRC_DIR}/runtime/allocator.cc ${SRC_DIR}/runtime/runtime_api.cc ${SRC_DIR}/runtime/thread_pool.c + ${SRC_DIR}/runtime/infer_manager.cc ${SRC_DIR}/inner_context.cc ${SRC_DIR}/tensor.cc ${SRC_DIR}/tensorlist.cc @@ -100,6 +102,9 @@ set(LITE_SRC ${SRC_DIR}/errorcode.cc ${SRC_DIR}/dequant.cc 
${SRC_DIR}/huffman_decode.cc + ${SRC_DIR}/ops/ops_utils.cc + ${SRC_DIR}/ops/ops_def.cc + ${SRC_DIR}/train/train_populate_parameter.cc ) set(ENABLE_MINDRT "off") @@ -113,8 +118,10 @@ file(GLOB KERNEL_SRC ${ARM_DIR}/base/*.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/*.c ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/fp32/*.c + ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/infer/*.c ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/int8/*.c ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/base/*.c + ${CMAKE_CURRENT_SOURCE_DIR}/../../nnacl/quantization/*.c ${ARM_DIR}/fp32/*.cc ${ARM_DIR}/int8/*.cc ) @@ -166,7 +173,6 @@ target_link_libraries(converter_lite PRIVATE tf_parser_mid caffe_parser_mid onnx_parser_mid - anf_importer_mid anf_exporter_mid graph_pass_mid fusion_mid @@ -175,7 +181,7 @@ target_link_libraries(converter_lite PRIVATE ${SECUREC_LIBRARY} mindspore::json mindspore::eigen - mindspore_core + -Wl,--whole-archive mindspore_core -Wl,--no-whole-archive mindspore::glog mindspore::protobuf mindspore::flatbuffers diff --git a/mindspore/lite/tools/converter/anf_transform.cc b/mindspore/lite/tools/converter/anf_transform.cc index 57f0cfccb3..d373173958 100644 --- a/mindspore/lite/tools/converter/anf_transform.cc +++ b/mindspore/lite/tools/converter/anf_transform.cc @@ -32,14 +32,14 @@ #include "tools/optimizer/fusion/tflite_lstm_cell_fusion.h" #include "tools/optimizer/fusion/tf_lstm_cell_fusion.h" #include "tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h" +#include "tools/optimizer/graph/primitive_adjust_pass.h" #include "tools/optimizer/graph/mindir_adjust_pass.h" -#include "tools/optimizer/graph/mindir_inputs_adjust_pass.h" #include "tools/optimizer/graph/redundant_op_remove_pass.h" #include "tools/optimizer/graph/weight_format_hardcode_pass.h" #include "tools/optimizer/graph/weight_format_transform_pass.h" #include "tools/optimizer/graph/clip_convert_activation_pass.h" #include "tools/optimizer/graph/group_depthwise_op_convert_pass.h" -#include "tools/optimizer/graph/tflite_inputs_order_exchange_pass.h" +#include "tools/optimizer/graph/tflite_inputs_adjust_pass.h" #include "tools/optimizer/graph/onnx_inputs_adjust_pass.h" #include "tools/optimizer/graph/update_conv2d_param_pass.h" #include "tools/optimizer/graph/unused_cast_node_remove_pass.h" @@ -124,7 +124,6 @@ int AnfTransform::AddGraphPass(const std::shared_ptr<opt::GraphOptimizer> &optim auto slice_prepose_pass = std::make_shared<opt::SlicePreposePass>(); slice_prepose_pass->SetFmkType(config->fmk); graph_pm->AddPass(slice_prepose_pass); - graph_pm->AddPass(std::make_shared<opt::InputAdjustPass>()); optimizer->AddPassManager(graph_pm); return RET_OK; } @@ -135,7 +134,7 @@ int AnfTransform::AddConvertPass(const std::shared_ptr<opt::GraphOptimizer> &opt convert_pm->AddPass(std::make_shared<opt::ClipConvertActivationPass>()); if (config->fmk == lite::converter::FmkType_TFLITE) { convert_pm->AddPass(std::make_shared<opt::GroupDepthwiseOpConvertPass>()); - convert_pm->AddPass(std::make_shared<opt::TfliteInputsOrderExchangePass>()); + convert_pm->AddPass(std::make_shared<opt::TfliteInputsAdjustPass>()); } optimizer->AddPassManager(convert_pm); return RET_OK; @@ -153,6 +152,10 @@ int AnfTransform::AddConstFoldPass(const std::shared_ptr<opt::GraphOptimizer> &o auto update_conv2d_param_pass = std::make_shared<opt::UpdateConv2DParamPass>(); update_conv2d_param_pass->SetFmkType(config->fmk); const_fold_pm->AddPass(update_conv2d_param_pass); + auto weight_format_hardcode_pass = std::make_shared<opt::WeightFormatHardCodePass>(); + 
weight_format_hardcode_pass->SetFmkType(config->fmk); + weight_format_hardcode_pass->SetQuantType(config->quantType); + const_fold_pm->AddPass(weight_format_hardcode_pass); auto infershape_pass = std::make_shared<opt::InferShapePass>(); infershape_pass->SetFmkType(config->fmk); const_fold_pm->AddPass(infershape_pass); @@ -161,9 +164,17 @@ int AnfTransform::AddConstFoldPass(const std::shared_ptr<opt::GraphOptimizer> &o } int AnfTransform::RunAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config) { + if (config->fmk == converter::FmkType_MS) { + if (RunMindirAdjustPass(old_graph, config) != RET_OK) { + return RET_ERROR; + } + } + auto adjust_input = std::make_shared<opt::InputAdjustPass>(); + if (!adjust_input->Run(old_graph)) { + MS_LOG(ERROR) << "adjust input failed."; + return RET_ERROR; + } switch (config->fmk) { - case converter::FmkType_MS: - return RunMindirAdjustPass(old_graph, config); case converter::FmkType_ONNX: return RunOnnxAdjustPass(old_graph, config); case converter::FmkType_TF: @@ -174,6 +185,13 @@ int AnfTransform::RunAdjustPass(const FuncGraphPtr &old_graph, const converter:: } int AnfTransform::RunMindirAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config) { + auto primitive_adjust_pass = std::make_shared<opt::PrimitiveAdjustPass>(); + primitive_adjust_pass->SetFmkType(config->fmk); + if (!primitive_adjust_pass->Run(old_graph)) { + MS_LOG(ERROR) << "primitive adjust failed."; + ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_ERROR); + return RET_ERROR; + } auto mindir_adjust_pass = std::make_shared<opt::MindirAdjustPass>(); mindir_adjust_pass->SetFmkType(config->fmk); mindir_adjust_pass->SetQuantType(config->quantType); @@ -183,12 +201,6 @@ int AnfTransform::RunMindirAdjustPass(const FuncGraphPtr &old_graph, const conve ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_ERROR); return RET_ERROR; } - auto mindir_inputs_adjust_pass = std::make_shared<opt::MindirInputAdjustOpPass>(); - if (!mindir_inputs_adjust_pass->Run(old_graph)) { - MS_LOG(ERROR) << "mindir inputs adjust failed."; - ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_ERROR); - return RET_ERROR; - } return RET_OK; } @@ -217,23 +229,23 @@ int AnfTransform::DoQuantize(const FuncGraphPtr &old_graph, const converter::Fla const FuncGraphPtr &new_graph) { // quant if (config->quantType == schema::QuantType_PostTraining) { - this->mQuantizer = std::make_unique<quant::PostTrainingQuantizer>(new_graph, config->configFile, config->bitNum); - if (mQuantizer == nullptr) { + this->m_quantizer_ = std::make_unique<quant::PostTrainingQuantizer>(new_graph, config->configFile, config->bitNum); + if (m_quantizer_ == nullptr) { MS_LOG(ERROR) << "New PostTrainingQuantizer failed"; ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_MEMORY_FAILED); return RET_ERROR; } } else if (config->quantType == schema::QuantType_WeightQuant) { - this->mQuantizer = std::make_unique<quant::WeightQuantizer>(new_graph, *config); - if (mQuantizer == nullptr) { + this->m_quantizer_ = std::make_unique<quant::WeightQuantizer>(new_graph, *config); + if (m_quantizer_ == nullptr) { MS_LOG(ERROR) << "New WeightQuantizer failed"; ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_MEMORY_FAILED); return RET_ERROR; } } - if (mQuantizer != nullptr) { - mQuantizer->flags = *config; - auto status = mQuantizer->DoQuantize(new_graph); + if (m_quantizer_ != nullptr) { + m_quantizer_->flags = *config; + auto status = m_quantizer_->DoQuantize(new_graph); if (status != RET_OK) { MS_LOG(ERROR) << 
"Quant failed " << status; ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status); diff --git a/mindspore/lite/tools/converter/anf_transform.h b/mindspore/lite/tools/converter/anf_transform.h index bc3af711c1..7e64bc81c3 100644 --- a/mindspore/lite/tools/converter/anf_transform.h +++ b/mindspore/lite/tools/converter/anf_transform.h @@ -36,25 +36,27 @@ class AnfTransform { FuncGraphPtr Transform(const FuncGraphPtr &old_graph, const converter::Flags *config = nullptr); private: + std::unique_ptr<quant::Quantizer> m_quantizer_ = nullptr; + STATUS GetAllFuncGraph(const FuncGraphPtr &main_graph, FuncGraphVector *subgraphs, std::vector<ValueNodePtr> *vnodes); + FuncGraphPtr TransformSingleFuncGraph(const FuncGraphPtr &old_graph, const converter::Flags *config = nullptr); - std::unique_ptr<quant::Quantizer> mQuantizer = nullptr; - int AddFusionPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); + static int AddFusionPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); - int AddGraphPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); + static int AddGraphPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); - int AddConvertPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); + static int AddConvertPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); - int AddConstFoldPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); + static int AddConstFoldPass(const std::shared_ptr<opt::GraphOptimizer> &optimizer, const converter::Flags *config); - int RunAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); + static int RunAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); - int RunMindirAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); + static int RunMindirAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); - int RunOnnxAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); + static int RunOnnxAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); - int RunTFAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); + static int RunTFAdjustPass(const FuncGraphPtr &old_graph, const converter::Flags *config); int DoQuantize(const FuncGraphPtr &old_graph, const converter::Flags *config, const FuncGraphPtr &new_graph); }; diff --git a/mindspore/lite/tools/converter/converter.cc b/mindspore/lite/tools/converter/converter.cc index 0c87ec13ba..1c9cdc801a 100644 --- a/mindspore/lite/tools/converter/converter.cc +++ b/mindspore/lite/tools/converter/converter.cc @@ -17,12 +17,7 @@ #include "tools/converter/converter.h" #include <memory> #include <vector> -#include <utility> #include "tools/converter/converter_flags.h" -#include "src/common/common.h" -#include "src/common/file_utils.h" -#include "ir/func_graph.h" - #include "src/common/log_adapter.h" #include "tools/common/storage.h" #include "parser/caffe/caffe_converter.h" @@ -30,155 +25,128 @@ #include "parser/onnx/onnx_converter.h" #include "parser/tf/tf_converter.h" #include "tools/anf_exporter/anf_exporter.h" -#include "tools/anf_importer/import_from_mindir.h" -#include "proto/onnx.pb.h" #include "include/version.h" +#include "src/train/train_populate_parameter.h" namespace mindspore { namespace lite { using FmkType = 
converter::FmkType; -static const char *DELIM_SLASH = "/"; -Converter::Converter() { - this->transform = new GraphDefTransform; - this->anfTransform = new AnfTransform; -} - -Converter::~Converter() { - delete modelParser; - delete modelImporter; - delete transform; - delete anfTransform; -} - -class MindsporeImporter : public Converter { - public: - MindsporeImporter() { modelImporter = new AnfImporterFromMindir(); } - ~MindsporeImporter() override = default; -}; - -MetaGraphT *Converter::Convert(const converter::Flags *flag) { - // parse the model and weight file to generate inference data structure - FuncGraphPtr graph = nullptr; - if (flag->fmk == converter::FmkType_MS) { - MS_ASSERT(nullptr != modelImporter); - int status = modelImporter->Import(flag); - ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status); - graph = modelImporter->GetResult(); - if (graph == nullptr) { +MindsporeImporter::MindsporeImporter() { kernel::PopulateTrainParameters(); } + +std::unique_ptr<Converter> Converter::CreateConverter(converter::FmkType fmk) { + switch (fmk) { + case FmkType::FmkType_MS: + return std::make_unique<MindsporeImporter>(); + case FmkType::FmkType_CAFFE: + return std::make_unique<CaffeConverter>(); + case FmkType::FmkType_TFLITE: + return std::make_unique<TfliteConverter>(); + case FmkType::FmkType_ONNX: + return std::make_unique<OnnxConverter>(); + case FmkType::FmkType_TF: + return std::make_unique<TFConverter>(); + default: { return nullptr; } - graph->set_attr("graph_name", MakeValue("main_graph")); - graph->set_attr("fmk", MakeValue(static_cast<int>(converter::FmkType_MS))); - } else { - MS_ASSERT(nullptr != modelParser); - const std::string modelFile = flag->modelFile; - const std::string weightFile = flag->weightFile; - graph = modelParser->Parse(modelFile, weightFile, flag->quantType); } +} + +MetaGraphT *Converter::Convert(const std::unique_ptr<converter::Flags> &flag) { + if (flag == nullptr) { + MS_LOG(ERROR) << "Input flag is nullptr"; + return nullptr; + } + auto graph = BuildFuncGraph(flag->modelFile, flag->weightFile, flag->quantType); if (graph == nullptr) { MS_LOG(ERROR) << "Parser/Import model return nullptr"; return nullptr; } - - graph = anfTransform->Transform(graph, flag); + // funcgraph compile + graph = funcgraph_transform_->Transform(graph, flag.get()); if (graph == nullptr) { MS_LOG(ERROR) << "Transform anf graph return nullptr"; return nullptr; } + MS_LOG(INFO) << "Run anfTransform success"; - // anf -- fb + // protobuf -> flatbuf auto meta_graph = Export(graph, false, false, flag->trainModel); if (meta_graph == nullptr) { MS_LOG(ERROR) << "Export to meta graph return nullptr"; return nullptr; } + MS_LOG(INFO) << "export success"; - // transform - transform->SetGraphDef(meta_graph); - auto status = transform->Transform(*flag); + // metagraph compile + metagraph_transform_->SetGraphDef(meta_graph); + auto status = metagraph_transform_->Transform(*flag); if (status != RET_OK) { MS_LOG(ERROR) << "Transform meta graph failed " << status; ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status); return nullptr; } - return meta_graph; } int RunConverter(int argc, const char **argv) { + std::ostringstream oss; std::unique_ptr<converter::Flags> flags(new (std::nothrow) converter::Flags); if (flags == nullptr) { - MS_LOG(ERROR) << "NEW FLAGS ERROR:" << RET_MEMORY_FAILED << " " << GetErrorInfo(RET_MEMORY_FAILED); - std::cout << "NEW FLAGS ERROR:" << RET_MEMORY_FAILED << " " << GetErrorInfo(RET_MEMORY_FAILED) << std::endl; + oss.clear(); + oss << "NEW FLAGS 
ERROR:" << RET_MEMORY_FAILED << " " << GetErrorInfo(RET_MEMORY_FAILED); + MS_LOG(ERROR) << oss.str(); + std::cout << oss.str() << std::endl; return RET_MEMORY_FAILED; } auto status = flags->Init(argc, argv); if (status != RET_OK) { if (status != RET_SUCCESS_EXIT) { - MS_LOG(ERROR) << "CONVERTER::FLAGS INIT FAILED:" << status << " " << GetErrorInfo(status) << std::endl; - std::cout << "CONVERTER::FLAGS INIT FAILED:" << status << " " << GetErrorInfo(status) << std::endl; + oss.clear(); + oss << "CONVERTER::FLAGS INIT FAILED:" << status << " " << GetErrorInfo(status); + MS_LOG(ERROR) << oss.str(); + std::cout << oss.str() << std::endl; } - std::cout << GetErrorInfo(status) << std::endl; return status; } // Load graph - std::string modelName = flags->modelFile.substr(flags->modelFile.find_last_of(DELIM_SLASH) + 1); - MS_LOG(INFO) << "start reading model file"; - - MetaGraphT *fb_graph = nullptr; - switch (flags->fmk) { - case FmkType::FmkType_MS: { - MindsporeImporter mindsporeImporter; - fb_graph = mindsporeImporter.Convert(flags.get()); - break; - } - case FmkType::FmkType_CAFFE: { - CaffeConverter caffeConverter; - fb_graph = caffeConverter.Convert(flags.get()); - } break; - case FmkType::FmkType_TFLITE: { - TfliteConverter tfLiteConverter; - fb_graph = tfLiteConverter.Convert(flags.get()); - } break; - case FmkType::FmkType_ONNX: { - OnnxConverter onnxConverter; - fb_graph = onnxConverter.Convert(flags.get()); - } break; - case FmkType::FmkType_TF: { - TFConverter tfConverter; - fb_graph = tfConverter.Convert(flags.get()); - } break; - default: { - MS_LOG(ERROR) << "UNSUPPORTED FMKTYPE " << flags->fmk << ":" << RET_INPUT_PARAM_INVALID << " " - << GetErrorInfo(RET_INPUT_PARAM_INVALID); - std::cout << "UNSUPPORTED FMKTYPE " << flags->fmk << ":" << RET_INPUT_PARAM_INVALID << " " - << GetErrorInfo(RET_INPUT_PARAM_INVALID) << std::endl; - return RET_INPUT_PARAM_INVALID; - } + MS_LOG(DEBUG) << "start reading model file"; + auto converter = Converter::CreateConverter(flags->fmk); + if (converter == nullptr) { + oss.clear(); + oss << "UNSUPPORTED FMKTYPE " << flags->fmk << ":" << RET_INPUT_PARAM_INVALID << " " + << GetErrorInfo(RET_INPUT_PARAM_INVALID); + MS_LOG(ERROR) << oss.str(); + std::cout << oss.str() << std::endl; + return RET_INPUT_PARAM_INVALID; } + auto meta_graph = converter->Convert(flags); NoSupportOp::GetInstance()->PrintOps(); status = ReturnCode::GetSingleReturnCode()->GetReturnCode(); - if (fb_graph == nullptr) { - MS_LOG(ERROR) << "CONVERT RESULT FAILED:" << status << " " << GetErrorInfo(status); - std::cout << "CONVERT RESULT FAILED:" << status << " " << GetErrorInfo(status) << std::endl; + if (meta_graph == nullptr) { + oss.clear(); + oss << "CONVERT RESULT FAILED:" << status << " " << GetErrorInfo(status); + MS_LOG(ERROR) << oss.str(); + std::cout << oss.str() << std::endl; return status; } // save graph to file - Storage storage; - fb_graph->version = Version(); - status = storage.Save(*fb_graph, flags->outputFile); + meta_graph->version = Version(); + status = Storage::Save(*meta_graph, flags->outputFile); if (status != RET_OK) { - MS_LOG(ERROR) << "SAVE GRAPH FAILED:" << status << " " << GetErrorInfo(status); - std::cout << "SAVE GRAPH FAILED:" << status << " " << GetErrorInfo(status) << std::endl; + oss.clear(); + oss << "SAVE GRAPH FAILED:" << status << " " << GetErrorInfo(status); + MS_LOG(ERROR) << oss.str(); + std::cout << oss.str() << std::endl; return status; } - delete fb_graph; - MS_LOG(INFO) << "CONVERT RESULT SUCCESS:" << status; - std::cout << "CONVERT RESULT 
SUCCESS:" << status << std::endl; - + delete meta_graph; + oss.clear(); + oss << "CONVERT RESULT SUCCESS:" << status; + MS_LOG(INFO) << oss.str(); + std::cout << oss.str() << std::endl; return status; } } // namespace lite diff --git a/mindspore/lite/tools/converter/converter.h b/mindspore/lite/tools/converter/converter.h index 472bd67a5e..b8fc703cb8 100644 --- a/mindspore/lite/tools/converter/converter.h +++ b/mindspore/lite/tools/converter/converter.h @@ -22,24 +22,48 @@ #include "schema/inner/model_generated.h" #include "tools/converter/graphdef_transform.h" #include "tools/converter/model_parser.h" -#include "tools/anf_importer/anf_importer.h" #include "tools/converter/converter_flags.h" #include "tools/converter/anf_transform.h" #include "tools/converter/converter_context.h" +#include "load_mindir/load_model.h" namespace mindspore { namespace lite { class Converter { public: - Converter(); - virtual ~Converter(); - virtual schema::MetaGraphT *Convert(const lite::converter::Flags *flags); + static std::unique_ptr<Converter> CreateConverter(converter::FmkType fmk); + + virtual ~Converter() = default; + + virtual schema::MetaGraphT *Convert(const std::unique_ptr<converter::Flags> &flag); + + virtual FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) = 0; protected: - ModelParser *modelParser = nullptr; - AnfImporter *modelImporter = nullptr; - GraphDefTransform *transform = nullptr; - AnfTransform *anfTransform = nullptr; + Converter() = default; + + std::unique_ptr<GraphDefTransform> metagraph_transform_ = std::make_unique<GraphDefTransform>(); + std::unique_ptr<AnfTransform> funcgraph_transform_ = std::make_unique<AnfTransform>(); +}; + +class MindsporeImporter : public Converter { + public: + MindsporeImporter(); + + ~MindsporeImporter() override = default; + + FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) override { + auto func_graph = LoadMindIR(model_file); + if (func_graph == nullptr) { + MS_LOG(ERROR) << "get funcgraph failed."; + return nullptr; + } + func_graph->set_attr("graph_name", MakeValue("main_graph")); + func_graph->set_attr("fmk", MakeValue(static_cast<int>(converter::FmkType_MS))); + return func_graph; + } }; int RunConverter(int argc, const char **argv); diff --git a/mindspore/lite/tools/converter/graphdef_transform.cc b/mindspore/lite/tools/converter/graphdef_transform.cc index 472c867703..3ede4f979d 100644 --- a/mindspore/lite/tools/converter/graphdef_transform.cc +++ b/mindspore/lite/tools/converter/graphdef_transform.cc @@ -31,7 +31,6 @@ #include "tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h" #include "tools/converter/legacy_optimizer/graph/global_format_transform_pass.h" #include "tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h" -#include "tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h" #include "tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.h" #include "tools/converter/legacy_optimizer/graph/topological_sort_pass.h" #include "tools/converter/legacy_optimizer/graph/tensor_quant_pass.h" @@ -39,7 +38,6 @@ #include "tools/converter/legacy_optimizer/graph/infer_quant_param_pass.h" #include "tools/converter/legacy_optimizer/graph/set_unused_quant_param_to_default_pass.h" #include "tools/converter/legacy_optimizer/graph/switch_pass.h" -#include "tools/converter/legacy_optimizer/graph/select_pass.h" #include 
"tools/converter/legacy_optimizer/graph/subgraph_node_pass.h" #include "tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.h" #include "tools/converter/legacy_optimizer/graph/nested_loop_expand_pass.h" @@ -66,7 +64,6 @@ int GraphDefTransform::Transform(const converter::Flags &ctx) { { auto old_nodes = GetGraphNodes(); Optimizer unusedOpRemoveOptimizer; - unusedOpRemoveOptimizer.AddPass(new UnusedNodeRemovePass()); if (!ctx.trainModel) { unusedOpRemoveOptimizer.AddPass(new DropoutNodeRemovePass()); } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc index bd114c7726..c2026ff8cb 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -99,15 +99,14 @@ STATUS FormatTransFusionPass::DoFusion(schema::MetaGraphT *graph, const std::str MS_LOG(ERROR) << "srcPath or dstPath is failed to get"; return RET_ERROR; } - auto srcNode = graph->nodes.at(srcPath->nodeIdx).get(); - auto dstNode = graph->nodes.at(dstPath->nodeIdx).get(); + auto &srcNode = graph->nodes.at(srcPath->nodeIdx); + auto &dstNode = graph->nodes.at(dstPath->nodeIdx); MS_ASSERT(srcNode != nullptr); MS_ASSERT(dstNode != nullptr); - MS_ASSERT(srcNode->primitive->value.AsTranspose() != nullptr); - bool isNc2NhAndNh2Nc = srcNode->primitive->value.AsTranspose()->perm == nchw2nhwc_perm && - dstNode->primitive->value.AsTranspose()->perm == nhwc2nchw_perm; - bool isNh2NcAndNc2Nh = srcNode->primitive->value.AsTranspose()->perm == nhwc2nchw_perm && - dstNode->primitive->value.AsTranspose()->perm == nchw2nhwc_perm; + auto src_perm = GetTransposePerm(graph, srcNode); + auto dst_perm = GetTransposePerm(graph, dstNode); + bool isNc2NhAndNh2Nc = src_perm == nchw2nhwc_perm && dst_perm == nhwc2nchw_perm; + bool isNh2NcAndNc2Nh = src_perm == nhwc2nchw_perm && dst_perm == nchw2nhwc_perm; if (isNc2NhAndNh2Nc || isNh2NcAndNc2Nh) { auto status = IsolateOneWayNode(graph, srcPath->nodeIdx); if (status != RET_OK) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h index 9ca9c9b2c7..4ac7b779ba 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/format_trans_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc index c5a0b6adfc..77c4b4a868 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -29,7 +29,6 @@ #include "tools/common/graph_util.h" #include "include/errorcode.h" #include "schema/inner/model_generated.h" -#include "src/ops/primitive_c.h" namespace mindspore { namespace lite { @@ -47,7 +46,7 @@ STATUS FusionPass::Run(schema::MetaGraphT *graph) { } if (!pattern->Check()) { - MS_LOG(ERROR) << "FusionPattern is invaild"; + MS_LOG(ERROR) << "FusionPattern is invalid"; return RET_PARAM_INVALID; } } @@ -229,23 +228,12 @@ bool FusionPass::CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr<Pat bool FusionPass::MatchTree(schema::MetaGraphT *graph, size_t nodeIdx, const std::shared_ptr<PatternOp> &target, std::vector<size_t> &sinkIdes, std::vector<size_t> &pathSinkIdes) { MS_ASSERT(graph != nullptr); - MS_ASSERT(nodeIdx < subGraph->nodes.size()); - auto &scope = graph->nodes.at(nodeIdx); - MS_ASSERT(scope != nullptr); - // if target(except target is marked head) is nullptr, it means the preNode - // has no left or right, but scope is not nullptr - if (target == nullptr) { - return false; - } - // if node is sinked and not in the pathSinkId, then return false - if (IsContain(sinkIdes, nodeIdx) && !IsContain(pathSinkIdes, nodeIdx)) { + MS_ASSERT(nodeIdx < graph->nodes.size()); + // check the match preconditions: sink state, placeholder and node type + if (!CheckMatchParams(graph, nodeIdx, target, sinkIdes, pathSinkIdes)) { return false; } - // type not match - if (!target->isPlaceHold && !IsContain(target->types, scope->primitive->value.type)) { - return false; - } - // path is set and does not point to this node + // path is set and does not point to this node if (target->pathSetted) { MS_ASSERT(target->path != nullptr); if (target->path->nodeIdx != static_cast<int>(nodeIdx)) { @@ -267,27 +255,26 @@ bool FusionPass::MatchTree(schema::MetaGraphT *graph, size_t nodeIdx, const std: for (auto preNodeIdx : preNodeIdxes) { MS_ASSERT(graph->nodes.size() > preNodeIdx); // Case of multiple outputs is not supported.
- if (GetInputNodeIdx(*graph, preNodeIdx).size() > kDoubleNum || - GetOutputNodeIdx(*graph, preNodeIdx).size() > kSingleNum) { + if (GetInputNodeIdx(*graph, preNodeIdx).size() > 2 || GetOutputNodeIdx(*graph, preNodeIdx).size() > 1) { sinkIdes.erase((sinkIdes.end() - 1)); pathSinkIdes.erase((pathSinkIdes.end() - 1)); target->UnSetPath(); return false; } - // match left - if (MatchTree(graph, preNodeIdx, target->left, sinkIdes, pathSinkIdes)) { - // match right - if (preNodeIdxes.size() == 1 && target->right == nullptr) { - return true; + if (!MatchTree(graph, preNodeIdx, target->left, sinkIdes, pathSinkIdes)) { + continue; + } + // match left then match right + if (preNodeIdxes.size() == 1 && target->right == nullptr) { + return true; + } + for (auto preNodeIdxInner : preNodeIdxes) { + if (preNodeIdxInner == preNodeIdx) { + continue; } - for (auto preNodeIdxInner : preNodeIdxes) { - if (preNodeIdxInner == preNodeIdx) { - continue; - } - MS_ASSERT(subGraph->nodes.size() > preNodeIdxInner); - if (MatchTree(graph, preNodeIdxInner, target->right, sinkIdes, pathSinkIdes)) { - return true; // ignore follow match, pick the first match - } + MS_ASSERT(subGraph->nodes.size() > preNodeIdxInner); + if (MatchTree(graph, preNodeIdxInner, target->right, sinkIdes, pathSinkIdes)) { + return true; // ignore follow match, pick the first match } } } @@ -297,6 +284,26 @@ bool FusionPass::MatchTree(schema::MetaGraphT *graph, size_t nodeIdx, const std: return false; } +bool FusionPass::CheckMatchParams(schema::MetaGraphT *graph, size_t nodeIdx, const std::shared_ptr<PatternOp> &target, + std::vector<size_t> &sinkIdes, std::vector<size_t> &pathSinkIdes) { + auto &scope = graph->nodes.at(nodeIdx); + MS_ASSERT(scope != nullptr); + // if target(except target is marked head) is nullptr, it means the preNode + // has no left or right, but scope is not nullptr + if (target == nullptr) { + return false; + } + // if node is sinked and not in the pathSinkId, then return false + if (IsContain(sinkIdes, nodeIdx) && !IsContain(pathSinkIdes, nodeIdx)) { + return false; + } + // type not match + if (!target->isPlaceHold && !IsContain(target->types, scope->primitive->value.type)) { + return false; + } + return true; +} + STATUS FusionPass::Fuse(schema::MetaGraphT *graph) { STATUS ret; bool isChange = false; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h index 9e78588c30..582cb356ec 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -67,6 +67,8 @@ class FusionPass : public GraphPass { bool MatchTree(schema::MetaGraphT *graph, size_t nodeIdx, const std::shared_ptr<PatternOp> &target, std::vector<size_t> &sinkIdes, std::vector<size_t> &pathSinkIdes); + bool CheckMatchParams(schema::MetaGraphT *graph, size_t nodeIdx, const std::shared_ptr<PatternOp> &target, + std::vector<size_t> &sinkIdes, std::vector<size_t> &pathSinkIdes); static bool CheckMatch(schema::MetaGraphT *graph, const std::shared_ptr<PatternOp> &patternOp); void MergeNodeAttrFromPost(std::unique_ptr<schema::CNodeT> &dstOp, std::unique_ptr<schema::CNodeT> &postOp, diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.cc index 27acce8cd2..98a3ab46c4 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h index 444108f3f0..4875ce01fb 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/fusion_pattern.h @@ -1,7 +1,5 @@ -#include <memory> - /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,6 +21,7 @@ #include <utility> #include <vector> #include <map> +#include <memory> #include "src/common/log_adapter.h" #include "schema/inner/model_generated.h" diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc index be744dc3d5..85bae4edf8 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
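To make the MatchTree/CheckMatchParams split concrete, here is a toy pattern in this framework's vocabulary (field names are taken from the surrounding code; real passes build their trees inside DefinePattern, and construction details may differ):

    void DefineToyPattern() {
      // A two-op chain: a convolution feeding an elementwise add.
      auto producer = std::make_shared<PatternOp>();
      producer->id = "PRODUCER";
      producer->types = {schema::PrimitiveType_Conv2DFusion};

      auto add = std::make_shared<PatternOp>();
      add->id = "ADD";
      add->types = {schema::PrimitiveType_AddFusion};
      add->left = producer;  // MatchTree walks up this edge via GetInputNodeIdx
    }

Matching is rooted at the add: CheckMatchParams first rejects nodes that are already sunk or whose primitive type is not in types, then MatchTree recurses into left (and right, when set) over the node's producers, keeping the first complete match.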
@@ -99,13 +99,13 @@ STATUS MatMulBiasAddFusionPass::DoFusion(MetaGraphT *graph, const std::string &p MS_LOG(ERROR) << "new FullConnectionT node failed"; return RET_ERROR; } - fcAttr->hasBias = true; + fcAttr->has_bias = true; fcAttr->axis = 1; MS_ASSERT(matMulNode->primitive != nullptr); MS_ASSERT(matMulNode->primitive->value != nullptr); MS_ASSERT(matMulNode->primitive->value.AsMatMul() != nullptr); - transA = matMulNode->primitive->value.AsMatMul()->transposeA; - transB = matMulNode->primitive->value.AsMatMul()->transposeB; + transA = matMulNode->primitive->value.AsMatMul()->transpose_a; + transB = matMulNode->primitive->value.AsMatMul()->transpose_b; matMulNode->primitive->value.type = schema::PrimitiveType_FullConnection; matMulNode->primitive->value.value = fcAttr.release(); @@ -142,6 +142,19 @@ STATUS MatMulBiasAddFusionPass::InsertTransposeNode(MetaGraphT *graph, const std auto matmulOpIter = graph->nodes.begin() + matMulPath->nodeIdx; STATUS errorCode = RET_OK; + auto perm_tensor = std::make_unique<schema::TensorT>(); + perm_tensor->dataType = kNumberTypeInt32; + perm_tensor->dims = {2}; + std::vector<int> perm{1, 0}; + size_t bytes = perm.size() * sizeof(int); + perm_tensor->data.resize(bytes); + perm_tensor->name = "perm_" + std::to_string(id++); + if (memcpy_s(perm_tensor->data.data(), bytes, perm.data(), bytes) != EOK) { + MS_LOG(ERROR) << "memcpy data failed."; + return RET_ERROR; + } + size_t index = graph->allTensors.size(); + graph->allTensors.push_back(std::move(perm_tensor)); for (auto needInsertIdx : insertNodeIdxList) { auto transNode = std::unique_ptr<CNodeT>(new (std::nothrow) CNodeT); if (transNode == nullptr) { @@ -150,20 +163,18 @@ STATUS MatMulBiasAddFusionPass::InsertTransposeNode(MetaGraphT *graph, const std } transNode->name = "transpose" + std::to_string(id++); transNode->primitive->value.type = schema::PrimitiveType_Transpose; - std::unique_ptr<TransposeT> transposeParam(new (std::nothrow) TransposeT()); - if (transposeParam == nullptr) { - MS_LOG(ERROR) << "new transposeParam failed"; - return RET_ERROR; - } - transposeParam->perm = {1, 0}; - transNode->primitive->value.value = transposeParam.release(); - matmulOpIter = - InsertNode(graph, matmulOpIter, kBefore, needInsertIdx, std::move(transNode), &errorCode, TransposeOpCopyer); + int insert_num = 0; + matmulOpIter = InsertNode(graph, matmulOpIter, kBefore, needInsertIdx, std::move(transNode), &errorCode, + &insert_num, TransposeOpCopyer); if (errorCode != RET_OK) { MS_LOG(ERROR) << "InsertNode failed: " << errorCode; return errorCode; } + for (int i = insert_num; i > 0; --i) { + (*(matmulOpIter - i))->inputIndex.push_back(index); + } } + graph->allTensors.at(index)->refCount = insertNodeIdxList.size(); return RET_OK; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h index 671c2278b9..96af402092 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/matmul_biasadd_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
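The same attribute-to-tensor move shows up here: instead of attaching a TransposeT with perm = {1, 0} to each inserted Transpose, the pass now materializes one shared int32 perm tensor, appends it to graph->allTensors, and pushes its index into every inserted node's inputIndex. Factored out as a helper, the construction looks like this (the helper name is illustrative, not part of the patch):

    std::unique_ptr<schema::TensorT> BuildPermTensor(const std::vector<int> &perm,
                                                     const std::string &name) {
      auto tensor = std::make_unique<schema::TensorT>();
      tensor->dataType = kNumberTypeInt32;
      tensor->dims = {static_cast<int>(perm.size())};
      tensor->name = name;
      size_t bytes = perm.size() * sizeof(int);
      tensor->data.resize(bytes);
      if (memcpy_s(tensor->data.data(), bytes, perm.data(), bytes) != EOK) {
        return nullptr;  // caller logs and bails out, mirroring the error handling above
      }
      return tensor;
    }

Note that refCount is set to insertNodeIdxList.size() afterwards because all the inserted Transpose nodes share the single perm tensor.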
@@ -66,17 +66,6 @@ class MatMulBiasAddFusionPass : public FusionPass { return nullptr; } newOpDef->primitive->value.type = schema::PrimitiveType_Transpose; - auto transposeParam = new (std::nothrow) TransposeT; - if (transposeParam == nullptr) { - MS_LOG(ERROR) << "new transposeParam failed"; - return nullptr; - } - auto inParam = inOpDef->primitive->value.AsTranspose(); - MS_ASSERT(inParam != nullptr); - transposeParam->perm.resize(inParam->perm.size()); - std::transform(inParam->perm.begin(), inParam->perm.end(), transposeParam->perm.begin(), - [](const int32_t ele) { return ele; }); - newOpDef->primitive->value.value = transposeParam; return newOpDef; }; }; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc index 4451fe5281..131e7999cc 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -38,10 +38,10 @@ STATUS MulAddFusionPass::Run(MetaGraphT *graph) { return FusionPass::Run(graph); STATUS MulAddFusionPass::DefinePattern() { auto mulOp = std::make_shared<PatternOp>(); mulOp->id = MUL_NAME; - mulOp->types = {schema::PrimitiveType_Mul}; + mulOp->types = {schema::PrimitiveType_MulFusion}; auto baOp = std::make_shared<PatternOp>(); baOp->id = ADD_NAME; - baOp->types = {schema::PrimitiveType_Add}; + baOp->types = {schema::PrimitiveType_AddFusion}; baOp->left = mulOp; std::unique_ptr<FusionPattern> fusionPattern(new (std::nothrow) FusionPattern("MulAddFusion")); @@ -136,8 +136,8 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt MS_ASSERT(mulNode != nullptr); MS_ASSERT(addNode != nullptr); // replace mulNode as scale - mulNode->primitive->value.type = schema::PrimitiveType_Scale; - std::unique_ptr<ScaleT> scaleParam(new (std::nothrow) ScaleT()); + mulNode->primitive->value.type = schema::PrimitiveType_ScaleFusion; + std::unique_ptr<ScaleFusionT> scaleParam(new (std::nothrow) ScaleFusionT()); if (scaleParam == nullptr) { MS_LOG(ERROR) << "new transposeParam failed"; return RET_ERROR; @@ -147,23 +147,23 @@ STATUS MulAddFusionPass::AddNewScaleNode(MetaGraphT *graph, const std::unique_pt scaleParam->axis = 0 - shape_size; mulNode->inputIndex.push_back(addBiasIndex); MS_ASSERT(addNode->primitive != nullptr); - MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr); - auto activationType = addNode->primitive->value.AsAdd()->activationType; + MS_ASSERT(addNode->primitive->value.AsAddFusion() != nullptr); + auto activationType = addNode->primitive->value.AsAddFusion()->activation_type; if (activationType == ActivationType_RELU || activationType == ActivationType_RELU6 || activationType == ActivationType_NO_ACTIVATION) { // delete addnode - scaleParam->activationType = activationType; + scaleParam->activation_type = activationType; auto status = IsolateOneWayNode(graph, addNode); if (status != RET_OK) { MS_LOG(ERROR) << "IsolateOneWayNode failed"; return status; } } else { - // repace addnode as activation + // replace addnode as activation std::unique_ptr<ActivationT> activationParam(new ActivationT()); MS_ASSERT(addNode->primitive != nullptr); - 
MS_ASSERT(addNode->primitive->value.AsAdd() != nullptr); - activationParam->type = addNode->primitive->value.AsAdd()->activationType; + MS_ASSERT(addNode->primitive->value.AsAddFusion() != nullptr); + activationParam->activation_type = addNode->primitive->value.AsAddFusion()->activation_type; addNode->primitive->value.type = schema::PrimitiveType_Activation; addNode->primitive->value.value = activationParam.release(); addNode->inputIndex.pop_back(); diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h index d988dc70f8..e6eafe3566 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/mul_add_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc index 6d968f41a5..3a7a3968c0 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -62,7 +62,7 @@ STATUS QuantCastFusionPass::DoFusion(MetaGraphT *graph, const std::string &patte auto dstAttr = dstNode->primitive->value.AsQuantDTypeCast(); MS_ASSERT(srcAttr != nullptr); MS_ASSERT(dstAttr != nullptr); - if (srcAttr->dstT != dstAttr->srcT) { + if (srcAttr->dst_t != dstAttr->src_t) { MS_LOG(ERROR) << "srcNode and dstNode can not been fused"; return RET_ERROR; } @@ -73,14 +73,14 @@ STATUS QuantCastFusionPass::DoFusion(MetaGraphT *graph, const std::string &patte return status; } - if (srcAttr->srcT == dstAttr->dstT) { + if (srcAttr->src_t == dstAttr->dst_t) { status = IsolateOneWayNode(graph, dstPath->nodeIdx); if (status != RET_OK) { MS_LOG(ERROR) << "IsolateOneWayNode failed, node: " << dstNode->name.c_str() << ", error: " << status; return status; } } else { - dstAttr->srcT = srcAttr->srcT; + dstAttr->src_t = srcAttr->src_t; } return RET_OK; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h index 9765bff938..eb23d9c25f 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/fusion/quant_cast_fusion_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
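The fusion rule in QuantCastFusionPass::DoFusion is easiest to see on a concrete chain (the values below are illustrative):

    schema::QuantDTypeCastT a;  // upstream cast
    a.src_t = kNumberTypeFloat32;
    a.dst_t = kNumberTypeInt8;
    schema::QuantDTypeCastT b;  // downstream cast
    b.src_t = kNumberTypeInt8;
    b.dst_t = kNumberTypeFloat32;
    // The pair is fusable because a.dst_t == b.src_t. Here a.src_t == b.dst_t as
    // well, so the chain is an identity cast and both nodes are isolated; if the
    // end types differed, only `a` would be removed and b.src_t would be rewritten
    // to a.src_t, collapsing the chain to a single cast.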
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/CMakeLists.txt b/mindspore/lite/tools/converter/legacy_optimizer/graph/CMakeLists.txt index 406202a274..d50b798d1a 100755 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/CMakeLists.txt +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/CMakeLists.txt @@ -5,7 +5,6 @@ file(GLOB GRAPH_PASS ${CMAKE_CURRENT_SOURCE_DIR}/isolated_node_remove_pass.cc ${CMAKE_CURRENT_SOURCE_DIR}/model_input_format_preprocess_pass.cc ${CMAKE_CURRENT_SOURCE_DIR}/topological_sort_pass.cc - ${CMAKE_CURRENT_SOURCE_DIR}/unused_node_remove_pass.cc ${CMAKE_CURRENT_SOURCE_DIR}/dropout_node_remove_pass.cc ${CMAKE_CURRENT_SOURCE_DIR}/batchnorm_convert_scale_pass.cc ${CMAKE_CURRENT_SOURCE_DIR}/trans_format_remove_pass.cc diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc index d052baf8b5..9606195af2 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,7 +22,6 @@ #include "tools/converter/converter_flags.h" #include "third_party/securec/include/securec.h" #include "src/common/log_adapter.h" -#include "src/common/common.h" #include "tools/common/tensor_util.h" #include "include/errorcode.h" #include "schema/inner/model_generated.h" @@ -74,8 +73,8 @@ STATUS BatchNormConvertScalePass::Run(MetaGraphT *graph) { STATUS BatchNormConvertScalePass::ConvertBNToScale(MetaGraphT *graph, const std::unique_ptr<CNodeT> &bnNode) { MS_ASSERT(graph != nullptr); MS_ASSERT(bnNode != nullptr); - bnNode->primitive->value.type = schema::PrimitiveType_Scale; - std::unique_ptr<ScaleT> scaleParam(new (std::nothrow) ScaleT()); + bnNode->primitive->value.type = schema::PrimitiveType_ScaleFusion; + std::unique_ptr<ScaleFusionT> scaleParam(new (std::nothrow) ScaleFusionT()); if (scaleParam == nullptr) { MS_LOG(ERROR) << "new scaleParam failed"; return RET_ERROR; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.h index e9dab257a2..eeab6e3eda 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/batchnorm_convert_scale_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
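For context on why a BatchNorm node can be rewritten as a Scale node at all (this is the standard folding identity, independent of this patch, which only moves the op to the unified ScaleFusionT schema type):

    #include <cmath>
    // y = gamma * (x - mean) / sqrt(var + eps)  ==  scale * x + bias, per channel
    void FoldBnToScale(float gamma, float beta, float mean, float var, float eps,
                       float *scale, float *bias) {
      *scale = gamma / std::sqrt(var + eps);
      *bias = beta - mean * (*scale);
    }

The pass's weight rewriting (not shown in this hunk) computes per-channel scale and bias tensors of exactly this form from the BN parameters.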
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.cc index 53a09c7325..f02d4f4b30 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -108,7 +108,7 @@ STATUS DropoutNodeRemovePass::Run(schema::MetaGraphT *graph) { for (size_t i = 0; i < graph->nodes.size(); i++) { auto &node = graph->nodes.at(i); if (node->primitive == nullptr) { - MS_LOG(ERROR) << "node->primitive is nullptr"; + MS_LOG(ERROR) << "node->primitive is nullptr, node name: " << node->name; return RET_ERROR; } if (node->primitive->value.type == schema::PrimitiveType_Dropout) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.h index 1006951070..401e75bcc0 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/dropout_node_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.cc index 1e149e2106..c8b76b8c64 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -104,7 +104,7 @@ STATUS DTypeTransPass::DoModelOutputDTypeTrans(schema::MetaGraphT *graph) { continue; } int32_t tensorDataType = this->outputDataDType != TypeId::kTypeUnknown - ? this->inputDataDType + ? 
this->outputDataDType : TensorDataType::GetInstance()->GetTensorType(graphOutIdx); for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) { auto nodeName = (*iter)->name; @@ -200,8 +200,8 @@ NodeIter DTypeTransPass::InsertDTypeTransNode(schema::MetaGraphT *graph, NodeIte transNode->primitive->value.value = quantDTypeCastParam; transNode->primitive->value.type = PrimitiveType_QuantDTypeCast; transNode->quantType = QuantType_AwareTraining; - quantDTypeCastParam->srcT = inputDataType; - quantDTypeCastParam->dstT = outputDataType; + quantDTypeCastParam->src_t = inputDataType; + quantDTypeCastParam->dst_t = outputDataType; if (inputDataType == TypeId::kNumberTypeInt8 && outputDataType == TypeId::kNumberTypeFloat32) { transNode->name = "int8toft32_" + tileName + std::to_string(id++); } else if (inputDataType == TypeId::kNumberTypeFloat32 && outputDataType == TypeId::kNumberTypeInt8) { @@ -212,7 +212,8 @@ NodeIter DTypeTransPass::InsertDTypeTransNode(schema::MetaGraphT *graph, NodeIte transNode->name = "int8touint8_" + tileName + std::to_string(id++); } transNode->primitive->value.value = quantDTypeCastParam; - return InsertNode(graph, existNodeIter, place, inoutIdx, std::move(transNode), errorCode, castOpCopyer); + int insert_num = 0; + return InsertNode(graph, existNodeIter, place, inoutIdx, std::move(transNode), errorCode, &insert_num, castOpCopyer); } void DTypeTransPass::SetInputDataDType(TypeId dataType) { this->inputDataDType = dataType; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.h index 34f2bf358b..42bb39e14f 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/dtype_trans_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -71,8 +71,8 @@ class DTypeTransPass : public GraphPass { MS_LOG(ERROR) << "new QuantDTypeCast failed"; return nullptr; } - QuantDTypeCastParam->srcT = oldQuantDTypeCastParam->srcT; - QuantDTypeCastParam->dstT = oldQuantDTypeCastParam->dstT; + QuantDTypeCastParam->src_t = oldQuantDTypeCastParam->src_t; + QuantDTypeCastParam->dst_t = oldQuantDTypeCastParam->dst_t; newCNode->primitive->value.value = QuantDTypeCastParam; return newCNode; }; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc index 270f22f179..4f52a04fdc 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
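Every InsertNode caller in this patch moves to the new signature, which reports through an out-parameter how many copies of the node were actually inserted (one per affected edge). A sketch of the calling pattern, using the names from the pass above (perm_tensor_index is illustrative): the dtype pass only threads insert_num through, while the transpose-inserting passes below use it to wire a shared perm tensor into each inserted copy.

    int insert_num = 0;
    auto iter = InsertNode(graph, existNodeIter, place, inoutIdx, std::move(transNode),
                           &errorCode, &insert_num, castOpCopyer);
    // The iterator now sits past the inserted nodes; walking back insert_num
    // steps lets the caller post-process each copy, e.g. append a shared perm
    // tensor as an extra input:
    for (int i = insert_num; i > 0; --i) {
      (*(iter - i))->inputIndex.push_back(perm_tensor_index);
    }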
@@ -18,6 +18,7 @@ #include <string> #include <memory> #include <utility> +#include <vector> #include "tools/converter/legacy_optimizer/graph/format_trans_pass.h" #include "tools/common/node_util.h" #include "src/common/log_adapter.h" @@ -46,31 +47,30 @@ STATUS FormatTransPass::Run(schema::MetaGraphT *graph) { STATUS FormatTransPass::GetInsertFormatTrans(const schema::CNodeT &node, FormatTransNodeType *beforeNodeType, FormatTransNodeType *afterNodeType) { - if (fmkType == converter::FmkType_TFLITE) { // inference by nhwc + if (fmk_type_ == converter::FmkType_TFLITE) { // inference by nhwc return RET_NO_CHANGE; - } else if (fmkType == converter::FmkType_CAFFE || fmkType == converter::FmkType_MS || - fmkType == converter::FmkType_ONNX) { + } else if (fmk_type_ == converter::FmkType_CAFFE || fmk_type_ == converter::FmkType_MS || + fmk_type_ == converter::FmkType_ONNX) { if (!IsContain(GetNhwcOpList(), GetCNodeTType(node))) { return RET_NO_CHANGE; } *beforeNodeType = kNCHW2NHWC; *afterNodeType = kNHWC2NCHW; return RET_OK; - } else if (fmkType == converter::FmkType_TF) { + } else if (fmk_type_ == converter::FmkType_TF) { if (IsContain(GetNhwcOpList(), GetCNodeTType(node)) && GetFormat(node) == schema::Format_NCHW) { *beforeNodeType = kNCHW2NHWC; *afterNodeType = kNHWC2NCHW; return RET_OK; } return RET_NO_CHANGE; - } else { - MS_LOG(ERROR) << "Unsupported fmk: " << fmkType; - return RET_ERROR; } + MS_LOG(ERROR) << "Unsupported fmk: " << fmk_type_; + return RET_ERROR; } STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) { - if (fmkType == converter::FmkType_TF || fmkType == converter::FmkType_TFLITE) { + if (fmk_type_ == converter::FmkType_TF || fmk_type_ == converter::FmkType_TFLITE) { return RET_OK; } MS_ASSERT(graph != nullptr); @@ -79,7 +79,7 @@ STATUS FormatTransPass::DoModelInputFormatTrans(schema::MetaGraphT *graph) { return RET_OK; } // onnx input format may be nhwc - if (fmkType == converter::FmkType_ONNX && graph->inputIndex.size() == 1) { + if (fmk_type_ == converter::FmkType_ONNX && graph->inputIndex.size() == 1) { auto &input_tensor = graph->allTensors.at(graph->inputIndex[0]); auto &input_dims = input_tensor->dims; if (input_dims.size() == 4 && input_dims[3] != -1 && input_dims[1] == -1) { @@ -205,16 +205,23 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI auto transNode = std::make_unique<schema::CNodeT>(); transNode->primitive = std::make_unique<schema::PrimitiveT>(); transNode->primitive->value.type = schema::PrimitiveType_Transpose; - auto attr = new (std::nothrow) schema::TransposeT(); - + auto perm_tensor = std::make_unique<schema::TensorT>(); + perm_tensor->dataType = kNumberTypeInt32; + perm_tensor->dims = {4}; + std::vector<int> perm; if (nodeType == kNCHW2NHWC) { - transNode->name = "nchw2nhwc_" + tileName + std::to_string(id++); - attr->perm = {0, 2, 3, 1}; + transNode->name = "nchw2nhwc_" + tileName + std::to_string(id_++); + perm = {0, 2, 3, 1}; } else { - transNode->name = "nhwc2nchw_" + tileName + std::to_string(id++); - attr->perm = {0, 3, 1, 2}; + transNode->name = "nhwc2nchw_" + tileName + std::to_string(id_++); + perm = {0, 3, 1, 2}; + } + size_t bytes = perm.size() * sizeof(int); + perm_tensor->data.resize(bytes); + if (memcpy_s(perm_tensor->data.data(), bytes, perm.data(), bytes) != EOK) { + MS_LOG(ERROR) << "memcpy data failed."; } - transNode->primitive->value.value = attr; + perm_tensor->name = transNode->name + "_perm"; OpDefCopyer TransposeOpCopyer = [](CNodeT *inOpDef) -> std::unique_ptr<CNodeT> { 
auto newOpDef = std::make_unique<schema::CNodeT>(); @@ -230,44 +237,211 @@ NodeIter FormatTransPass::InsertFormatTransNode(schema::MetaGraphT *graph, NodeI return nullptr; } newOpDef->primitive->value.type = schema::PrimitiveType_Transpose; - auto transposeParam = new (std::nothrow) TransposeT; - if (transposeParam == nullptr) { - MS_LOG(ERROR) << "new transposeParam failed"; - return nullptr; - } - auto inParam = inOpDef->primitive->value.AsTranspose(); - MS_ASSERT(inParam != nullptr); - transposeParam->perm.resize(inParam->perm.size()); - std::transform(inParam->perm.begin(), inParam->perm.end(), transposeParam->perm.begin(), - [](const int32_t ele) { return ele; }); - MS_ASSERT(newOpDef->primitive != nullptr); - newOpDef->primitive->value.value = transposeParam; return newOpDef; }; - - return InsertNode(graph, existNodeIter, place, inoutIdx, std::move(transNode), errorCode, TransposeOpCopyer); + int insert_num = 0; + auto iter = + InsertNode(graph, existNodeIter, place, inoutIdx, std::move(transNode), errorCode, &insert_num, TransposeOpCopyer); + size_t index = graph->allTensors.size(); + graph->allTensors.push_back(std::move(perm_tensor)); + for (int i = insert_num; i > 0; --i) { + (*(iter - i))->inputIndex.push_back(index); + } + return iter; } -void FormatTransPass::SetQuantType(QuantType quantType) { this->quantType = quantType; } - -void FormatTransPass::SetFmk(converter::FmkType fmkType) { this->fmkType = fmkType; } - int FormatTransPass::GetFormat(const schema::CNodeT &node) { switch (node.primitive->value.type) { - case schema::PrimitiveType_Conv2D: - return node.primitive->value.AsConv2D()->format; - case schema::PrimitiveType_DeConv2D: - return node.primitive->value.AsDeConv2D()->format; - case schema::PrimitiveType_DeDepthwiseConv2D: - return node.primitive->value.AsDeDepthwiseConv2D()->format; - case schema::PrimitiveType_DepthwiseConv2D: - return node.primitive->value.AsDepthwiseConv2D()->format; - case schema::PrimitiveType_Pooling: - return node.primitive->value.AsPooling()->format; + case schema::PrimitiveType_Conv2DFusion: + return node.primitive->value.AsConv2DFusion()->format; + case schema::PrimitiveType_Conv2dTransposeFusion: + return node.primitive->value.AsConv2dTransposeFusion()->format; + case schema::PrimitiveType_AvgPoolFusion: + return node.primitive->value.AsAvgPoolFusion()->format; + case schema::PrimitiveType_MaxPoolFusion: + return node.primitive->value.AsMaxPoolFusion()->format; default: return schema::Format_NHWC; } } +STATUS FormatTransPass::ChangeOpAxis(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node) { + MS_ASSERT(node->primitive != nullptr); + auto type = node->primitive->value.type; + auto input1_ndim = graph->allTensors.at(node->inputIndex[0])->dims.size(); + if (input1_ndim != 4) { + if (node->inputIndex.size() > 1) { + auto input2_ndim = graph->allTensors.at(node->inputIndex[1])->dims.size(); + if (input2_ndim != 4 && input2_ndim != 0) { + MS_LOG(ERROR) << "change op axis only support 4 dims"; + return RET_NOT_SUPPORT; + } + } else { + MS_LOG(ERROR) << "change op axis only support 4 dims"; + return RET_NOT_SUPPORT; + } + } + if (type == schema::PrimitiveType_Concat) { + MS_ASSERT(node->primitive->value.AsConcat() != nullptr); + auto origin_axis = node->primitive->value.AsConcat()->axis; + auto axis_map = GetNc2NhAxisMap(); + if (node->primitive->value.AsConcat() == nullptr) { + MS_LOG(ERROR) << "node->primitive->value.AsConcat() is nullptr"; + return RET_NULL_PTR; + } + node->primitive->value.AsConcat()->axis = 
axis_map[origin_axis < 0 ? origin_axis + 4 : origin_axis]; + } + if (type == schema::PrimitiveType_Split) { + MS_ASSERT(node->primitive->value.AsSplit() != nullptr); + auto origin_axis = node->primitive->value.AsSplit()->axis; + auto axis_map = GetNc2NhAxisMap(); + if (node->primitive->value.AsSplit() == nullptr) { + MS_LOG(ERROR) << "node->primitive->value.AsSplit() is nullptr"; + return RET_NULL_PTR; + } + node->primitive->value.AsSplit()->axis = axis_map[origin_axis]; + } + if (type == schema::PrimitiveType_Crop) { + MS_ASSERT(node->primitive->value.AsCrop() != nullptr); + auto origin_axis = node->primitive->value.AsCrop()->axis; + auto offsets = node->primitive->value.AsCrop()->offsets; + auto axis_map = GetNc2NhAxisMap(); + if (node->primitive->value.AsCrop() == nullptr) { + MS_LOG(ERROR) << "node->primitive->value.AsCrop() is nullptr"; + return RET_NULL_PTR; + } + // nchw->nhwc: reorder offsets, padding a 0 for the channel dim where needed + if (axis_map[origin_axis] == 0) { + offsets = {offsets[0], offsets[2], offsets[3], offsets[1]}; + } else if (axis_map[origin_axis] == 1 || axis_map[origin_axis] == 2) { + // origin_axis = 2 or origin_axis = 3 + offsets.push_back(0); + } else if (axis_map[origin_axis] == -1) { + // origin_axis = 1 + offsets = {offsets[1], offsets[2], offsets[0]}; + } else { + // axis error + MS_LOG(ERROR) << "Crop error"; + return RET_ERROR; + } + node->primitive->value.AsCrop()->offsets = offsets; + } + if (type == schema::PrimitiveType_SliceFusion || type == schema::PrimitiveType_StridedSlice) { + return ChangeOpSliceAndStridedSlice(graph, node); + } + return RET_OK; +} + +void FormatTransPass::TransformAttrByAxes(int *origin_attr, int *axes, int element_size) { + if (origin_attr == nullptr || axes == nullptr || element_size == 0) { + return; + } + auto axis_map = GetNc2NhAxisMap(); + std::vector<int> cur_attr; + for (int dim = 0; dim < 4; ++dim) { + for (int index = 0; index < element_size; ++index) { + int nhwc_dim = axis_map[axes[index] < 0 ? axes[index] + 4 : axes[index]]; + if (nhwc_dim == dim || (nhwc_dim + 4) == dim) { + cur_attr.push_back(origin_attr[index]); + } + } + } + for (int index = 0; index < element_size; ++index) { + origin_attr[index] = cur_attr[index]; + } +} + +void FormatTransPass::TransformOpAxisAttr(int *origin_axis, int element_size) { + if (origin_axis == nullptr || element_size == 0) { + return; + } + auto axis_map = GetNc2NhAxisMap(); + std::vector<int> new_axis; + for (int i = 0; i < element_size; ++i) { + int axis = axis_map[origin_axis[i]]; + axis = axis < 0 ?
axis + 4 : axis; + new_axis.push_back(axis); + } + std::sort(new_axis.begin(), new_axis.end()); + for (int i = 0; i < element_size; ++i) { + origin_axis[i] = new_axis[i]; + } +} + +STATUS FormatTransPass::ChangeOpSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node) { + auto attr = node->primitive->value.AsSliceFusion(); + if (attr == nullptr) { + MS_LOG(ERROR) << "node->primitive->value.AsSliceFusion() is nullptr."; + return RET_NULL_PTR; + } + // transform attr + if (node->inputIndex.size() < 2) { + MS_LOG(ERROR) << "slice input is error"; + return RET_ERROR; + } + for (size_t index = 1; index < node->inputIndex.size(); ++index) { + if (graph->allTensors[node->inputIndex[index]]->data.data() == nullptr) { + return RET_NOT_SUPPORT; + } + } + int element_num = graph->allTensors[node->inputIndex[1]]->dims[0]; + std::vector<int> axes; + auto axes_attr = attr->axes; + if (axes_attr.empty()) { + for (int index = 0; index < element_num; ++index) { + axes.push_back(index); + } + } else { + std::transform(axes_attr.begin(), axes_attr.end(), std::back_inserter(axes), + [](int64_t val) { return static_cast<int>(val); }); + } + for (size_t index = 1; index < node->inputIndex.size(); ++index) { + TransformAttrByAxes(reinterpret_cast<int *>(graph->allTensors[node->inputIndex[index]]->data.data()), + reinterpret_cast<int *>(axes.data()), element_num); + } + TransformOpAxisAttr(axes.data(), element_num); + attr->axes.clear(); + for (int i = 0; i < element_num; ++i) { + attr->axes.push_back(static_cast<int64_t>(axes[i])); + } + return RET_OK; +} + +STATUS FormatTransPass::ChangeOpStridedSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node) { + // onnx input size is equal to 5 always. + if (node->inputIndex.size() != 5) { + return RET_NOT_SUPPORT; + } + if (node->inputIndex.size() == 5) { + for (int index = 1; index < 5; ++index) { + if (graph->allTensors[node->inputIndex[index]]->data.data() == nullptr) { + return RET_NOT_SUPPORT; + } + } + int element_num = graph->allTensors[node->inputIndex[1]]->dims[0]; + auto axes = graph->allTensors[node->inputIndex[3]]->data; + for (int index = 1; index < 5; ++index) { + if (index == 3) { + continue; + } + TransformAttrByAxes(reinterpret_cast<int *>(graph->allTensors[node->inputIndex[index]]->data.data()), + reinterpret_cast<int *>(axes.data()), element_num); + } + TransformOpAxisAttr(reinterpret_cast<int *>(graph->allTensors[node->inputIndex[3]]->data.data()), element_num); + } + return RET_OK; +} + +STATUS FormatTransPass::ChangeOpSliceAndStridedSlice(schema::MetaGraphT *graph, + const std::unique_ptr<schema::CNodeT> &node) { + auto type = node->primitive->value.type; + if (type == schema::PrimitiveType_StridedSlice) { + return ChangeOpStridedSlice(graph, node); + } + if (type == schema::PrimitiveType_SliceFusion) { + return ChangeOpSlice(graph, node); + } + return RET_ERROR; +} } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.h index 5d0cd8a4b4..5bc5087507 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/format_trans_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with 
the License. @@ -28,36 +28,48 @@ enum FormatTransNodeType { kNCHW2NHWC, kNHWC2NCHW, kNONE }; class FormatTransPass : public GraphPass { public: - FormatTransPass() : id(0) {} + FormatTransPass() : id_(0) {} ~FormatTransPass() override = default; STATUS Run(schema::MetaGraphT *graph) override; - void SetQuantType(QuantType quantType); + void SetQuantType(QuantType quantType) { this->quant_type_ = quantType; } - void SetFmk(converter::FmkType fmkType); + void SetFmk(converter::FmkType fmkType) { this->fmk_type_ = fmkType; } protected: NodeIter InsertFormatTransNode(schema::MetaGraphT *graph, NodeIter existNodeIter, InsertPlace place, size_t inoutIdx, FormatTransNodeType nodeType, STATUS *errorCode); + STATUS ChangeOpAxis(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); + private: STATUS DoModelInputFormatTrans(schema::MetaGraphT *graph); STATUS DoNodeInoutFormatTrans(schema::MetaGraphT *graph); + void TransformAttrByAxes(int *origin_attr, int *axes, int element_size); + + void TransformOpAxisAttr(int *origin_axis, int element_size); + + STATUS ChangeOpSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); + + STATUS ChangeOpStridedSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); + + STATUS ChangeOpSliceAndStridedSlice(schema::MetaGraphT *graph, const std::unique_ptr<schema::CNodeT> &node); + int GetFormat(const schema::CNodeT &); STATUS GetInsertFormatTrans(const schema::CNodeT &node, FormatTransNodeType *beforeNodeType, FormatTransNodeType *afterNodeType); protected: - size_t id = 0; + size_t id_ = 0; + converter::FmkType fmk_type_ = converter::FmkType_TF; private: - QuantType quantType = QuantType_QUANT_NONE; - converter::FmkType fmkType = converter::FmkType_TF; + QuantType quant_type_ = QuantType_QUANT_NONE; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc index a5fb729bee..85813c12b4 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
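The various ChangeOp* helpers above all consult GetNc2NhAxisMap(). The mapping implied by the Crop branch and by TransformAttrByAxes is the following (the representation here is a sketch; the real map may differ in type):

    #include <unordered_map>
    std::unordered_map<int, int> Nc2NhAxisMapSketch() {
      // NCHW axis -> NHWC axis; C maps to -1, i.e. the last axis, and callers
      // normalize with (axis < 0 ? axis + 4 : axis).
      return {{0, 0}, {1, -1}, {2, 1}, {3, 2}};
    }

So, for example, a Concat over NCHW axis 1 (channels) becomes a Concat over NHWC axis 3, and Crop offsets given for the H/W axes gain a trailing 0 to cover the channel dimension that moved to the end.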
@@ -41,8 +41,7 @@ STATUS GlobalFormatTransformPass::Run(MetaGraphT *graph) { if (type != PrimitiveType_Transpose) { continue; } - MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr); - if (node->primitive->value.AsTranspose()->perm != nchw2nhwc_perm) { + if (GetTransposePerm(graph, node) != nchw2nhwc_perm) { continue; } std::vector<size_t> pre_nh2nc_nodes; @@ -184,9 +183,7 @@ STATUS GlobalFormatTransformPass::FindPreNh2NcNodes(MetaGraphT *graph, size_t nc auto &pre_node = graph->nodes.at(input_node_index); MS_ASSERT(pre_node != nullptr); auto node_type = pre_node->primitive->value.type; - MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr); - if (node_type == schema::PrimitiveType_Transpose && - pre_node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm) { + if (node_type == schema::PrimitiveType_Transpose && GetTransposePerm(graph, pre_node) == nhwc2nchw_perm) { if (!IsContain(*pre_nh2nc_nodes, input_node_index)) { pre_nh2nc_nodes->emplace_back(input_node_index); } @@ -203,7 +200,7 @@ STATUS GlobalFormatTransformPass::FindPreNh2NcNodes(MetaGraphT *graph, size_t nc } for (auto pre_node_output_index : pre_node_output_indexs) { MS_ASSERT(graph->nodes.size() > pre_node_output_index); - if (graph->nodes.at(pre_node_output_index)->primitive->value.type == schema::PrimitiveType_Pad) { + if (graph->nodes.at(pre_node_output_index)->primitive->value.type == schema::PrimitiveType_PadFusion) { pre_nh2nc_nodes->clear(); pre_not_trans_nodes->clear(); return RET_OK; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.h index d0a7df793a..68f7161397 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/global_format_transform_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,15 +25,16 @@ #include <utility> #include "tools/common/graph_util.h" #include "tools/converter/optimizer.h" +#include "tools/converter/legacy_optimizer/graph/format_trans_pass.h" using mindspore::schema::TensorT; namespace mindspore { namespace lite { -class GlobalFormatTransformPass : public GraphPass { +class GlobalFormatTransformPass : public FormatTransPass { public: GlobalFormatTransformPass() = default; - ~GlobalFormatTransformPass() = default; + ~GlobalFormatTransformPass() override = default; STATUS Run(MetaGraphT *graph) override; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc index 36a28edc0e..33176e7639 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
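What this pass eliminates is a Transpose pair whose perms compose to the identity; deriving it from FormatTransPass (the header change above) lets it share the perm helpers now that perm is an input tensor. The perm values are the standard ones used throughout the converter:

    const std::vector<int32_t> nchw2nhwc_perm{0, 2, 3, 1};
    const std::vector<int32_t> nhwc2nchw_perm{0, 3, 1, 2};
    // Applying one perm and then the other maps every axis back to itself, so a
    // nhwc2nchw Transpose feeding a nchw2nhwc Transpose can be dropped, unless a
    // consumer such as PadFusion still depends on the intermediate NCHW layout
    // (the early return above that clears pre_nh2nc_nodes guards that case).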
@@ -16,20 +16,23 @@ #include "tools/converter/legacy_optimizer/graph/infershape_pass.h" #include <vector> +#include "src/common/common.h" #include "src/common/log_adapter.h" #include "include/errorcode.h" #include "src/tensor.h" #include "src/tensorlist.h" -#include "src/ops/primitive_c.h" +#include "src/common/prim_util.h" +#include "src/ops/populate/populate_register.h" +#include "src/runtime/infer_manager.h" +#include "tools/common/node_util.h" -using mindspore::lite::PrimitiveC; using mindspore::lite::Tensor; namespace mindspore { namespace lite { namespace { constexpr int DEFAULT_DIM_VALUE = -1; -} -namespace { +constexpr size_t INITIAL_SIZE = 1024; + void FreeTensors(std::vector<Tensor *> input_tensors, std::vector<Tensor *> output_tensors) { for (auto &tensor : input_tensors) { delete tensor; @@ -112,6 +115,35 @@ std::vector<Tensor *> ConvertTensorToLiteTensor(MetaGraphT *graph, const std::ve } return lite_tensors; } + +STATUS NodeInferShape(const std::unique_ptr<schema::CNodeT> &node, const std::vector<Tensor *> &inputs, + std::vector<Tensor *> *outputs) { + flatbuffers::FlatBufferBuilder fbb(INITIAL_SIZE); + auto prim = ConvertToPrimitive(node->primitive.get(), &fbb); + if (prim == nullptr) { + MS_LOG(ERROR) << "get primitive failed."; + fbb.Clear(); + return RET_ERROR; + } + auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim->value_type(), SCHEMA_CUR); + if (parameter_gen == nullptr) { + fbb.Clear(); + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << schema::EnumNamePrimitiveType(prim->value_type()); + return RET_ERROR; + } + auto parameter = parameter_gen(prim); + if (parameter == nullptr) { + fbb.Clear(); + MS_LOG(ERROR) << "parameter is nullptr."; + return RET_ERROR; + } + parameter->infer_flag_ = true; + auto ret = KernelInferShape(inputs, outputs, parameter); + fbb.Clear(); + free(parameter); + return ret; +} + void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &output_tensors) { int i = 0; for (auto input_tensor : input_tensors) { @@ -165,26 +197,14 @@ STATUS InferShapePass::Run(MetaGraphT *graph) { FreeTensors(input_tensors, output_tensors); return RET_INFER_ERR; } - std::unique_ptr<PrimitiveT> primitiveT(new (std::nothrow) PrimitiveT(*node->primitive)); - if (primitiveT == nullptr) { - MS_LOG(ERROR) << "copy primitiveT error"; - FreeTensors(input_tensors, output_tensors); - return RET_ERROR; - } - auto primitiveC = std::shared_ptr<PrimitiveC>(PrimitiveC::Create(primitiveT.release())); - if (primitiveC == nullptr) { - MS_LOG(ERROR) << "unpack primitiveT error"; - FreeTensors(input_tensors, output_tensors); - return RET_ERROR; - } - auto ret = primitiveC->InferShape(input_tensors, output_tensors); + auto status = NodeInferShape(node, input_tensors, &output_tensors); MS_LOG(DEBUG) << "cur node:" << node->name; - if (ret == RET_INFER_INVALID) { + if (status == RET_INFER_INVALID) { MS_LOG(INFO) << "InferShape shouldn't be done before runtime, name: " << node->name << ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type) << "flag set to false."; FreeTensors(input_tensors, output_tensors); return RET_INFER_INVALID; - } else if (ret != RET_OK) { + } else if (status != RET_OK) { MS_LOG(WARNING) << "InferShape failed, name: " << node->name << ", type: " << schema::EnumNamePrimitiveType(node->primitive->value.type); FreeTensors(input_tensors, output_tensors); diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.h
b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.h index fb5e1104d7..66e4c11392 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/infershape_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc index 54ccceaf24..1707277c19 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h index dba14fadf0..11e1ce4123 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/isolated_node_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
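The infershape_pass.cc rework above drops the PrimitiveC::InferShape path and reuses the runtime's shape inference. Condensed from the NodeInferShape function added in that hunk (error handling elided), the call chain is:

// The mutable PrimitiveT is re-serialized into a flatbuffer Primitive,
// populated into a malloc-allocated OpParameter, then handed to the same
// KernelInferShape the runtime uses.
flatbuffers::FlatBufferBuilder fbb(INITIAL_SIZE);
auto prim = ConvertToPrimitive(node->primitive.get(), &fbb);
auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim->value_type(), SCHEMA_CUR);
auto parameter = parameter_gen(prim);
parameter->infer_flag_ = true;                            // ask for static shape inference
auto ret = KernelInferShape(inputs, outputs, parameter);  // RET_OK, RET_INFER_INVALID, or an error code
fbb.Clear();
free(parameter);                                          // populate functions allocate with malloc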
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/nested_loop_expand_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/nested_loop_expand_pass.cc index 0a987af9f5..32e8ba1e39 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/nested_loop_expand_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/nested_loop_expand_pass.cc @@ -28,15 +28,15 @@ namespace mindspore { namespace lite { bool NestedLoopExpandPass::IsNestedPartial(const std::unique_ptr<CNodeT> &node) { - if (node->primitive->value.type != PrimitiveType_Partial) { + if (node->primitive->value.type != PrimitiveType_PartialFusion) { return false; } - auto subgraph_idx = ((schema::PartialT *)(node->primitive->value.value))->subGraphIndex; + auto subgraph_idx = ((schema::PartialFusionT *)(node->primitive->value.value))->sub_graph_index; auto &this_subgraph = graph_->subGraph.at(subgraph_idx); for (auto &node_idx : this_subgraph->nodeIndices) { auto &cnode = graph_->nodes.at(node_idx); - if (cnode->primitive->value.type == PrimitiveType_Partial) { + if (cnode->primitive->value.type == PrimitiveType_PartialFusion) { return true; } } @@ -52,7 +52,7 @@ void NestedLoopExpandPass::ReplacePartialNodeWithSubgraph(const std::unique_ptr< continue; } is_changed = true; - auto subgraph_idx = ((schema::PartialT *)(node->primitive->value.value))->subGraphIndex; + auto subgraph_idx = ((schema::PartialFusionT *)(node->primitive->value.value))->sub_graph_index; auto &this_subgraph = graph_->subGraph.at(subgraph_idx); subgraph_to_drop_.push_back(subgraph_idx); iter = main_graph->nodeIndices.erase(iter); @@ -77,8 +77,8 @@ STATUS NestedLoopExpandPass::Run(schema::MetaGraphT *graph) { for (auto &node_idx : main_graph->nodeIndices) { auto &node = graph_->nodes.at(node_idx); - if (node->primitive->value.type == PrimitiveType_Partial) { - auto &subgraph_idx = ((schema::PartialT *)(node->primitive->value.value))->subGraphIndex; + if (node->primitive->value.type == PrimitiveType_PartialFusion) { + auto &subgraph_idx = ((schema::PartialFusionT *)(node->primitive->value.value))->sub_graph_index; if (graph_->subGraph.at(subgraph_idx) == nullptr) { node = nullptr; continue; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.cc index 8e96c86f6a..68660deabe 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,18 +14,16 @@ * limitations under the License. 
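nested_loop_expand_pass.cc above repeats one cast-and-read pattern for the renamed op: Partial becomes PartialFusion and subGraphIndex becomes sub_graph_index. A small helper shows the pattern once; the name GetSubgraphIndex is illustrative and not part of the patch:

// Hypothetical helper: returns the subgraph index carried by a PartialFusion
// node, or -1 for any other primitive type.
int GetSubgraphIndex(const std::unique_ptr<schema::CNodeT> &node) {
  if (node == nullptr || node->primitive == nullptr ||
      node->primitive->value.type != schema::PrimitiveType_PartialFusion) {
    return -1;
  }
  auto *partial = static_cast<schema::PartialFusionT *>(node->primitive->value.value);
  return partial == nullptr ? -1 : static_cast<int>(partial->sub_graph_index);
}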
*/ +#include "tools/converter/legacy_optimizer/graph/select_pass.h" #include <vector> #include <map> #include <algorithm> -#include "tools/converter/legacy_optimizer/graph/select_pass.h" #include "src/common/log_adapter.h" #include "include/errorcode.h" -#include "src/ops/primitive_c.h" #include "src/common/utils.h" #include "tools/common/graph_util.h" namespace mindspore::lite { - STATUS SelectPass::Run(mindspore::schema::MetaGraphT *graph) { MS_ASSERT(graph != nullptr); for (size_t i = 0; i < graph->nodes.size(); i++) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.h index 049b4a41a4..e6d58b032e 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/select_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.cc index aed7f1ff7a..6622509d95 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.h index 594c9a0ca1..213cda8a3b 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_node_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc index ba79e2c480..f2294dbd97 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.h
index 69fc02fad4..c806592811 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.h
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/subgraph_tensor_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
index 9e9991629d..f5966043d4 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -21,7 +21,6 @@
 #include "tools/converter/legacy_optimizer/graph/switch_pass.h"
 #include "src/common/log_adapter.h"
 #include "include/errorcode.h"
-#include "src/ops/primitive_c.h"
 #include "src/common/utils.h"
 #include "tools/common/graph_util.h"
 
@@ -57,20 +56,20 @@ STATUS SwitchPass::Run(mindspore::schema::MetaGraphT *graph) {
   for (size_t i = 0; i < graph->nodes.size(); ++i) {
     auto &node = graph->nodes.at(i);
     auto type = node->primitive->value.type;
-    if (type != schema::PrimitiveType_Partial) {
+    if (type != schema::PrimitiveType_PartialFusion) {
       continue;
     }
     MS_ASSERT(node->primitive != nullptr);
-    MS_ASSERT(node->primitive->value.AsPartial() != nullptr);
-    auto partial_prim = node->primitive->value.AsPartial();
-    if (partial_prim->subGraphIndex == -1) {
+    MS_ASSERT(node->primitive->value.AsPartialFusion() != nullptr);
+    auto partial_prim = node->primitive->value.AsPartialFusion();
+    if (partial_prim->sub_graph_index == -1) {
       continue;
     }
-    if (sub_graph_index_map.find(partial_prim->subGraphIndex) == sub_graph_index_map.end()) {
-      MS_LOG(ERROR) << "subGraphIndex is illegal";
+    if (sub_graph_index_map.find(partial_prim->sub_graph_index) == sub_graph_index_map.end()) {
+      MS_LOG(ERROR) << "sub_graph_index is illegal";
       return RET_ERROR;
     }
-    partial_prim->subGraphIndex = sub_graph_index_map[partial_prim->subGraphIndex];
+    partial_prim->sub_graph_index = sub_graph_index_map[partial_prim->sub_graph_index];
   }
   return RET_OK;
 }
@@ -125,8 +124,9 @@ STATUS SingleSwitchPass::UpdateSwitchUser() {
 bool SingleSwitchPass::IsLoop() {
   for (auto &node : second_graph_nodes_) {
-    if (node->primitive->value.type == schema::PrimitiveType_Partial && node->primitive->value.AsPartial() != nullptr &&
-        node->primitive->value.AsPartial()->subGraphIndex == first_subgraph_index_) {
+    if (node->primitive->value.type == schema::PrimitiveType_PartialFusion &&
+        node->primitive->value.AsPartialFusion() != nullptr &&
+        node->primitive->value.AsPartialFusion()->sub_graph_index == first_subgraph_index_) {
       body_to_cond_partial_node_ = node;
       return true;
     }
@@ -467,16 +467,16 @@ STATUS SingleSwitchPass::Init() {
   }
 
   // get cond_graph_nodes_
-  MS_ASSERT(first_partial_node_->primitive->value.AsPartial() != nullptr);
-  first_subgraph_index_ = first_partial_node_->primitive->value.AsPartial()->subGraphIndex;
+  MS_ASSERT(first_partial_node_->primitive->value.AsPartialFusion() != nullptr);
+  first_subgraph_index_ = first_partial_node_->primitive->value.AsPartialFusion()->sub_graph_index;
   auto cond_node_indices = graph_->subGraph.at(first_subgraph_index_)->nodeIndices;
   for (auto &index : cond_node_indices) {
     first_graph_nodes_.push_back(graph_->nodes.at(index).get());
   }
 
   // get second_graph_nodes_
-  MS_ASSERT(second_partial_node_->primitive->value.AsPartial() != nullptr);
-  second_subgraph_index_ = second_partial_node_->primitive->value.AsPartial()->subGraphIndex;
+  MS_ASSERT(second_partial_node_->primitive->value.AsPartialFusion() != nullptr);
+  second_subgraph_index_ = second_partial_node_->primitive->value.AsPartialFusion()->sub_graph_index;
   auto body_node_indices = graph_->subGraph.at(second_subgraph_index_)->nodeIndices;
   for (auto &index : body_node_indices) {
     second_graph_nodes_.push_back(graph_->nodes.at(index).get());
   }
@@ -624,8 +624,8 @@ STATUS SingleSwitchPass::UpdateSubgraphOutput(const size_t &subgraph_index, sche
 STATUS SingleSwitchPass::ConcatCondSubgraphInputAndOutput() {
   if (first_subgraph_index_ == -1) {
     MS_ASSERT(first_partial_node_->primitive != nullptr);
-    MS_ASSERT(first_partial_node_->primitive->value.AsPartial() != nullptr);
-    first_partial_node_->primitive->value.AsPartial()->subGraphIndex = -1;
+    MS_ASSERT(first_partial_node_->primitive->value.AsPartialFusion() != nullptr);
+    first_partial_node_->primitive->value.AsPartialFusion()->sub_graph_index = -1;
     return RET_OK;
   }
   int ret = UpdateSubgraphInput(first_subgraph_index_, first_partial_node_, first_graph_nodes_);
@@ -645,8 +645,8 @@ STATUS SingleSwitchPass::ConcatCondSubgraphInputAndOutput() {
 STATUS SingleSwitchPass::ConcatBodySubgraphInputAndOutput() {
   if (second_subgraph_index_ == -1) {
     MS_ASSERT(first_partial_node_->primitive != nullptr);
-    MS_ASSERT(first_partial_node_->primitive->value.AsPartial() != nullptr);
-    first_partial_node_->primitive->value.AsPartial()->subGraphIndex = -1;
+    MS_ASSERT(first_partial_node_->primitive->value.AsPartialFusion() != nullptr);
+    first_partial_node_->primitive->value.AsPartialFusion()->sub_graph_index = -1;
     return RET_OK;
   }
   int ret = UpdateSubgraphInput(second_subgraph_index_, second_partial_node_, second_graph_nodes_);
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.h
index 3dd5c8acfa..78230f9be4 100644
--- a/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.h
+++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/switch_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc index 521c35d729..93f44447d0 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/tensor_quant_pass.cc @@ -22,7 +22,8 @@ #include "tools/common/tensor_util.h" namespace mindspore::lite { -STATUS TensorQuantPass::Run(schema::MetaGraphT *graph) { +namespace { +STATUS PreHandleQuantDtypeCast(schema::MetaGraphT *graph) { MS_ASSERT(graph != nullptr); for (auto &node : graph->nodes) { if (node == nullptr || node->primitive == nullptr) { @@ -32,98 +33,123 @@ STATUS TensorQuantPass::Run(schema::MetaGraphT *graph) { if (node->primitive->value.type == PrimitiveType_QuantDTypeCast) { auto attr = node->primitive->value.AsQuantDTypeCast(); auto &inputTensor = graph->allTensors.at(node->inputIndex.front()); - inputTensor->dataType = attr->srcT; + inputTensor->dataType = attr->src_t; auto &outputTensor = graph->allTensors.at(node->outputIndex.front()); - outputTensor->dataType = attr->dstT; + outputTensor->dataType = attr->dst_t; - if (attr->srcT == TypeId::kNumberTypeUInt8) { - attr->srcT = TypeId::kNumberTypeInt8; + if (attr->src_t == TypeId::kNumberTypeUInt8) { + attr->src_t = TypeId::kNumberTypeInt8; } - if (attr->dstT == TypeId::kNumberTypeUInt8) { - attr->dstT = TypeId::kNumberTypeInt8; + if (attr->dst_t == TypeId::kNumberTypeUInt8) { + attr->dst_t = TypeId::kNumberTypeInt8; } } } - unsigned int index = -1; + return RET_OK; +} + +STATUS ComputeDataToInt8(const std::unique_ptr<TensorT> &tensor, int32_t index) { + MS_ASSERT(tensor != nullptr); + size_t wShapeSize = tensor->data.empty() ? 0 : GetShapeSize(*(tensor.get())); + void *oriWeightData = tensor->data.data(); + std::vector<int8_t> qDatas(wShapeSize); + auto weightQauntParam = GetTensorQuantParam(tensor); + if (tensor->dataType == TypeId::kNumberTypeFloat || + tensor->dataType == TypeId::kNumberTypeFloat32) { // normal awareing quant + auto *weightData = static_cast<float *>(oriWeightData); + if (weightData == nullptr) { + return RET_OK; + } + for (size_t j = 0; j < wShapeSize; j++) { + qDatas[j] = quant::QuantizeData<int8_t>(weightData[j], weightQauntParam.get()); + } + } else { // convert uint8 to int8 + auto *weightData = static_cast<uint8_t *>(oriWeightData); + for (size_t j = 0; j < wShapeSize; j++) { + qDatas[j] = (int32_t)weightData[j] - 128; + } + weightQauntParam->zeroPoint -= 128; + tensor->quantParams.clear(); + tensor->quantParams.emplace_back(weightQauntParam.release()); + TensorDataType::GetInstance()->UpdateTensorType(index, TypeId::kNumberTypeUInt8); + } + tensor->dataType = TypeId::kNumberTypeInt8; + if (tensor->data.empty()) { + return RET_OK; + } + tensor->data.clear(); + tensor->data.resize(wShapeSize * sizeof(int8_t)); + if (memcpy_s(tensor->data.data(), wShapeSize * sizeof(int8_t), qDatas.data(), wShapeSize * sizeof(int8_t)) != EOK) { + MS_LOG(ERROR) << "memcpy_s failed"; + return RET_ERROR; + } + return RET_OK; +} + +STATUS ComputeDataToInt32(const std::unique_ptr<TensorT> &tensor) { + MS_ASSERT(tensor != nullptr); + auto bShapeSize = GetShapeSize(*(tensor)); + std::unique_ptr<int32_t[]> qDatas(new (std::nothrow) int32_t[bShapeSize]); + if (qDatas == nullptr) { + MS_LOG(ERROR) << "new qDatas failed"; + return RET_ERROR; + } + void *biasData = tensor->data.data(); + auto *rawDatas = static_cast<float *>(biasData); + if (fabs(tensor->quantParams.front()->scale) <= 
0.0f) { + MS_LOG(ERROR) << "divisor 'scale' cannot be 0"; + return RET_ERROR; + } + for (size_t i = 0; i < bShapeSize; ++i) { + qDatas[i] = (int32_t)std::round(rawDatas[i] / tensor->quantParams.front()->scale); + } + tensor->dataType = TypeId::kNumberTypeInt32; + tensor->data.clear(); + tensor->data.resize(bShapeSize * sizeof(int32_t)); + if (memcpy_s(tensor->data.data(), bShapeSize * sizeof(int32_t), qDatas.get(), bShapeSize * sizeof(int32_t)) != EOK) { + MS_LOG(ERROR) << "memcpy_s failed"; + return RET_ERROR; + } + return RET_OK; +} +} // namespace + +STATUS TensorQuantPass::Run(schema::MetaGraphT *graph) { + MS_ASSERT(graph != nullptr); + auto status = PreHandleQuantDtypeCast(graph); + if (status != RET_OK) { + MS_LOG(ERROR) << "pre adjust failed."; + return status; + } + int32_t index = 0; for (auto &tensor : graph->allTensors) { - index++; if (tensor->quantParams.empty() || !tensor->quantParams.front()->inited) { + index++; continue; } if (tensor->dataType != TypeId::kNumberTypeFloat32 && tensor->dataType != TypeId::kNumberTypeFloat && tensor->dataType != TypeId::kNumberTypeUInt8 && tensor->dataType != TypeId::kTypeUnknown) { + index++; continue; } - // perlayer - if (tensor->quantParams.size() == 1) { - auto &quantParam = tensor->quantParams.front(); - size_t wShapeSize = tensor->data.empty() ? 0 : GetShapeSize(*(tensor.get())); - void *oriWeightData = tensor->data.data(); - if (quantParam->dstDtype == TypeId::kNumberTypeUInt8 || quantParam->dstDtype == TypeId::kNumberTypeFloat32 || - quantParam->dstDtype == TypeId::kNumberTypeFloat) { - std::vector<int8_t> qDatas(wShapeSize); - auto weightQauntParam = GetTensorQuantParam(tensor); - if (tensor->dataType == TypeId::kNumberTypeFloat || - tensor->dataType == TypeId::kNumberTypeFloat32) { // normal awareing quant - auto *weightData = static_cast<float *>(oriWeightData); - if (weightData == nullptr) { - continue; - } - for (size_t j = 0; j < wShapeSize; j++) { - qDatas[j] = quant::QuantizeData<int8_t>(weightData[j], weightQauntParam.get()); - } - } else { // convert uint8 to int8 - auto *weightData = static_cast<uint8_t *>(oriWeightData); - for (size_t j = 0; j < wShapeSize; j++) { - qDatas[j] = (int32_t)weightData[j] - 128; - } - weightQauntParam->zeroPoint -= 128; - tensor->quantParams.clear(); - tensor->quantParams.emplace_back(weightQauntParam.release()); - TensorDataType::GetInstance()->UpdateTensorType(index, TypeId::kNumberTypeUInt8); - } - tensor->dataType = TypeId::kNumberTypeInt8; - if (!tensor->data.empty()) { - tensor->data.clear(); - tensor->data.resize(wShapeSize * sizeof(int8_t)); - auto ret = - memcpy_s(tensor->data.data(), wShapeSize * sizeof(int8_t), qDatas.data(), wShapeSize * sizeof(int8_t)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy_s failed: " << ret; - return RET_ERROR; - } - } - } else if (quantParam->dstDtype == TypeId::kNumberTypeInt32) { - // quant bias data - auto bShapeSize = GetShapeSize(*(tensor.get())); - std::unique_ptr<int32_t[]> qDatas(new (std::nothrow) int32_t[bShapeSize]); - if (qDatas == nullptr) { - MS_LOG(ERROR) << "new qDatas failed"; - return RET_ERROR; - } - void *biasData = tensor->data.data(); - auto *rawDatas = static_cast<float *>(biasData); - if (fabs(quantParam->scale) <= 0.0f) { - MS_LOG(ERROR) << "divisor 'scale' cannot be 0"; - return RET_ERROR; - } - for (size_t i = 0; i < bShapeSize; ++i) { - qDatas[i] = (int32_t)std::round(rawDatas[i] / quantParam->scale); - } - tensor->dataType = TypeId::kNumberTypeInt32; - tensor->data.clear(); - tensor->data.resize(bShapeSize * 
sizeof(int32_t)); - auto ret = - memcpy_s(tensor->data.data(), bShapeSize * sizeof(int32_t), qDatas.get(), bShapeSize * sizeof(int32_t)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy_s failed: " << ret; - return RET_ERROR; - } - } - } else { // perchannel + if (tensor->quantParams.size() != 1) { // perchannel MS_LOG(ERROR) << "perchannel doquant is not supported yet"; return RET_ERROR; } + // perlayer + auto &quantParam = tensor->quantParams.front(); + if (quantParam->dstDtype == TypeId::kNumberTypeUInt8 || quantParam->dstDtype == TypeId::kNumberTypeFloat32 || + quantParam->dstDtype == TypeId::kNumberTypeFloat) { + status = ComputeDataToInt8(tensor, index); + } else if (quantParam->dstDtype == TypeId::kNumberTypeInt32) { + // quant bias data + status = ComputeDataToInt32(tensor); + } + if (status != RET_OK) { + MS_LOG(ERROR) << "compute data to int8 or int32 failed."; + return status; + } + index++; } return RET_OK; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc index bfb129eee9..1e875ec8a6 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -69,7 +69,7 @@ STATUS TopologicalSortPass::Run(schema::MetaGraphT *graph) { graph->subGraph[i]->nodeIndices.swap(new_subgraph_node_indices); } if (new_nodes.size() != old_nodes.size()) { - MS_LOG(ERROR) << "Unknow error in TopologicalSort, old_nodes size: " << old_nodes.size() + MS_LOG(ERROR) << "Unknown error in TopologicalSort, old_nodes size: " << old_nodes.size() << ", new_nodes size: " << new_nodes.size(); return RET_ERROR; } diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h index 09884d2db3..a2cf0676a2 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/topological_sort_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc index 69d6b89c76..482f1f65f8 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
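The tensor_quant_pass.cc refactor above splits TensorQuantPass::Run into two numeric paths, ComputeDataToInt8 and ComputeDataToInt32. The arithmetic, restated compactly (the affine weight formula is an assumption about quant::QuantizeData<int8_t>; the uint8 rebase and the bias rounding are taken directly from the hunk):

#include <algorithm>
#include <cmath>
#include <cstdint>

// float weight -> int8, assumed affine form of quant::QuantizeData<int8_t>
int8_t QuantizeWeight(float x, float scale, int32_t zero_point) {
  int32_t q = static_cast<int32_t>(std::round(x / scale)) + zero_point;
  return static_cast<int8_t>(std::min(127, std::max(-128, q)));
}

// uint8 weight -> int8: rebase by -128; the zero point shifts by -128 as well
int8_t RebaseUint8(uint8_t u) { return static_cast<int8_t>(static_cast<int32_t>(u) - 128); }

// float bias -> int32: round by the (checked non-zero) scale, no zero point
int32_t QuantizeBias(float x, float scale) { return static_cast<int32_t>(std::round(x / scale)); }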
@@ -39,10 +39,10 @@ bool IsInOutCanFusion(schema::MetaGraphT *graph, const std::vector<size_t> &node MS_ASSERT(pre_node->primitive->value != nullptr); if (*trans_type == kNONE) { if (pre_node->primitive->value.type == schema::PrimitiveType_Transpose) { - MS_ASSERT(pre_node->primitive->value.AsTranspose() != nullptr); - if (pre_node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm) { + auto perm = GetTransposePerm(graph, pre_node); + if (perm == nchw2nhwc_perm) { *trans_type = kNCHW2NHWC; - } else if (pre_node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm) { + } else if (perm == nhwc2nchw_perm) { *trans_type = kNHWC2NCHW; } else { return false; @@ -52,9 +52,10 @@ bool IsInOutCanFusion(schema::MetaGraphT *graph, const std::vector<size_t> &node } else { if (pre_node->primitive->value.type == schema::PrimitiveType_Transpose) { auto cur_type = kNONE; - if (pre_node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm) { + auto perm = GetTransposePerm(graph, pre_node); + if (perm == nchw2nhwc_perm) { cur_type = kNCHW2NHWC; - } else if (pre_node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm) { + } else if (perm == nhwc2nchw_perm) { cur_type = kNHWC2NCHW; } else { return false; @@ -95,7 +96,7 @@ bool TransOpInsertPass::CanFusion(schema::MetaGraphT *graph, const std::unique_p MS_ASSERT(node->primitive->value != nullptr); MS_ASSERT(node->primitive->value.AsActivation() != nullptr); if (node->primitive->value.AsActivation() != nullptr && - node->primitive->value.AsActivation()->type == schema::ActivationType_LEAKY_RELU) { + node->primitive->value.AsActivation()->activation_type == schema::ActivationType_LEAKY_RELU) { return has_trans_count >= half_count; } } @@ -173,10 +174,6 @@ STATUS TransOpInsertPass::Run(schema::MetaGraphT *graph) { MS_LOG(ERROR) << "Insert" << pre_insert_trans_type_ << "before " << (*iter)->name << " failed"; return status; } - if ((*iter)->primitive->value.type == schema::PrimitiveType_StridedSlice || - (*iter)->primitive->value.type == schema::PrimitiveType_Slice) { - break; - } } auto output_tensor_size = (*iter)->outputIndex.size(); for (size_t i = 0; i < output_tensor_size; i++) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h index e3172302a7..a9e785e369 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_insert_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -38,10 +38,6 @@ class TransOpInsertPass : public FormatTransPass { STATUS FindOutTransType(); - void TransformAttrByAxes(int *origin_attr, int *axes, int element_size); - - STATUS ChangeOpAttrForSlice(schema::MetaGraphT *graph, const std::unique_ptr<CNodeT> &node); - private: FormatTransNodeType pre_insert_trans_type_ = kNHWC2NCHW; FormatTransNodeType post_insert_trans_type_ = kNHWC2NCHW; diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc index 69d1d0d4ab..a0246a6dc6 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,9 +20,7 @@ #include "include/errorcode.h" #include "tools/common/graph_util.h" #include "src/tensor.h" -#include "src/ops/primitive_c.h" -using mindspore::lite::PrimitiveC; using mindspore::lite::Tensor; namespace mindspore { namespace { @@ -35,9 +33,8 @@ STATUS TransOpRemovePass::Run(MetaGraphT *graph) { for (auto iter = graph->nodes.begin(); iter != graph->nodes.end(); iter++) { auto &node = *iter; auto type = node->primitive->value.type; - if (type == schema::PrimitiveType_Transpose && node->primitive->value.AsTranspose() != nullptr && - (node->primitive->value.AsTranspose()->perm == nchw2nhwc_perm || - node->primitive->value.AsTranspose()->perm == nhwc2nchw_perm)) { + auto perm = GetTransposePerm(graph, node); + if (type == schema::PrimitiveType_Transpose && (perm == nchw2nhwc_perm || perm == nhwc2nchw_perm)) { auto &input_tensor = graph->allTensors.at(node->inputIndex.at(0)); // less than 4 dims can delete if (!input_tensor->dims.empty() && input_tensor->dims.size() < 4) { diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h index 8d5ce11484..30b8e20e27 100644 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h +++ b/mindspore/lite/tools/converter/legacy_optimizer/graph/trans_format_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc deleted file mode 100644 index a25e05bd7f..0000000000 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.cc +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h" -#include <queue> -#include "src/common/log_adapter.h" -#include "tools/common/graph_util.h" -#include "include/errorcode.h" -#include "schema/inner/model_generated.h" - -namespace mindspore { -namespace lite { -STATUS UnusedNodeRemovePass::Run(schema::MetaGraphT *graph) { - MS_ASSERT(graph != nullptr); - bool ifChanged = false; - for (size_t i = 0; i < graph->nodes.size(); i++) { - auto &node = graph->nodes.at(i); - if (node->primitive->value.type == schema::PrimitiveType_TupleGetItem) { - ifChanged = true; - auto status = IsolateOneWayNode(graph, i); - if (status != RET_OK) { - MS_LOG(ERROR) << "IsolateOneWayNode failed, subGraph: " << graph->name << ", node: " << node->name - << ", error: " << status; - return status; - } - } - } - return ifChanged ? RET_OK : RET_NO_CHANGE; -} -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h b/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h deleted file mode 100644 index 647ea39e49..0000000000 --- a/mindspore/lite/tools/converter/legacy_optimizer/graph/unused_node_remove_pass.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_LEGACY_OPTIMIZER_GRAGP_UNUSED_NODE_REMOVE_PASS_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_LEGACY_OPTIMIZER_GRAGP_UNUSED_NODE_REMOVE_PASS_H - -#include <unordered_map> -#include "tools/converter/optimizer.h" - -namespace mindspore { -namespace lite { -class UnusedNodeRemovePass : public GraphPass { - public: - UnusedNodeRemovePass() = default; - - ~UnusedNodeRemovePass() override = default; - - STATUS Run(schema::MetaGraphT *graph) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_LEGACY_OPTIMIZER_GRAGP_UNUSED_NODE_REMOVE_PASS_H diff --git a/mindspore/lite/tools/converter/model_parser.h b/mindspore/lite/tools/converter/model_parser.h index 3c223abe7f..d7eb1c4163 100644 --- a/mindspore/lite/tools/converter/model_parser.h +++ b/mindspore/lite/tools/converter/model_parser.h @@ -20,9 +20,11 @@ #include <string> #include <memory> #include "schema/inner/model_generated.h" -#include "tools/anf_importer/import_from_meta_graphT.h" #include "ir/anf.h" +#include "ir/func_graph.h" #include "tools/converter/converter_context.h" +#include "tools/converter/converter_flags.h" +#include "tools/converter/quant_param_holder.h" namespace mindspore::lite { using namespace schema; @@ -33,38 +35,7 @@ class ModelParser { virtual ~ModelParser() = default; virtual FuncGraphPtr Parse(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) { - auto *meta_graph = ParseToFb(model_file, weight_file, quant_type); - if (meta_graph == nullptr) { - MS_LOG(ERROR) << "parse model to fb failed"; - return nullptr; - } - auto func_graph = this->Fb2Anf(meta_graph); - delete (meta_graph); - return func_graph; - } - - protected: - virtual schema::MetaGraphT *ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type = QuantType_QUANT_NONE) = 0; - - public: - static FuncGraphPtr Fb2Anf(schema::MetaGraphT *meta_graph) { - if (meta_graph == nullptr) { - MS_LOG(ERROR) << "meta_graph is null"; - ReturnCode::GetSingleReturnCode()->UpdateReturnCode(RET_NULL_PTR); - return nullptr; - } - auto func_graph = std::make_shared<FuncGraph>(); - AnfImporterFromMetaGraphT importer(meta_graph, func_graph); - auto status = importer.Import(); - if (RET_OK != status) { - MS_LOG(ERROR) << "Import anf_graph from meta_graphT failed, ret: " << status; - ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status); - return nullptr; - } - return func_graph; - } + const QuantType &quant_type) = 0; }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/ops/enter.h b/mindspore/lite/tools/converter/ops/enter.h index 6a7d58749a..9290b9c40d 100644 --- a/mindspore/lite/tools/converter/ops/enter.h +++ b/mindspore/lite/tools/converter/ops/enter.h @@ -14,25 +14,26 @@ * limitations under the License. 
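model_parser.h above reduces ModelParser to a single pure-virtual Parse that returns a FuncGraphPtr directly; the ParseToFb/Fb2Anf detour through schema::MetaGraphT is gone. A minimal hypothetical subclass under the new contract ("ToyModelParser" is illustrative only):

class ToyModelParser : public mindspore::lite::ModelParser {
 public:
  FuncGraphPtr Parse(const std::string &model_file, const std::string &weight_file,
                     const QuantType &quant_type) override {
    auto func_graph = std::make_shared<FuncGraph>();
    // ... read model_file / weight_file and build CNodes onto func_graph ...
    return func_graph;  // callers treat nullptr as failure
  }
};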
*/ -#ifndef LITE_MINDSPORE_LITE_C_OPS_ENTER_H_ -#define LITE_MINDSPORE_LITE_C_OPS_ENTER_H_ +#ifndef LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_ENTER_H_ +#define LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_ENTER_H_ #include <vector> #include <set> #include <cmath> -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" + +using mindspore::ops::PrimitiveC; namespace mindspore { namespace lite { - +constexpr auto kNameEnter = "Enter"; class Enter : public PrimitiveC { public: - Enter() { op_type_ = ConverterPrimitiveType_Enter; } + Enter() : PrimitiveC(kNameEnter) {} ~Enter() = default; MS_DECLARE_PARENT(Enter, PrimitiveC); - explicit Enter(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} }; } // namespace lite } // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_ENTER_H_ +#endif // LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_ENTER_H_ diff --git a/mindspore/lite/tools/converter/ops/exit.h b/mindspore/lite/tools/converter/ops/exit.h index 1b0b497ccc..5e0c431d83 100644 --- a/mindspore/lite/tools/converter/ops/exit.h +++ b/mindspore/lite/tools/converter/ops/exit.h @@ -14,25 +14,26 @@ * limitations under the License. */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_EXIT_H_ -#define LITE_MINDSPORE_LITE_C_OPS_EXIT_H_ +#ifndef LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_EXIT_H_ +#define LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_EXIT_H_ #include <vector> #include <set> #include <cmath> -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" + +using mindspore::ops::PrimitiveC; namespace mindspore { namespace lite { - +constexpr auto kNameExit = "Exit"; class Exit : public PrimitiveC { public: - Exit() { op_type_ = ConverterPrimitiveType_Exit; } + Exit() : PrimitiveC(kNameExit) {} ~Exit() = default; MS_DECLARE_PARENT(Exit, PrimitiveC); - explicit Exit(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} }; } // namespace lite } // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_EXIT_H_ +#endif // LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_EXIT_H_ diff --git a/mindspore/lite/tools/converter/ops/loop_cond.h b/mindspore/lite/tools/converter/ops/loop_cond.h index 9ac8617cd3..25cbc4d096 100644 --- a/mindspore/lite/tools/converter/ops/loop_cond.h +++ b/mindspore/lite/tools/converter/ops/loop_cond.h @@ -14,25 +14,26 @@ * limitations under the License. */ -#ifndef LITE_MINDSPORE_LITE_C_OPS_LOOPCOND_H_ -#define LITE_MINDSPORE_LITE_C_OPS_LOOPCOND_H_ +#ifndef LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_LOOP_COND_H_ +#define LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_LOOP_COND_H_ #include <vector> #include <set> #include <cmath> -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" + +using mindspore::ops::PrimitiveC; namespace mindspore { namespace lite { - +constexpr auto kNameLoopCond = "LoopCond"; class LoopCond : public PrimitiveC { public: - LoopCond() { op_type_ = ConverterPrimitiveType_LoopCond; } + LoopCond() : PrimitiveC(kNameLoopCond) {} ~LoopCond() = default; MS_DECLARE_PARENT(LoopCond, PrimitiveC); - explicit LoopCond(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} }; } // namespace lite } // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_LOOPCOND_H_ +#endif // LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_LOOP_COND_H_ diff --git a/mindspore/lite/tools/converter/ops/next_iteration.h b/mindspore/lite/tools/converter/ops/next_iteration.h index dc9ec18656..b98a8601b8 100644 --- a/mindspore/lite/tools/converter/ops/next_iteration.h +++ b/mindspore/lite/tools/converter/ops/next_iteration.h @@ -14,25 +14,26 @@ * limitations under the License. 
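enter.h and exit.h above, loop_cond.h, and next_iteration.h (continued below) all migrate the converter-only control-flow placeholders from src/ops/primitive_c.h to ops/primitive_c.h, replacing the op_type_ enum assignment with a name-based constructor. Declaring another such placeholder follows the same shape; "Merge" here is purely illustrative:

constexpr auto kNameMerge = "Merge";
class Merge : public PrimitiveC {
 public:
  Merge() : PrimitiveC(kNameMerge) {}  // the unified IR only needs the op name
  ~Merge() = default;
  MS_DECLARE_PARENT(Merge, PrimitiveC);
};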
*/ -#ifndef LITE_MINDSPORE_LITE_C_OPS_NEXTITERATION_H_ -#define LITE_MINDSPORE_LITE_C_OPS_NEXTITERATION_H_ +#ifndef LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_NEXT_ITERATION_H_ +#define LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_NEXT_ITERATION_H_ #include <vector> #include <set> #include <cmath> -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" + +using mindspore::ops::PrimitiveC; namespace mindspore { namespace lite { - +constexpr auto kNameNextIteration = "NextIteration"; class NextIteration : public PrimitiveC { public: - NextIteration() { op_type_ = ConverterPrimitiveType_NextIteration; } + NextIteration() : PrimitiveC(kNameNextIteration) {} ~NextIteration() = default; MS_DECLARE_PARENT(NextIteration, PrimitiveC); - explicit NextIteration(schema::PrimitiveT *primitive) : PrimitiveC(primitive) {} }; } // namespace lite } // namespace mindspore -#endif // LITE_MINDSPORE_LITE_C_OPS_NEXTITERATION_H_ +#endif // LITE_MINDSPORE_LITE_TOOLS_CONVERTER_OPS_NEXT_ITERATION_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.cc new file mode 100644 index 0000000000..ad802be17e --- /dev/null +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.cc @@ -0,0 +1,68 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tools/converter/parser/caffe/caffe_activation_parser.h" +#include <memory> +#include "ops/fusion/activation.h" + +namespace mindspore { +namespace lite { +ops::PrimitiveC *CaffeReluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::RELU); + + if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) { + float negative_slope = proto.relu_param().negative_slope(); + if (negative_slope != 0) { + prim->set_activation_type(mindspore::ActivationType::LEAKY_RELU); + prim->set_alpha(negative_slope); + } + } + + return prim.release(); +} + +ops::PrimitiveC *CaffeRelu6Parser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::RELU6); + + return prim.release(); +} + +ops::PrimitiveC *CaffeSigmoidParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::SIGMOID); + + return prim.release(); +} + +ops::PrimitiveC *CaffeTanhParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::TANH); + + return prim.release(); +} + +CaffeNodeRegistrar g_caffeReluParser("ReLU", new CaffeReluParser()); +CaffeNodeRegistrar g_caffeRelu6Parser("ReLU6", new CaffeRelu6Parser()); +CaffeNodeRegistrar g_caffeSigmoidParser("Sigmoid", new CaffeSigmoidParser()); +CaffeNodeRegistrar g_caffeTanhParser("TanH", new CaffeTanhParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.h new file mode 100644 index 0000000000..094a74e226 --- /dev/null +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_activation_parser.h @@ -0,0 +1,60 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
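caffe_activation_parser.cc above maps four Caffe layer types onto a single ops::Activation primitive, promoting ReLU to LEAKY_RELU when negative_slope is non-zero, and registers each parser under its Caffe layer-type string. How the model parser consumes the registry is not shown in this hunk; the lookup below is an assumption based on caffe_node_parser_registry.h:

// Hedged sketch: dispatch a Caffe layer to its registered node parser.
auto *node_parser = CaffeNodeParserRegistry::GetInstance()->GetNodeParser(layer.type());  // e.g. "ReLU"
if (node_parser == nullptr) {
  MS_LOG(ERROR) << "unsupported caffe layer type: " << layer.type();
} else {
  ops::PrimitiveC *prim = node_parser->Parse(layer, weight_layer);  // ops::Activation for ReLU/ReLU6/Sigmoid/TanH
}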
+ */ + +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ACTIVATION_PARSER_H_ +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ACTIVATION_PARSER_H_ + +#include <vector> +#include "tools/converter/parser/caffe/caffe_node_parser.h" +#include "tools/converter/parser/caffe/caffe_node_parser_registry.h" + +namespace mindspore { +namespace lite { +class CaffeReluParser : public CaffeNodeParser { + public: + CaffeReluParser() : CaffeNodeParser("relu") {} + ~CaffeReluParser() override = default; + + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; +}; + +class CaffeRelu6Parser : public CaffeNodeParser { + public: + CaffeRelu6Parser() : CaffeNodeParser("relu6") {} + ~CaffeRelu6Parser() override = default; + + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; +}; + +class CaffeSigmoidParser : public CaffeNodeParser { + public: + CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {} + ~CaffeSigmoidParser() override = default; + + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; +}; + +class CaffeTanhParser : public CaffeNodeParser { + public: + CaffeTanhParser() : CaffeNodeParser("tanh") {} + ~CaffeTanhParser() override = default; + + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; +}; +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ACTIVATION_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc index b5b9501700..f9d8748f4c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.cc @@ -16,41 +16,29 @@ #include "tools/converter/parser/caffe/caffe_argmax_parser.h" #include <memory> +#include "ops/fusion/arg_max_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *CaffeArgMaxParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeArgMaxParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::ArgMaxFusion>(); + + prim->set_keep_dims(true); + prim->set_out_max_value(false); + prim->set_top_k(1); - attr->outMaxValue = false; - attr->topK = 1; const caffe::ArgMaxParameter &argmaxParam = proto.argmax_param(); if (argmaxParam.has_out_max_val()) { - attr->outMaxValue = argmaxParam.out_max_val(); + prim->set_out_max_value(argmaxParam.out_max_val()); } if (argmaxParam.has_top_k()) { - attr->topK = argmaxParam.top_k(); + prim->set_top_k(argmaxParam.top_k()); } - int32_t axisType = 0; - int32_t axis = 0; - if (!argmaxParam.has_axis()) { - axisType = 2; - } else { - axisType = 1; - axis = (int64_t)argmaxParam.axis(); + if (argmaxParam.has_axis()) { + prim->set_axis(argmaxParam.axis()); } - attr->axis = axis; - attr->axisType = axisType; - attr->keepDims = true; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_ArgMax; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } 
CaffeNodeRegistrar g_caffeArgMaxParser("ArgMax", new CaffeArgMaxParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h index 590c7f73e0..317721d472 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_argmax_parser.h @@ -18,7 +18,6 @@ #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_ARGMAX_PARSER_H_ #include <vector> -#include "src/ops/primitive_c.h" #include "tools/converter/parser/caffe/caffe_node_parser.h" #include "tools/converter/parser/caffe/caffe_node_parser_registry.h" @@ -29,8 +28,7 @@ class CaffeArgMaxParser : public CaffeNodeParser { CaffeArgMaxParser() : CaffeNodeParser("argmax") {} ~CaffeArgMaxParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc index 65b9377045..fd3678191f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.cc @@ -18,18 +18,16 @@ #include <cmath> #include <memory> #include "tools/common/tensor_util.h" +#include "ops/batch_norm.h" namespace mindspore { namespace lite { using STATUS = int; +ops::PrimitiveC *CaffeBatchNormParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::BatchNorm>(); -PrimitiveC *CaffeBatchNormParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::BatchNormT> attr = std::make_unique<schema::BatchNormT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + prim->set_is_training(false); + prim->set_format(mindspore::NCHW); const caffe::BatchNormParameter &batchNormParam = proto.batch_norm_param(); if (proto.bottom_size() != 1) { @@ -43,21 +41,13 @@ PrimitiveC *CaffeBatchNormParser::ParseLitePrimitive(const caffe::LayerParameter return nullptr; } - if (batchNormParam.has_eps()) { - if (std::fabs(1e-5 - batchNormParam.eps()) < 1e-9) { - attr->epsilon = 1e-5; - } else { - auto tmpAuto = batchNormParam.eps(); - attr->epsilon = tmpAuto; - } - } else { - attr->epsilon = 1e-5; + float epsilon = 1e-5; + if (batchNormParam.has_eps() && std::fabs(1e-5 - batchNormParam.eps()) >= 1e-9) { + epsilon = batchNormParam.eps(); } + prim->set_epsilon(epsilon); - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_BatchNorm; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffeBatchNormParser("BatchNorm", new CaffeBatchNormParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h index c82487b6e0..f66ed322ff 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_batchnorm_parser.h @@ -18,7 +18,6 @@ #define 
MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_BATCHNORM_PARSER_H_ #include <vector> -#include "src/ops/primitive_c.h" #include "tools/converter/parser/caffe/caffe_node_parser.h" #include "tools/converter/parser/caffe/caffe_node_parser_registry.h" @@ -29,7 +28,7 @@ class CaffeBatchNormParser : public CaffeNodeParser { CaffeBatchNormParser() : CaffeNodeParser("batchnorm") {} ~CaffeBatchNormParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc index 3201b81333..4582425231 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.cc @@ -16,16 +16,12 @@ #include "tools/converter/parser/caffe/caffe_concat_parser.h" #include <memory> +#include "ops/concat.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeConcatParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeConcatParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Concat>(); const caffe::ConcatParameter &concatParam = proto.concat_param(); if (concatParam.has_axis() && concatParam.has_concat_dim()) { @@ -33,28 +29,21 @@ PrimitiveC *CaffeConcatParser::ParseLitePrimitive(const caffe::LayerParameter &p return nullptr; } + int64_t axis = 1; if (concatParam.has_concat_dim()) { MS_LOG(DEBUG) << "Concat dim , set axis: " << concatParam.concat_dim(); - auto concat_dim_value = (int32_t)concatParam.concat_dim(); - if (concat_dim_value < 0) { - MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << concat_dim_value; + axis = concatParam.concat_dim(); + if (axis < 0) { + MS_LOG(ERROR) << "concat_dim value in model is smaller than 0:" << axis; return nullptr; } - attr->axis = concat_dim_value; } else if (concatParam.has_axis()) { MS_LOG(DEBUG) << "set axis: " << concatParam.axis(); - auto tmpInt = (int32_t)concatParam.axis(); - attr->axis = tmpInt; - } else { - MS_LOG(DEBUG) << "by default, set axis = 1"; - attr->axis = 1; + axis = concatParam.axis(); } - attr->n = proto.bottom_size(); + prim->set_axis(axis); - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Concat; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffeConcatParser("Concat", new CaffeConcatParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h index 769b3eddb2..eef3caee0c 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_concat_parser.h @@ -21,14 +21,15 @@ #include "tools/converter/parser/caffe/caffe_node_parser.h" #include "tools/converter/parser/caffe/caffe_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite 
{ class CaffeConcatParser : public CaffeNodeParser { public: CaffeConcatParser() : CaffeNodeParser("concat") {} ~CaffeConcatParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_CONCAT_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc deleted file mode 100644 index a63d5602f4..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.cc +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/caffe/caffe_converter.h" -#include "tools/converter/parser/caffe/caffe_model_parser.h" - -namespace mindspore::lite { -CaffeConverter::CaffeConverter() { modelParser = new CaffeModelParser(); } -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h index 0c0367b32c..11b823609e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_converter.h @@ -21,13 +21,20 @@ #include <memory> #include "tools/converter/converter.h" #include "tools/converter/graphdef_transform.h" +#include "tools/converter/parser/caffe/caffe_model_parser.h" namespace mindspore::lite { class CaffeConverter : public Converter { public: - CaffeConverter(); + CaffeConverter() = default; ~CaffeConverter() override = default; + + FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) override { + CaffeModelParser parser; + return parser.Parse(model_file, weight_file, quant_type); + } }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc index 0edbeb208f..f21d4ead15 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.cc @@ -16,122 +16,73 @@ #include "tools/converter/parser/caffe/caffe_convolution_parser.h" #include <memory> +#include "ops/fusion/conv2d_fusion.h" namespace mindspore { namespace lite { -STATUS CaffeConvolutionParser::ParseDepthwiseConvolution(schema::PrimitiveT *primitiveT, schema::Conv2DT *attr) { - if (attr->group == 1 || attr->group != attr->channelOut) { - return RET_OK; - } - std::unique_ptr<schema::DepthwiseConv2DT> depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>(); - if (depthwiseConv2DParam == nullptr) { - MS_LOG(ERROR) << 
"new op failed"; - return RET_ERROR; - } - - depthwiseConv2DParam->format = attr->format; - depthwiseConv2DParam->channelIn = attr->channelIn; - depthwiseConv2DParam->channelMultiplier = attr->channelOut / attr->channelIn; - depthwiseConv2DParam->kernelW = attr->kernelW; - depthwiseConv2DParam->kernelH = attr->kernelH; - depthwiseConv2DParam->strideW = attr->strideW; - depthwiseConv2DParam->strideH = attr->strideH; - depthwiseConv2DParam->padMode = attr->padMode; - depthwiseConv2DParam->padUp = attr->padUp; - depthwiseConv2DParam->padDown = attr->padDown; - depthwiseConv2DParam->padLeft = attr->padLeft; - depthwiseConv2DParam->padRight = attr->padRight; - depthwiseConv2DParam->dilateW = attr->dilateW; - depthwiseConv2DParam->dilateH = attr->dilateH; - depthwiseConv2DParam->activationType = attr->activationType; - delete attr; - primitiveT->value.type = schema::PrimitiveType_DepthwiseConv2D; - primitiveT->value.value = depthwiseConv2DParam.release(); - return RET_OK; -} - -PrimitiveC *CaffeConvolutionParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - auto attr = std::make_unique<schema::Conv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return nullptr; - } +ops::PrimitiveC *CaffeConvolutionParser::Parse(const caffe::LayerParameter &proto, + const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Conv2DFusion>(); - attr->format = schema::Format_NCHW; + prim->set_pad({0, 0, 0, 0}); + prim->set_pad_mode(mindspore::PadMode::PAD); + prim->set_format(mindspore::Format::NCHW); + prim->set_activation_type(mindspore::NO_ACTIVATION); const caffe::ConvolutionParameter &convParam = proto.convolution_param(); - // parse pad - std::vector<int64_t> pad(4, 0); - auto status = CaffeConvBaseParser::ParsePads(convParam, &pad); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; + // parse kernel + std::vector<int64_t> kernel(2, 0); + if (CaffeConvBaseParser::ParseKernels(convParam, &kernel) != RET_OK) { return nullptr; } - attr->padUp = pad[0]; - attr->padDown = pad[1]; - attr->padLeft = pad[2]; - attr->padRight = pad[3]; + prim->set_kernel_size(kernel); // parse stride std::vector<int64_t> stride(2, 0); - status = CaffeConvBaseParser::ParseStrides(convParam, &stride); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParseStrides(convParam, &stride) != RET_OK) { return nullptr; } - attr->strideH = stride[0]; - attr->strideW = stride[1]; + prim->set_stride(stride); // parse dilation std::vector<int64_t> dilation(2, 0); - status = CaffeConvBaseParser::ParseDilations(convParam, &dilation); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParseDilations(convParam, &dilation) != RET_OK) { return nullptr; } - attr->dilateH = dilation[0]; - attr->dilateW = dilation[1]; + prim->set_dilation(dilation); - // parse kernel - std::vector<int64_t> kernel(2, 0); - status = CaffeConvBaseParser::ParseKernels(convParam, &kernel); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed"; + // parse pad + std::vector<int64_t> pad(4, 0); + if (CaffeConvBaseParser::ParsePads(convParam, &pad) != RET_OK) { return nullptr; } - attr->kernelH = kernel[0]; - attr->kernelW = kernel[1]; + prim->set_pad_list(pad); - attr->group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); - 
auto ret = CaffeConvBaseParser::ParseChannelOut(convParam, &(attr->channelOut)); - if (ret != RET_OK) { - MS_LOG(ERROR) << "conv channel out failed"; + // parse channelOut + int channel_out = 0; + if (CaffeConvBaseParser::ParseChannelOut(convParam, &channel_out) != RET_OK) { return nullptr; } + prim->set_out_channel(channel_out); + + // parse group + auto group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); + prim->set_group(group); + + // parse channelIn if (weight.blobs_size() < 1) { MS_LOG(ERROR) << "conv weight blob is empty"; return nullptr; } auto &weightBlob = weight.blobs(0); - if (weightBlob.has_shape()) { - attr->channelIn = weightBlob.shape().dim(1) * attr->group; - } else { - attr->channelIn = weightBlob.channels() * attr->group; - } - attr->padMode = schema::PadMode_CAFFE; + auto channelIn = weightBlob.has_shape() ? weightBlob.shape().dim(1) * group : weightBlob.channels() * group; + prim->set_in_channel(channelIn); - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Conv2D; - primitive->value.value = attr.release(); - - status = ParseDepthwiseConvolution(primitive.get(), static_cast<schema::Conv2DT *>(primitive->value.value)); - if (status != RET_OK) { - MS_LOG(ERROR) << "Parse depthwise convolution failed"; - return nullptr; + if (group != 1 && group == channel_out) { + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); } - return PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffeConvolutionParser("Convolution", new CaffeConvolutionParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h index dd104f99b6..51a2066d4a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_convolution_parser.h @@ -29,10 +29,7 @@ class CaffeConvolutionParser : public CaffeNodeParser { CaffeConvolutionParser() : CaffeNodeParser("convolution") {} ~CaffeConvolutionParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; - - private: - static STATUS ParseDepthwiseConvolution(schema::PrimitiveT *primitiveT, schema::Conv2DT *attr); + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc index 53962956e7..d890d7c49a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.cc @@ -16,30 +16,26 @@ #include "tools/converter/parser/caffe/caffe_crop_parser.h" #include <memory> +#include "ops/crop.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeCropParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::CropT> attr = std::make_unique<schema::CropT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeCropParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Crop>(); if (!proto.has_crop_param()) { - attr->axis = 2; + prim->set_axis(2); std::vector<int64_t> offsets(2, 0); -
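// Illustrative sketch, not from this patch: the convolution rewrite above passes
// CaffeConvBaseParser's four-element pad vector straight to set_pad_list. As the
// pooling parser further down makes explicit, the order is {up, down, left, right};
// assuming Caffe's usual pad_h/pad_w semantics, the mapping is:
std::vector<int64_t> PadListFromCaffe(int64_t pad_h, int64_t pad_w) {
  // Caffe pads top and bottom with pad_h, left and right with pad_w.
  return {pad_h, pad_h, pad_w, pad_w};  // {up, down, left, right}
}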
attr->offsets = offsets; + prim->set_offsets(offsets); } else { const caffe::CropParameter &cropParam = proto.crop_param(); if (cropParam.has_axis()) { if (cropParam.axis() == -1) { MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims."; } - attr->axis = cropParam.axis(); + prim->set_axis(cropParam.axis()); } else { - attr->axis = 2; + prim->set_axis(2); } if (cropParam.offset_size() != 0) { @@ -48,13 +44,11 @@ PrimitiveC *CaffeCropParser::ParseLitePrimitive(const caffe::LayerParameter &pro for (int i = 0; i < cropParam.offset_size(); i++) { offsets.push_back(cropParam.offset(i)); } - attr->offsets = offsets; + prim->set_offsets(offsets); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Crop; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeCropParser("Crop", new CaffeCropParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h index 69194ec13b..e8667940be 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_crop_parser.h @@ -28,7 +28,7 @@ class CaffeCropParser : public CaffeNodeParser { CaffeCropParser() : CaffeNodeParser("crop") {} ~CaffeCropParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc index cbe36d3087..36fcc287e5 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.cc @@ -16,115 +16,73 @@ #include "tools/converter/parser/caffe/caffe_deconvolution_parser.h" #include <memory> +#include "ops/fusion/conv2d_transpose_fusion.h" namespace mindspore { namespace lite { -STATUS CaffeDeconvolutionParser::ParseGroupDeconvolution(schema::PrimitiveT *primitive, schema::DeConv2DT *attr) { - if (attr->group == 1) { - return RET_OK; - } - - std::unique_ptr<schema::DeDepthwiseConv2DT> deDepthwiseConv2DParam = std::make_unique<schema::DeDepthwiseConv2DT>(); - if (deDepthwiseConv2DParam == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_ERROR; - } - deDepthwiseConv2DParam->format = attr->format; - deDepthwiseConv2DParam->channelIn = attr->channelOut; - deDepthwiseConv2DParam->channelMultiplier = attr->channelIn / attr->channelOut; - deDepthwiseConv2DParam->kernelW = attr->kernelW; - deDepthwiseConv2DParam->kernelH = attr->kernelH; - deDepthwiseConv2DParam->strideW = attr->strideW; - deDepthwiseConv2DParam->strideH = attr->strideH; - deDepthwiseConv2DParam->padMode = attr->padMode; - deDepthwiseConv2DParam->padUp = attr->padUp; - deDepthwiseConv2DParam->padDown = attr->padDown; - deDepthwiseConv2DParam->padLeft = attr->padLeft; - deDepthwiseConv2DParam->padRight = attr->padRight; - deDepthwiseConv2DParam->dilateW = attr->dilateW; - deDepthwiseConv2DParam->dilateH = attr->dilateH; - deDepthwiseConv2DParam->activationType = attr->activationType; - delete attr; - primitive->value.type = 
schema::PrimitiveType_DeDepthwiseConv2D; - primitive->value.value = deDepthwiseConv2DParam.release(); - return RET_OK; -} - -PrimitiveC *CaffeDeconvolutionParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::DeConv2DT> attr(new (std::nothrow) schema::DeConv2DT()); +ops::PrimitiveC *CaffeDeconvolutionParser::Parse(const caffe::LayerParameter &proto, + const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Conv2dTransposeFusion>(); - attr->format = schema::Format::Format_NCHW; + prim->set_pad({0, 0, 0, 0}); + prim->set_format(mindspore::Format::NCHW); + prim->set_pad_mode(mindspore::PadMode::PAD); const caffe::ConvolutionParameter &convParam = proto.convolution_param(); // parse pad std::vector<int64_t> pad(4, 0); - auto status = CaffeConvBaseParser::ParsePads(convParam, &pad); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParsePads(convParam, &pad) != RET_OK) { return nullptr; } - attr->padUp = pad[0]; - attr->padDown = pad[1]; - attr->padLeft = pad[2]; - attr->padRight = pad[3]; + prim->set_pad_list({pad[0], pad[1], pad[2], pad[3]}); // parse stride std::vector<int64_t> stride(2, 0); - status = CaffeConvBaseParser::ParseStrides(convParam, &stride); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParseStrides(convParam, &stride) != RET_OK) { return nullptr; } - attr->strideH = stride[0]; - attr->strideW = stride[1]; + prim->set_stride({stride[0], stride[1]}); // parse dilation std::vector<int64_t> dilation(2, 0); - status = CaffeConvBaseParser::ParseDilations(convParam, &dilation); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseDilations for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParseDilations(convParam, &dilation) != RET_OK) { return nullptr; } - attr->dilateH = dilation[0]; - attr->dilateW = dilation[1]; + prim->set_dilation({dilation[0], dilation[1]}); // parse kernel std::vector<int64_t> kernel(2, 0); - status = CaffeConvBaseParser::ParseKernels(convParam, &kernel); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseKernels for " << proto.name().c_str() << " failed"; + if (CaffeConvBaseParser::ParseKernels(convParam, &kernel) != RET_OK) { return nullptr; } - attr->kernelH = kernel[0]; - attr->kernelW = kernel[1]; + prim->set_kernel_size({kernel[0], kernel[1]}); - attr->group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); - auto ret = CaffeConvBaseParser::ParseChannelOut(convParam, &(attr->channelOut)); - if (ret != RET_OK) { - MS_LOG(ERROR) << "deconv channel get failed"; + // parse group + auto group = CaffeConvBaseParser::ParseGroup(convParam, proto.type()); + prim->set_group(group); + + // parse channelOut + int32_t channelOut; + if (CaffeConvBaseParser::ParseChannelOut(convParam, &channelOut) != RET_OK) { return nullptr; } + prim->set_out_channel((int64_t)channelOut); + + // parse channelIN auto &weightBlob = weight.blobs(0); if (weightBlob.has_shape()) { - if (attr->group == 1) - attr->channelIn = weightBlob.shape().dim(0) * attr->group; + if (group == 1) + prim->set_in_channel(weightBlob.shape().dim(0) * group); else - attr->channelIn = weightBlob.shape().dim(1) * attr->group; + prim->set_in_channel(weightBlob.shape().dim(1) * group); } else { - attr->channelIn = weightBlob.num() * attr->group; + prim->set_in_channel(weightBlob.num() * group); } - attr->padMode = 
schema::PadMode_CAFFE; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_DeConv2D; - primitive->value.value = attr.release(); - - status = ParseGroupDeconvolution(primitive.get(), primitive->value.AsDeConv2D()); - if (status != RET_OK) { - MS_LOG(ERROR) << "Parse group deconvolution failed"; - return nullptr; + if (group != 1) { + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); } - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeDeconvolutionParser("Deconvolution", new CaffeDeconvolutionParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h index 53136419df..2d9c88a4a5 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_deconvolution_parser.h @@ -29,10 +29,7 @@ class CaffeDeconvolutionParser : public CaffeNodeParser { CaffeDeconvolutionParser() : CaffeNodeParser("deconvolution") {} ~CaffeDeconvolutionParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; - - private: - static STATUS ParseGroupDeconvolution(schema::PrimitiveT *primitive, schema::DeConv2DT *attr); + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc index bb37506265..0290f22119 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.cc @@ -17,16 +17,12 @@ #include "tools/converter/parser/caffe/caffe_eltwise_parser.h" #include <cmath> #include <memory> +#include "ops/eltwise.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeEltwiseParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::EltwiseT> attr = std::make_unique<schema::EltwiseT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeEltwiseParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Eltwise>(); if (proto.bottom_size() < 2) { MS_LOG(ERROR) << "Eltwise Op " << proto.name() << " need at least 2 inputs,but input size is " @@ -55,25 +51,23 @@ PrimitiveC *CaffeEltwiseParser::ParseLitePrimitive(const caffe::LayerParameter & if (proto.has_eltwise_param() && eltwiseParam.has_operation()) { switch (eltwiseParam.operation()) { case caffe::EltwiseParameter::PROD: - attr->mode = schema::EltwiseMode_PROD; + prim->set_mode(mindspore::EltwiseMode::PROD); break; case caffe::EltwiseParameter::SUM: - attr->mode = schema::EltwiseMode_SUM; + prim->set_mode(mindspore::EltwiseMode::SUM); break; case caffe::EltwiseParameter::MAX: - attr->mode = schema::EltwiseMode_MAXIMUM; + prim->set_mode(mindspore::EltwiseMode::MAXIMUM); break; default: - MS_LOG(ERROR) << "Eltwise parse params fail, unsupported opration: " << eltwiseParam.operation(); + MS_LOG(ERROR) << "Eltwise parse params fail, unsupported operation: " << eltwiseParam.operation(); return nullptr; } } else { - attr->mode = schema::EltwiseMode_SUM; + 
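// Illustrative sketch, not from this patch: with DepthwiseConv2D and
// DeDepthwiseConv2D gone as node types, depthwise-ness survives only as the
// ops::kIsDepthWise attribute. The deleted helpers above reduce to two predicates:
bool IsDepthwiseConv(int64_t group, int64_t channel_out) {
  // ParseDepthwiseConvolution rewrote only grouped convs with one output channel per group.
  return group != 1 && group == channel_out;
}
bool IsDepthwiseDeconv(int64_t group) {
  // ParseGroupDeconvolution rewrote every grouped deconvolution.
  return group != 1;
}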
prim->set_mode(mindspore::EltwiseMode::SUM); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Eltwise; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeEltwiseParser("Eltwise", new CaffeEltwiseParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h index 126aa921d9..13ededeeb8 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_eltwise_parser.h @@ -28,7 +28,7 @@ class CaffeEltwiseParser : public CaffeNodeParser { CaffeEltwiseParser() : CaffeNodeParser("eltwise") {} ~CaffeEltwiseParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc index d7ab4d5ee6..5e53728916 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.cc @@ -16,27 +16,21 @@ #include "tools/converter/parser/caffe/caffe_elu_parser.h" #include <memory> +#include "ops/elu.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeEluParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::EluT> attr = std::make_unique<schema::EluT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeEluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Elu>(); if (proto.has_elu_param()) { const caffe::ELUParameter &eluParameter = proto.elu_param(); if (eluParameter.has_alpha()) { - attr->alpha = eluParameter.alpha(); + prim->set_alpha(eluParameter.alpha()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Elu; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeEluParser("ELU", new CaffeEluParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h index d9757c4ac3..306d47f654 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_elu_parser.h @@ -28,7 +28,7 @@ class CaffeEluParser : public CaffeNodeParser { CaffeEluParser() : CaffeNodeParser("elu") {} ~CaffeEluParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc index c0cf5c8ff8..1ae3a8625b 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.cc @@ -17,37 +17,31 @@ #include "tools/converter/parser/caffe/caffe_exp_parser.h" #include <memory> #include <vector> +#include "ops/fusion/exp_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeExpParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ExpT> attr = std::make_unique<schema::ExpT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeExpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::ExpFusion>(); const caffe::ExpParameter &exp_param = proto.exp_param(); if (exp_param.has_base()) { - attr->base = exp_param.base(); + prim->set_base(exp_param.base()); } else { - attr->base = -1; // -1 represent base = e + prim->set_base(-1); // -1 represent base = e } if (exp_param.has_scale()) { - attr->scale = exp_param.scale(); + prim->set_scale(exp_param.scale()); } else { - attr->scale = 1; + prim->set_scale(1); } if (exp_param.has_shift()) { - attr->shift = exp_param.shift(); + prim->set_shift(exp_param.shift()); } else { - attr->shift = 0; + prim->set_shift(0); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Exp; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeExpParser("Exp", new CaffeExpParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h index 9e8ba424bf..c6e649e30e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_exp_parser.h @@ -28,7 +28,7 @@ class CaffeExpParser : public CaffeNodeParser { CaffeExpParser() : CaffeNodeParser("exp") {} ~CaffeExpParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc index 78263fe24a..6d892bed08 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.cc @@ -16,20 +16,14 @@ #include "tools/converter/parser/caffe/caffe_flatten_parser.h" #include <memory> +#include "ops/flatten.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeFlattenParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::FlattenT> attr = std::make_unique<schema::FlattenT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Flatten; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *CaffeFlattenParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Flatten>(); + + return prim.release(); } CaffeNodeRegistrar g_CaffeFlattenParser("Flatten", 
new CaffeFlattenParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h index 71f79f6643..93b3d4ea27 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_flatten_parser.h @@ -21,16 +21,14 @@ #include "tools/converter/parser/caffe/caffe_node_parser.h" #include "tools/converter/parser/caffe/caffe_node_parser_registry.h" -namespace mindspore { -namespace lite { +namespace mindspore::lite { class CaffeFlattenParser : public CaffeNodeParser { public: CaffeFlattenParser() : CaffeNodeParser("flatten") {} ~CaffeFlattenParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; -} // namespace lite -} // namespace mindspore +} // namespace mindspore::lite #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_FLATTEN_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc index 8ea77c35e7..b8a92308f8 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.cc @@ -16,16 +16,15 @@ #include "tools/converter/parser/caffe/caffe_innerproduct_parser.h" #include <memory> +#include "ops/fusion/full_connection.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeInnerProductParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeInnerProductParser::Parse(const caffe::LayerParameter &proto, + const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::FullConnection>(); + + prim->set_activation_type(mindspore::ActivationType::NO_ACTIVATION); const caffe::InnerProductParameter &innerProductParam = proto.inner_product_param(); if (!innerProductParam.has_num_output()) { @@ -34,21 +33,17 @@ PrimitiveC *CaffeInnerProductParser::ParseLitePrimitive(const caffe::LayerParame } if (innerProductParam.axis() == 1) { - attr->axis = 1; - attr->useAxis = true; + prim->set_axis(1); + prim->set_use_axis(true); } else { MS_LOG(ERROR) << "InnerProduct Parse axis only support default 1, but actually " << innerProductParam.axis(); return nullptr; } - if (innerProductParam.bias_term()) { - attr->hasBias = true; + prim->set_has_bias(true); } - attr->activationType = schema::ActivationType_NO_ACTIVATION; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_FullConnection; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeInnerProductParser("InnerProduct", new CaffeInnerProductParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h index 298f81a7d6..c02193a99a 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_innerproduct_parser.h @@ -28,7 +28,7 @@ class CaffeInnerProductParser : public CaffeNodeParser { CaffeInnerProductParser() : CaffeNodeParser("innerproduct") {} ~CaffeInnerProductParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc index afc3420606..9d93b1fa1f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020-2021 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,16 +16,15 @@ #include "tools/converter/parser/caffe/caffe_interp_parser.h" #include <memory> +#include "ops/resize.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeInterpParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeInterpParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Resize>(); + + prim->set_method(mindspore::ResizeMethod::LINEAR); + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ALIGN_CORNERS); const caffe::InterpParameter &interp_param = proto.interp_param(); if (interp_param.has_height()) { @@ -34,7 +33,7 @@ PrimitiveC *CaffeInterpParser::ParseLitePrimitive(const caffe::LayerParameter &p MS_LOG(ERROR) << "Interp height must be > 0"; return nullptr; } - attr->newHeight = height; + prim->set_new_height(height); } if (interp_param.has_width()) { @@ -43,18 +42,13 @@ PrimitiveC *CaffeInterpParser::ParseLitePrimitive(const caffe::LayerParameter &p MS_LOG(ERROR) << "Interp width must be > 0"; return nullptr; } - attr->newWidth = width; + prim->set_new_width(width); } - attr->method = schema::ResizeMethod_LINEAR; - attr->coordinateTransformMode = schema::CoordinateTransformMode_ALIGN_CORNERS; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Resize; - primitive->value.value = attr.release(); - auto primitive_c = PrimitiveC::Create(primitive.release()); + if (interp_param.has_zoom_factor()) { - primitive_c->AddAttr("zoom_factor", MakeValue(interp_param.zoom_factor())); + prim->AddAttr("zoom_factor", MakeValue(interp_param.zoom_factor())); } - return primitive_c; + return prim.release(); } CaffeNodeRegistrar g_caffeInterpParser("Interp", new CaffeInterpParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h index bdaaa170c1..b289b60d96 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_interp_parser.h @@ -28,7 +28,7 @@ class CaffeInterpParser : public CaffeNodeParser { CaffeInterpParser() : 
CaffeNodeParser("Interp") {} ~CaffeInterpParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc index 94dd65426e..4e624f6eaa 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.cc @@ -17,12 +17,18 @@ #include <vector> #include <iostream> #include <map> +#include <memory> #include <algorithm> #include "tools/converter/parser/caffe/caffe_node_parser_registry.h" #include "tools/converter/parser/caffe/caffe_inspector.h" #include "tools/common/graph_util.h" #include "tools/common/protobuf_utils.h" #include "src/param_value_lite.h" +#include "ops/return.h" +#include "ops/make_tuple.h" +#include "ops/tuple_get_item.h" +#include "ir/func_graph.h" +#include "tools/converter/converter_flags.h" namespace mindspore::lite { bool IsSkipedLayer(const caffe::LayerParameter &layer) { @@ -90,6 +96,7 @@ STATUS CaffeModelParser::ConvertLayers() { } // parse primitive + MS_LOG(INFO) << "parse op : " << layer.type(); auto node_parser = CaffeNodeParserRegistry::GetInstance()->GetNodeParser(layer.type()); if (node_parser == nullptr) { NoSupportOp::GetInstance()->InsertOp(layer.type()); @@ -101,7 +108,7 @@ STATUS CaffeModelParser::ConvertLayers() { continue; } - auto primitive_c = node_parser->ParseLitePrimitive(layer, weight); + auto primitive_c = node_parser->Parse(layer, weight); if (primitive_c == nullptr) { MS_LOG(ERROR) << "parse node " << layer.name() << " failed."; status = RET_ERROR; @@ -125,7 +132,7 @@ STATUS CaffeModelParser::ConvertLayers() { } // build cnode - std::vector<AnfNodePtr> op_inputs = {NewValueNode(std::shared_ptr<lite::PrimitiveC>(primitive_c))}; + std::vector<AnfNodePtr> op_inputs = {NewValueNode(std::shared_ptr<ops::PrimitiveC>(primitive_c))}; op_inputs.insert(op_inputs.end(), input_nodes.begin(), input_nodes.end()); op_inputs.insert(op_inputs.end(), const_parameters.begin(), const_parameters.end()); auto new_cnode = func_graph_ptr_->NewCNode(op_inputs); @@ -244,9 +251,9 @@ STATUS CaffeModelParser::ConvertGraphOutputs() { caffeInspector.InspectModel(caffe_model_); if (caffeInspector.GetGraphOutput().size() > 1) { std::vector<AnfNodePtr> make_tuple_inputs; - auto make_tuple_prim_ptr = GetMakeTuplePrim(); + auto make_tuple_prim_ptr = std::make_shared<ops::MakeTuple>(); if (make_tuple_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; + MS_LOG(ERROR) << "new MakeTuple failed"; return RET_NULL_PTR; } auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr); @@ -263,9 +270,9 @@ STATUS CaffeModelParser::ConvertGraphOutputs() { make_tuple_cnode->set_fullname_with_scope("return tuple"); std::vector<AnfNodePtr> op_inputs; - auto return_prim_ptr = GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto value_node = NewValueNode(return_prim_ptr); @@ -275,9 +282,9 @@ STATUS CaffeModelParser::ConvertGraphOutputs() { cnode->set_fullname_with_scope("Return"); func_graph_ptr_->set_return(cnode); } else { - auto returnPrim = 
GetReturnPrim(); + auto returnPrim = std::make_shared<ops::Return>(); if (returnPrim == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto valueNode = NewValueNode(returnPrim); @@ -300,23 +307,25 @@ STATUS CaffeModelParser::ConvertGraphOutputs() { } STATUS CaffeModelParser::ConvertLayerQuantParams(const caffe::LayerParameter &layer, - const caffe::LayerParameter &weight, lite::PrimitiveC *primitive_c) { + const caffe::LayerParameter &weight, ops::PrimitiveC *primitive_c) { if (primitive_c == nullptr) { MS_LOG(ERROR) << "primitive_c is null, get quant params failed."; return RET_NULL_PTR; } + auto quant_params_holder = std::make_shared<QuantParamHolder>(); for (auto input_idx : layer.bottom()) { std::vector<schema::QuantParamT> notinited_quant_params(1); - primitive_c->AddInputQuantParam(notinited_quant_params); + quant_params_holder->AddInputQuantParam(notinited_quant_params); } for (auto input_idx : weight.blobs()) { std::vector<schema::QuantParamT> notinited_quant_params(1); - primitive_c->AddInputQuantParam(notinited_quant_params); + quant_params_holder->AddInputQuantParam(notinited_quant_params); } for (auto output_idx : layer.top()) { std::vector<schema::QuantParamT> notinited_quant_params(1); - primitive_c->AddOutputQuantParam(notinited_quant_params); + quant_params_holder->AddOutputQuantParam(notinited_quant_params); } + primitive_c->AddAttr("quant_params", quant_params_holder); return RET_OK; } @@ -424,9 +433,9 @@ STATUS CaffeModelParser::ConvertTop(const caffe::LayerParameter &layer, const CN AbstractBasePtrList abstract_list; for (int i = 0; i < layer.top_size(); i++) { abstract_list.emplace_back(std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector)); - auto tuple_get_item_prim_ptr = GetTupleGetItemPrim(); + auto tuple_get_item_prim_ptr = std::make_shared<ops::TupleGetItem>(); if (tuple_get_item_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; + MS_LOG(ERROR) << "new TupleGetItem failed"; return RET_NULL_PTR; } auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr); @@ -457,10 +466,4 @@ std::string CaffeModelParser::GetOriginLayerName(const std::string &layer_name) } return layer.name(); } - -MetaGraphT *CaffeModelParser::ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) { - return nullptr; -} - } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h index e6fb5221a4..8eefd3cfab 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_model_parser.h @@ -23,6 +23,7 @@ #include <unordered_map> #include "tools/converter/model_parser.h" #include "proto/caffe.pb.h" +#include "ops/primitive_c.h" namespace mindspore::lite { class CaffeModelParser : public ModelParser { @@ -34,9 +35,6 @@ class CaffeModelParser : public ModelParser { FuncGraphPtr Parse(const std::string &model_file, const std::string &weight_file, const QuantType &quant_type) override; - MetaGraphT *ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) override; - private: STATUS InitOriginModel(const std::string &model_file, const std::string &weight_file); @@ -46,8 +44,8 @@ class CaffeModelParser : public ModelParser { STATUS ConvertLayers(); - STATUS ConvertLayerQuantParams(const 
caffe::LayerParameter &layer, const caffe::LayerParameter &weight, - lite::PrimitiveC *primitive_c); + static STATUS ConvertLayerQuantParams(const caffe::LayerParameter &layer, const caffe::LayerParameter &weight, + ops::PrimitiveC *primitive_c); STATUS ConvertBlobs(const caffe::LayerParameter &layer, std::vector<ParameterPtr> *const_parameters); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h index 3390f3118a..ba736b0d3e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_node_parser.h @@ -19,14 +19,14 @@ #include <string> #include <vector> -#include "src/ops/primitive_c.h" -#include "ops/primitive_c.h" #include "google/protobuf/message.h" #include "schema/inner/model_generated.h" #include "proto/caffe.pb.h" #include "tools/converter/parser/caffe/caffe_node_parser.h" #include "include/errorcode.h" #include "src/common/log_adapter.h" +#include "ops/primitive_c.h" +#include "mindspore/core/utils/check_convert_utils.h" namespace mindspore { namespace lite { @@ -36,8 +36,7 @@ class CaffeNodeParser { virtual ~CaffeNodeParser() {} - virtual lite::PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { + virtual ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { return nullptr; } diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc index eeafed06d8..a88a769fc4 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.cc @@ -16,27 +16,23 @@ #include "tools/converter/parser/caffe/caffe_permute_parser.h" #include <memory> +#include "ops/transpose.h" namespace mindspore { namespace lite { -PrimitiveC *CaffePermuteParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffePermuteParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Transpose>(); + std::vector<int32_t> perm; const caffe::PermuteParameter &permuteParam = proto.permute_param(); const int num_order_dims = permuteParam.order_size(); - attr->perm.resize(num_order_dims); + perm.resize(num_order_dims); for (int i = 0; i < num_order_dims; ++i) { - attr->perm[i] = (int32_t)permuteParam.order()[i]; + perm[i] = permuteParam.order()[i]; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Transpose; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->AddAttr("perm", MakeValue(perm)); + + return prim.release(); } CaffeNodeRegistrar g_caffePermuteParser("Permute", new CaffePermuteParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h index ae19bc391c..2e230386f3 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_permute_parser.h @@ -28,7 +28,7 @@ class CaffePermuteParser : public CaffeNodeParser { 
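// Illustrative sketch, not from this patch: where the ops:: API has no dedicated
// setter ("perm" on Transpose above, "zoom_factor" on Resize, "axes" on
// ReduceFusion below), the patch falls back to the generic attribute map. A
// plausible round-trip, assuming core's MakeValue/GetValue helpers:
std::vector<int32_t> perm = {0, 2, 3, 1};
prim->AddAttr("perm", MakeValue(perm));                                 // store
auto restored = GetValue<std::vector<int32_t>>(prim->GetAttr("perm"));  // read back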
CaffePermuteParser() : CaffeNodeParser("Permute") {} ~CaffePermuteParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc index 0cd88d6088..fa82200b0d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.cc @@ -16,52 +16,53 @@ #include "tools/converter/parser/caffe/caffe_pooling_parser.h" #include <memory> +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/max_pool_fusion.h" namespace mindspore { namespace lite { -STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParsePads(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *pad) { if (poolingParam.has_pad_h() && poolingParam.has_pad_w()) { if (poolingParam.has_pad()) { MS_LOG(ERROR) << "Either pad or pad_h/w should be specified; not both"; return RET_ERROR; } - attr->padLeft = poolingParam.pad_w(); - attr->padRight = poolingParam.pad_w(); - attr->padUp = poolingParam.pad_h(); - attr->padDown = poolingParam.pad_h(); + (*pad)[0] = poolingParam.pad_h(); + (*pad)[1] = poolingParam.pad_h(); + (*pad)[2] = poolingParam.pad_w(); + (*pad)[3] = poolingParam.pad_w(); } else { - attr->padLeft = poolingParam.pad(); - attr->padRight = poolingParam.pad(); - attr->padUp = poolingParam.pad(); - attr->padDown = poolingParam.pad(); + (*pad)[0] = poolingParam.pad(); + (*pad)[1] = poolingParam.pad(); + (*pad)[2] = poolingParam.pad(); + (*pad)[3] = poolingParam.pad(); } return RET_OK; } -STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParseStrides(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *strides) { if (poolingParam.has_stride_h() && poolingParam.has_stride_w()) { if (poolingParam.has_stride()) { MS_LOG(ERROR) << "Either stride or stride_h/w should be specified; not both"; return RET_ERROR; } - attr->strideH = poolingParam.stride_h(); - attr->strideW = poolingParam.stride_w(); + (*strides)[0] = poolingParam.stride_h(); + (*strides)[1] = poolingParam.stride_w(); } else { - attr->strideH = poolingParam.stride(); - attr->strideW = poolingParam.stride(); + (*strides)[0] = poolingParam.stride(); + (*strides)[1] = poolingParam.stride(); } return RET_OK; } -STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { +STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *windows) { if (poolingParam.has_global_pooling() && poolingParam.global_pooling()) { if (poolingParam.has_kernel_size() || poolingParam.has_kernel_h() || poolingParam.has_kernel_w()) { MS_LOG(ERROR) << "With Global_pooling: true Filter size cannot specified"; return RET_ERROR; } - attr->windowH = 0; - attr->windowW = 0; - attr->global = true; + (*windows)[0] = 0; + (*windows)[1] = 0; } else { if (poolingParam.has_kernel_size() == (poolingParam.has_kernel_h() || poolingParam.has_kernel_w())) { MS_LOG(ERROR) << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; @@ -73,75 +74,79 
@@ STATUS CaffePoolingParser::ParseWindows(const caffe::PoolingParameter &poolingPa } if (poolingParam.has_kernel_h() && poolingParam.has_kernel_w()) { - attr->windowH = poolingParam.kernel_h(); - attr->windowW = poolingParam.kernel_w(); + (*windows)[0] = poolingParam.kernel_h(); + (*windows)[1] = poolingParam.kernel_w(); } else { - attr->windowH = poolingParam.kernel_size(); - attr->windowW = poolingParam.kernel_size(); + (*windows)[0] = poolingParam.kernel_size(); + (*windows)[1] = poolingParam.kernel_size(); } } return RET_OK; } -STATUS CaffePoolingParser::ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr) { - if (poolingParam.pool() == caffe::PoolingParameter::MAX) { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - } else if (poolingParam.pool() == caffe::PoolingParameter::AVE) { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } else { - MS_LOG(ERROR) << "MindSpore support MAX and AVE PoolingMode only."; - return RET_ERROR; +mindspore::RoundMode CaffePoolingParser::ParseRoundMode(const caffe::PoolingParameter &poolingParam) { + mindspore::RoundMode roundMode = mindspore::RoundMode::CEIL; + if (poolingParam.has_round_mode()) { + if (poolingParam.round_mode() == caffe::PoolingParameter_RoundMode_FLOOR) { + roundMode = mindspore::RoundMode::FLOOR; + } else if (poolingParam.round_mode() == caffe::PoolingParameter_RoundMode_CEIL) { + roundMode = mindspore::RoundMode::CEIL; + } } - return RET_OK; + return roundMode; } -PrimitiveC *CaffePoolingParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->format = schema::Format::Format_NCHW; +ops::PrimitiveC *CaffePoolingParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { const caffe::PoolingParameter &poolingParam = proto.pooling_param(); - auto status = ParsePads(poolingParam, attr.get()); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; + + // parse kernel params + std::vector<int64_t> windows(2, 0); + if (ParseWindows(poolingParam, &windows) != RET_OK) { + MS_LOG(ERROR) << "ParseWindows for " << proto.name().c_str() << " failed"; return nullptr; } - status = ParseStrides(poolingParam, attr.get()); - if (status != RET_OK) { + // parse strides params + std::vector<int64_t> strides(2, 0); + if (ParseStrides(poolingParam, &strides) != RET_OK) { MS_LOG(ERROR) << "ParseStrides for " << proto.name().c_str() << " failed"; return nullptr; } - status = ParseWindows(poolingParam, attr.get()); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParseWindows for " << proto.name().c_str() << " failed"; + // parse pad params + std::vector<int64_t> pad(4, 0); + if (ParsePads(poolingParam, &pad) != RET_OK) { + MS_LOG(ERROR) << "ParsePads for " << proto.name().c_str() << " failed"; return nullptr; } - status = ParsePoolingMode(poolingParam, attr.get()); - if (status != RET_OK) { - MS_LOG(ERROR) << "ParsePoolingMode for " << proto.name().c_str() << " failed"; - return nullptr; - } + // parse round mode + auto roundMode = ParseRoundMode(poolingParam); - attr->roundMode = schema::RoundMode_CEIL; - if (poolingParam.has_round_mode()) { - if (poolingParam.round_mode() == caffe::PoolingParameter_RoundMode_FLOOR) { - attr->roundMode = schema::RoundMode_FLOOR; - } else if (poolingParam.round_mode() == 
caffe::PoolingParameter_RoundMode_CEIL) { - attr->roundMode = schema::RoundMode_CEIL; - } + if (poolingParam.pool() == caffe::PoolingParameter::MAX) { + auto prim = std::make_unique<ops::MaxPoolFusion>(); + prim->set_format(mindspore::Format::NCHW); + prim->set_pad_mode(mindspore::PadMode::PAD); + prim->set_kernel_size(windows); + prim->set_strides(strides); + prim->set_pad(pad); + prim->set_round_mode(roundMode); + prim->set_global(poolingParam.global_pooling()); + return prim.release(); + } else if (poolingParam.pool() == caffe::PoolingParameter::AVE) { + auto prim = std::make_unique<ops::AvgPoolFusion>(); + prim->set_format(mindspore::Format::NCHW); + prim->set_pad_mode(mindspore::PadMode::PAD); + prim->set_kernel_size(windows); + prim->set_strides(strides); + prim->set_pad(pad); + prim->set_round_mode(roundMode); + prim->set_global(poolingParam.global_pooling()); + return prim.release(); + } else { + MS_LOG(ERROR) << "poolingParam.pool() is not MAX or AVE"; + return nullptr; } - attr->padMode = schema::PadMode_CAFFE; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Pooling; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); } CaffeNodeRegistrar g_caffePoolingParser("Pooling", new CaffePoolingParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h index f0d62c25db..c91109e260 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_pooling_parser.h @@ -28,15 +28,15 @@ class CaffePoolingParser : public CaffeNodeParser { CaffePoolingParser() : CaffeNodeParser("pooling") {} ~CaffePoolingParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; - static STATUS ParsePads(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParsePads(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *pad); - static STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParseStrides(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *strides); - static STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + static STATUS ParseWindows(const caffe::PoolingParameter &poolingParam, std::vector<int64_t> *windows); - static STATUS ParsePoolingMode(const caffe::PoolingParameter &poolingParam, schema::PoolingT *attr); + mindspore::RoundMode ParseRoundMode(const caffe::PoolingParameter &poolingParam); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc index 78e6ce9cab..e4efa24113 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.cc @@ -15,34 +15,35 @@ */ #include "tools/converter/parser/caffe/caffe_power_parser.h" -#include <memory> #include <vector> +#include <memory> +#include "ops/fusion/pow_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffePowerParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) 
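// Editorial note, not from this patch: Caffe's Power layer computes
// y = (shift + scale * x) ^ power, so the defaults below (power = 1, scale = 1,
// shift = 0) keep the op an identity when power_param is absent. For example,
// with power = 2, scale = 0.5, shift = 1: x = 4 gives (1 + 0.5 * 4)^2 = 9.
#include <cmath>
float CaffePower(float x, float power, float scale, float shift) {
  return std::pow(shift + scale * x, power);
}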
{ - std::unique_ptr<schema::PowerT> attr = std::make_unique<schema::PowerT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffePowerParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::PowFusion>(); const caffe::PowerParameter &powerParam = proto.power_param(); + float power = 1.0; + float scale = 1.0; + float shift = 0.0; if (proto.has_power_param()) { - attr->power = powerParam.has_power() ? powerParam.power() : 1.0; - attr->scale = powerParam.has_scale() ? powerParam.scale() : 1.0; - attr->shift = powerParam.has_shift() ? powerParam.shift() : 0.0; - } else { - attr->power = 1.0; - attr->scale = 1.0; - attr->shift = 0.0; + if (powerParam.has_power()) { + power = powerParam.power(); + } + if (powerParam.has_scale()) { + scale = powerParam.scale(); + } + if (powerParam.has_shift()) { + shift = powerParam.shift(); + } } + prim->AddAttr("power", MakeValue(power)); + prim->set_scale(scale); + prim->set_shift(shift); - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Power; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffePowerParser("Power", new CaffePowerParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h index 89c67763a1..3e320cbb7d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_power_parser.h @@ -28,7 +28,7 @@ class CaffePowerParser : public CaffeNodeParser { CaffePowerParser() : CaffeNodeParser("power") {} ~CaffePowerParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc index ec3f35aab3..b6e0b387bd 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.cc @@ -16,27 +16,21 @@ #include "tools/converter/parser/caffe/caffe_prelu_parser.h" #include <memory> +#include "ops/fusion/prelu_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffePReluParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::PReLUT> attr = std::make_unique<schema::PReLUT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffePReluParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::PReLUFusion>(); - const caffe::PReLUParameter &pReluParam = proto.prelu_param(); - if (pReluParam.has_channel_shared()) { - attr->channelShared = pReluParam.channel_shared(); + const caffe::PReLUParameter &prelu_param = proto.prelu_param(); + if (prelu_param.has_channel_shared()) { + prim->set_channel_shared(prelu_param.channel_shared()); } else { - attr->channelShared = false; + prim->set_channel_shared(false); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - 
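// Editorial note, not from this patch: the tail being deleted in each of these
// parsers is the old flatbuffer hand-off; under the unified IR it collapses to a
// single statement. Before and after, side by side:
//   old:  auto primitive = std::make_unique<schema::PrimitiveT>();
//         primitive->value.type = schema::PrimitiveType_Power;
//         primitive->value.value = attr.release();
//         return PrimitiveC::Create(primitive.release());
//   new:  return prim.release();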
primitive->value.type = schema::PrimitiveType_PReLU; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffePReluParser("PReLU", new CaffePReluParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h index 2a1e715d16..e9e2669dd0 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_prelu_parser.h @@ -28,7 +28,7 @@ class CaffePReluParser : public CaffeNodeParser { CaffePReluParser() : CaffeNodeParser("pRelu") {} ~CaffePReluParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc index f13bc206fe..d0cd1f1fbc 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.cc @@ -17,35 +17,32 @@ #include "tools/converter/parser/caffe/caffe_reduce_parser.h" #include <memory> #include <vector> +#include "ops/fusion/reduce_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeReduceParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - auto attr = std::make_unique<schema::ReduceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeReduceParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::ReduceFusion>(); - attr->keepDims = false; + prim->set_keep_dims(false); + prim->set_reduce_to_end(true); const caffe::ReductionParameter &reduce_param = proto.reduction_param(); if (reduce_param.has_operation()) { if (reduce_param.operation() == caffe::ReductionParameter_ReductionOp_MEAN) { - attr->mode = schema::ReduceMode_ReduceMean; + prim->set_mode(mindspore::ReduceMode::Reduce_Mean); } else if (reduce_param.operation() == caffe::ReductionParameter_ReductionOp_SUM) { - attr->mode = schema::ReduceMode_ReduceSum; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum); } else if (reduce_param.operation() == caffe::ReductionParameter_ReductionOp_SUMSQ) { - attr->mode = schema::ReduceMode_ReduceSumSquare; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum_Square); } else if (reduce_param.operation() == caffe::ReductionParameter_ReductionOp_ASUM) { - attr->mode = schema::ReduceMode_ReduceASum; + prim->set_mode(mindspore::ReduceMode::Reduce_ASum); } else { MS_LOG(ERROR) << "unsupported reduce mode: " << reduce_param.operation(); return nullptr; } } else { - attr->mode = schema::ReduceMode_ReduceSum; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum); } std::vector<int32_t> axes; @@ -54,13 +51,9 @@ PrimitiveC *CaffeReduceParser::ParseLitePrimitive(const caffe::LayerParameter &p } else { axes = std::vector<int>(1, 0); } - attr->axes = axes; - attr->reduceToEnd = true; + prim->AddAttr("axes", MakeValue(axes)); - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Reduce; - primitive->value.value = attr.release(); - return
PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffeReduceParser("Reduction", new CaffeReduceParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h index f818e0a114..ff87638be4 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reduce_parser.h @@ -28,7 +28,7 @@ class CaffeReduceParser : public CaffeNodeParser { CaffeReduceParser() : CaffeNodeParser("reduce") {} ~CaffeReduceParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc deleted file mode 100644 index 345fbf72e1..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.cc +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/caffe/caffe_relu6_parser.h" -#include <memory> - -namespace mindspore { -namespace lite { -PrimitiveC *CaffeRelu6Parser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT()); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->type = schema::ActivationType_RELU6; - if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) { - float negative_slope = proto.relu_param().negative_slope(); - if (0 != negative_slope) { - attr->type = schema::ActivationType_LEAKY_RELU; - attr->alpha = negative_slope; - } - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -CaffeNodeRegistrar g_caffeRelu6Parser("ReLU6", new CaffeRelu6Parser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h deleted file mode 100644 index 82b6256e8e..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu6_parser.h +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU6_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU6_PARSER_H_ - -#include <vector> -#include "tools/converter/parser/caffe/caffe_node_parser.h" -#include "tools/converter/parser/caffe/caffe_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class CaffeRelu6Parser : public CaffeNodeParser { - public: - CaffeRelu6Parser() : CaffeNodeParser("relu6") {} - ~CaffeRelu6Parser() override = default; - - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU6_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc deleted file mode 100644 index 110be37d9c..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.cc +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/converter/parser/caffe/caffe_relu_parser.h" -#include <memory> - -namespace mindspore { -namespace lite { -PrimitiveC *CaffeReluParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->type = schema::ActivationType_RELU; - if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) { - float negative_slope = proto.relu_param().negative_slope(); - if (0 != negative_slope) { - attr->type = schema::ActivationType_LEAKY_RELU; - attr->alpha = negative_slope; - } - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -CaffeNodeRegistrar g_caffeReluParser("ReLU", new CaffeReluParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h deleted file mode 100644 index f76d1816a2..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_relu_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
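Note: the ReLU/ReLU6 parsers deleted above fold Caffe's negative_slope into the activation type. Restated against the new ops API (a minimal sketch; the unified replacement parser is outside this hunk, so the helper name is illustrative):

// Illustrative sketch, not part of the patch: a Caffe ReLU layer with a
// nonzero negative_slope is really LeakyReLU with alpha = negative_slope.
ops::PrimitiveC *ParseCaffeReluSketch(const caffe::LayerParameter &proto) {
  auto prim = std::make_unique<ops::Activation>();
  prim->set_activation_type(mindspore::ActivationType::RELU);
  if (proto.has_relu_param() && proto.relu_param().has_negative_slope()) {
    float negative_slope = proto.relu_param().negative_slope();
    if (negative_slope != 0) {
      prim->set_activation_type(mindspore::ActivationType::LEAKY_RELU);
      prim->set_alpha(negative_slope);
    }
  }
  return prim.release();
}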
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_ - -#include <vector> -#include "tools/converter/parser/caffe/caffe_node_parser.h" -#include "tools/converter/parser/caffe/caffe_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class CaffeReluParser : public CaffeNodeParser { - public: - CaffeReluParser() : CaffeNodeParser("relu") {} - ~CaffeReluParser() override = default; - - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_RELU_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc index 7c9aaf94a0..846824c92d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.cc @@ -16,33 +16,26 @@ #include "tools/converter/parser/caffe/caffe_reshape_parser.h" #include <memory> +#include "ops/reshape.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeReshapeParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->format = schema::Format::Format_NCHW; +ops::PrimitiveC *CaffeReshapeParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Reshape>(); const caffe::ReshapeParameter &reshapeParam = proto.reshape_param(); if (!reshapeParam.has_shape()) { MS_LOG(ERROR) << "Reshape has no shape info, ret fail"; return nullptr; } - + std::vector<int32_t> shape; const caffe::BlobShape &blob_shape = reshapeParam.shape(); for (int i = 0; i < blob_shape.dim_size(); i++) { - attr->shape.push_back(blob_shape.dim(i)); + shape.push_back(blob_shape.dim(i)); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Reshape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->AddAttr("shape", MakeValue(shape)); + + return prim.release(); } CaffeNodeRegistrar g_caffeReshapeParser("Reshape", new CaffeReshapeParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h index 55c4aca68d..1456e3d560 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_reshape_parser.h @@ -28,7 +28,7 @@ class CaffeReshapeParser : public CaffeNodeParser { CaffeReshapeParser() : CaffeNodeParser("reshape") {} ~CaffeReshapeParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc index 0726aa010a..a987bd416d 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020-2021 Huawei Technologies Co., Ltd + * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,17 +16,28 @@ #include "tools/converter/parser/caffe/caffe_scale_parser.h" #include <memory> +#include "ops/fusion/scale_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeScaleParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ScaleT> attr = std::make_unique<schema::ScaleT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; + +STATUS CaffeScaleParser::GetAxisIndex(const int32_t &axis, uint32_t *axis_index) { + if (axis < -4 || axis >= 4) { + MS_LOG(ERROR) << "Scale axis value(" << axis << ") is not correct"; + return RET_ERROR; + } + + if (axis == -1) { + MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims."; } + *axis_index = (axis + 4) % 4; + return RET_OK; +} + +ops::PrimitiveC *CaffeScaleParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::ScaleFusion>(); + if (weight.blobs_size() + weight.bottom_size() < 2) { MS_LOG(ERROR) << "Scale bottom size:" << weight.bottom_size() << ", blobs size:" << weight.blobs_size() << " invalid in layer " << weight.name().c_str(); @@ -34,33 +45,17 @@ PrimitiveC *CaffeScaleParser::ParseLitePrimitive(const caffe::LayerParameter &pr } const caffe::ScaleParameter &scaleParam = weight.scale_param(); - attr->axis = 1; + prim->set_axis(1); if (scaleParam.has_axis()) { uint32_t axis_index = 1; if (GetAxisIndex(scaleParam.axis(), &axis_index)) { MS_LOG(ERROR) << "scale get axis failed for layer " << weight.name().c_str(); return nullptr; } - attr->axis = axis_index; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Scale; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -STATUS CaffeScaleParser::GetAxisIndex(const int32_t &axis, uint32_t *axis_index) { - if (axis < -4 || axis >= 4) { - MS_LOG(ERROR) << "Scale axis value(" << axis << ") is not correct"; - return RET_ERROR; + prim->set_axis(axis_index); } - if (axis == -1) { - MS_LOG(WARNING) << "axis with -1 may lead to calculation errors when input less than 4 dims."; - } - - *axis_index = (axis + 4) % 4; - return RET_OK; + return prim.release(); } CaffeNodeRegistrar g_caffeScaleParser("Scale", new CaffeScaleParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h index ab34a2e491..12cb209215 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_scale_parser.h @@ -28,9 +28,10 @@ class CaffeScaleParser : public CaffeNodeParser { CaffeScaleParser() : CaffeNodeParser("scale") {} ~CaffeScaleParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; - static STATUS GetAxisIndex(const int32_t &axis, uint32_t *axis_index); + private: + STATUS GetAxisIndex(const int32_t &axis, uint32_t 
*axis_index); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc deleted file mode 100644 index f8ff9ccf85..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.cc +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/caffe/caffe_sigmoid_parser.h" -#include <memory> - -namespace mindspore { -namespace lite { -PrimitiveC *CaffeSigmoidParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ActivationT> attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->type = schema::ActivationType_SIGMOID; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -CaffeNodeRegistrar g_caffeSigmoidParser("Sigmoid", new CaffeSigmoidParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h deleted file mode 100644 index fd2f730981..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_sigmoid_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
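Note: as a standalone illustration of the (axis + 4) % 4 normalization in CaffeScaleParser::GetAxisIndex above (helper name hypothetical):

#include <cassert>
#include <cstdint>

// Caffe scale axes live in [-4, 4); negative values wrap around an assumed 4-D layout.
uint32_t NormalizeScaleAxis(int32_t axis) {
  assert(axis >= -4 && axis < 4);
  return static_cast<uint32_t>((axis + 4) % 4);  // e.g. -4 -> 0, -1 -> 3, 1 -> 1, 3 -> 3
}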
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_ - -#include <vector> -#include "tools/converter/parser/caffe/caffe_node_parser.h" -#include "tools/converter/parser/caffe/caffe_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class CaffeSigmoidParser : public CaffeNodeParser { - public: - CaffeSigmoidParser() : CaffeNodeParser("sigmoid") {} - ~CaffeSigmoidParser() override = default; - - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_SIGMOID_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc index c9df8641d5..d7546c6d5f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.cc @@ -16,23 +16,18 @@ #include "tools/converter/parser/caffe/caffe_slice_parser.h" #include <memory> +#include "ops/split.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeSliceParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeSliceParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Split>(); const caffe::SliceParameter &slice_param = proto.slice_param(); - - attr->numberSplit = 2; + prim->set_output_num(2); if (!slice_param.slice_point().empty()) { - attr->numberSplit = slice_param.slice_point_size() + 1; - std::vector<int32_t> size_splits; + prim->set_output_num(slice_param.slice_point_size() + 1); + std::vector<int64_t> size_splits; for (int i = 0; i < slice_param.slice_point_size(); ++i) { if (i == 0) { size_splits.push_back(slice_param.slice_point(i)); @@ -41,18 +36,16 @@ PrimitiveC *CaffeSliceParser::ParseLitePrimitive(const caffe::LayerParameter &pr } } size_splits.push_back(-1); - attr->sizeSplits = size_splits; + prim->set_size_splits(size_splits); } if (slice_param.has_axis()) { - attr->splitDim = slice_param.axis(); + prim->set_axis(slice_param.axis()); } else if (slice_param.has_slice_dim()) { - attr->splitDim = slice_param.slice_dim(); + prim->set_axis(slice_param.slice_dim()); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Split; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeSliceParser("Slice", new CaffeSliceParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h index 578faad338..818a48fa6f 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_slice_parser.h @@ -28,7 +28,7 @@ class CaffeSliceParser : public CaffeNodeParser { CaffeSliceParser() : CaffeNodeParser("slice") {} ~CaffeSliceParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC 
*Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc index d5d8667f84..217c7bdbfd 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.cc @@ -16,29 +16,23 @@ #include "tools/converter/parser/caffe/caffe_softmax_parser.h" #include <memory> +#include "ops/softmax.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeSoftmaxParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeSoftmaxParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::Softmax>(); if (proto.has_softmax_param() && proto.softmax_param().has_axis()) { if (proto.softmax_param().axis() == -1) { MS_LOG(DEBUG) << "axis with -1 may lead to calculation errors when input less than 4 dims."; } - attr->axis = proto.softmax_param().axis(); + prim->set_axis({proto.softmax_param().axis()}); } else { - attr->axis = 1; + prim->set_axis({1}); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_SoftMax; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } CaffeNodeRegistrar g_caffeSoftmaxParser("Softmax", new CaffeSoftmaxParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h index 2da6c324ee..ffe75ec92e 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_softmax_parser.h @@ -28,7 +28,7 @@ class CaffeSoftmaxParser : public CaffeNodeParser { CaffeSoftmaxParser() : CaffeNodeParser("softmax") {} ~CaffeSoftmaxParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc deleted file mode 100644 index 49b00cf7bf..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.cc +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
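Note: the slice_point-to-size_splits conversion in CaffeSliceParser::Parse above turns absolute offsets into segment lengths, with a trailing -1 for "the rest". A self-contained sketch (helper name hypothetical):

#include <cstdint>
#include <vector>

std::vector<int64_t> SlicePointsToSizeSplits(const std::vector<int64_t> &slice_points) {
  std::vector<int64_t> size_splits;
  for (size_t i = 0; i < slice_points.size(); ++i) {
    // First segment runs from 0 to the first point; later ones are deltas between points.
    size_splits.push_back(i == 0 ? slice_points[0] : slice_points[i] - slice_points[i - 1]);
  }
  size_splits.push_back(-1);  // e.g. slice points {2, 5} -> size splits {2, 3, -1}
  return size_splits;
}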
- */ - -#include "tools/converter/parser/caffe/caffe_tanh_parser.h" -#include <memory> -#include <vector> - -namespace mindspore { -namespace lite { -PrimitiveC *CaffeTanhParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::ActivationT> attr(new schema::ActivationT()); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->type = schema::ActivationType_TANH; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -CaffeNodeRegistrar g_caffeTanhParser("TanH", new CaffeTanhParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h deleted file mode 100644 index c721b1b547..0000000000 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tanh_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TANH_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TANH_PARSER_H - -#include <vector> -#include "tools/converter/parser/caffe/caffe_node_parser.h" -#include "tools/converter/parser/caffe/caffe_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class CaffeTanhParser : public CaffeNodeParser { - public: - CaffeTanhParser() : CaffeNodeParser("tanh") {} - ~CaffeTanhParser() override = default; - - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_CAFFE_CAFFE_TANH_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc index 10319f757e..81f8a9d6f9 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.cc @@ -17,39 +17,33 @@ #include "tools/converter/parser/caffe/caffe_tile_parser.h" #include <memory> #include <vector> +#include "ops/fusion/tile_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *CaffeTileParser::ParseLitePrimitive(const caffe::LayerParameter &proto, - const caffe::LayerParameter &weight) { - std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *CaffeTileParser::Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) { + auto prim = std::make_unique<ops::TileFusion>(); const caffe::TileParameter &tile_param = proto.tile_param(); - std::vector<int> dims; - std::vector<int> multiples; + 
std::vector<int64_t> dims; dims.clear(); - multiples.clear(); if (tile_param.has_axis()) { dims.push_back(tile_param.axis()); } else { dims.push_back(1); } + prim->set_dims(dims); + + std::vector<int32_t> multiples; + multiples.clear(); if (tile_param.has_tiles()) { multiples.push_back(tile_param.tiles()); } else { multiples.push_back(1); } + prim->AddAttr("multiples", MakeValue(multiples)); - attr->dims = dims; - attr->multiples = multiples; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Tile; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } CaffeNodeRegistrar g_caffeTileParser("Tile", new CaffeTileParser()); diff --git a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h index da906ba1b0..a5f8cfbfaa 100644 --- a/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/caffe/caffe_tile_parser.h @@ -28,7 +28,7 @@ class CaffeTileParser : public CaffeNodeParser { CaffeTileParser() : CaffeNodeParser("tile") {} ~CaffeTileParser() override = default; - PrimitiveC *ParseLitePrimitive(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; + ops::PrimitiveC *Parse(const caffe::LayerParameter &proto, const caffe::LayerParameter &weight) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.cc new file mode 100644 index 0000000000..e251a0a335 --- /dev/null +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.cc @@ -0,0 +1,139 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
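Note: for the Tile parser above, `dims` carries the single tiled axis and `multiples` the repeat count for it, so the output shape only grows along that axis. A self-contained sketch (helper name hypothetical):

#include <cstdint>
#include <vector>

std::vector<int64_t> TiledShape(std::vector<int64_t> shape, size_t axis, int64_t tiles) {
  shape[axis] *= tiles;  // e.g. axis=2, tiles=4 on {1, 3, 8, 8} -> {1, 3, 32, 8}
  return shape;
}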
+ */ + +#include "tools/converter/parser/onnx/onnx_activation_parser.h" +#include <memory> +#include <vector> +#include "securec/include/securec.h" +#include "ops/fusion/prelu_fusion.h" +#include "ops/elu.h" +#include "ops/fusion/activation.h" + +namespace mindspore { +namespace lite { +ops::PrimitiveC *OnnxReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::RELU); + + return prim.release(); +} + +ops::PrimitiveC *OnnxLeakyReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Activation>(); + + for (const auto &onnx_node_attr : onnx_node.attribute()) { + const auto &attribute_name = onnx_node_attr.name(); + if (attribute_name == "alpha") { + prim->set_alpha(onnx_node_attr.f()); + } + } + + prim->set_activation_type(mindspore::ActivationType::LEAKY_RELU); + + return prim.release(); +} + +ops::PrimitiveC *OnnxPReluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::PReLUFusion>(); + + std::vector<onnx::TensorProto> params; + const auto &input_name = onnx_node.input(1); + auto node_iter = std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(), + [input_name](const onnx::TensorProto &proto) { return proto.name() == input_name; }); + if (node_iter == onnx_graph.initializer().end()) { + MS_LOG(ERROR) << "cannot find node: " << input_name.c_str(); + return nullptr; + } else { + params.push_back(*node_iter); + } + + if (!params.empty()) { + const onnx::TensorProto *slope_data = &params[0]; + if (slope_data == nullptr) { + MS_LOG(ERROR) << "input error: params[0] is null"; + return nullptr; + } + std::vector<float> slope; + if (slope_data->float_data_size() > 0) { + const int64_t slope_size = slope_data->float_data_size(); + for (int64_t i = 0; i < slope_size; i++) { + slope.emplace_back(slope_data->float_data(i)); + } + prim->set_slope(slope); + prim->set_channel_shared(slope_size == 1); + } else { + const auto slope_raw_data = reinterpret_cast<const float *>(slope_data->raw_data().data()); + const int64_t slope_size = slope_data->raw_data().size() / sizeof(float); + bool channel_shared = false; + if (slope_size == 1) { + slope.push_back(*slope_raw_data); + channel_shared = true; + } else { + slope.resize(slope_size); + if (memcpy_s(slope.data(), slope_size * sizeof(float), slope_raw_data, slope_size * sizeof(float)) != EOK) { + MS_LOG(ERROR) << "memcpy_s failed"; + return nullptr; + } + } + prim->set_slope(slope); + prim->set_channel_shared(channel_shared); + } + } else { + MS_LOG(WARNING) << "The slope of prelu is null, which may cause errors."; + } + + return prim.release(); +} + +ops::PrimitiveC *OnnxEluParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Elu>(); + + for (const auto &onnx_node_attr : onnx_node.attribute()) { + const auto &attribute_name = onnx_node_attr.name(); + if (attribute_name == "alpha") { + prim->set_alpha(onnx_node_attr.f()); + } + } + + return prim.release(); +} + +ops::PrimitiveC *OnnxTanhParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::TANH); + + return prim.release(); +} + +ops::PrimitiveC *OnnxSigmoidParser::Parse(const
onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::SIGMOID); + + return prim.release(); +} + +OnnxNodeRegistrar g_onnxReluParser("Relu", new OnnxReluParser()); +OnnxNodeRegistrar g_onnxLeakyReluParser("LeakyRelu", new OnnxLeakyReluParser()); +OnnxNodeRegistrar g_onnxPReluParser("PRelu", new OnnxPReluParser()); +OnnxNodeRegistrar g_onnxEluParser("Elu", new OnnxEluParser()); +OnnxNodeRegistrar g_onnxTanhParser("Tanh", new OnnxTanhParser()); +OnnxNodeRegistrar g_onnxSigmoidParser("Sigmoid", new OnnxSigmoidParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.h new file mode 100644 index 0000000000..35bc8cc682 --- /dev/null +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_activation_parser.h @@ -0,0 +1,75 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H + +#include "tools/converter/parser/onnx/onnx_node_parser.h" +#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" + +namespace mindspore { +namespace lite { +class OnnxReluParser : public OnnxNodeParser { + public: + OnnxReluParser() : OnnxNodeParser("Relu") {} + ~OnnxReluParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxLeakyReluParser : public OnnxNodeParser { + public: + OnnxLeakyReluParser() : OnnxNodeParser("LeakyRelu") {} + ~OnnxLeakyReluParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxPReluParser : public OnnxNodeParser { + public: + OnnxPReluParser() : OnnxNodeParser("Prelu") {} + ~OnnxPReluParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxEluParser : public OnnxNodeParser { + public: + OnnxEluParser() : OnnxNodeParser("Elu") {} + ~OnnxEluParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxTanhParser : public OnnxNodeParser { + public: + OnnxTanhParser() : OnnxNodeParser("Tanh") {} + ~OnnxTanhParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxSigmoidParser : public OnnxNodeParser { + public: + OnnxSigmoidParser() : OnnxNodeParser("Sigmoid") {} + ~OnnxSigmoidParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +} // namespace lite +} // namespace mindspore
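Note: OnnxPReluParser::Parse above has to handle both layouts an onnx::TensorProto initializer can use for float storage. A condensed sketch of just that branch (helper name hypothetical; assumes the generated onnx protobuf headers and <vector> are in scope):

std::vector<float> ReadFloatInitializer(const onnx::TensorProto &tensor) {
  std::vector<float> values;
  if (tensor.float_data_size() > 0) {
    // Floats stored element-wise in the repeated float_data field.
    for (int i = 0; i < tensor.float_data_size(); ++i) {
      values.push_back(tensor.float_data(i));
    }
  } else {
    // Floats packed byte-wise into the raw_data field.
    const auto *data = reinterpret_cast<const float *>(tensor.raw_data().data());
    values.assign(data, data + tensor.raw_data().size() / sizeof(float));
  }
  return values;
}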
+#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc index 41a54fef94..79531537ef 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.cc @@ -16,25 +16,13 @@ #include "tools/converter/parser/onnx/onnx_adder_parser.h" #include <memory> +#include "ops/fusion/adder_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxAdderParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx AdderParser"; - auto attr = std::make_unique<schema::AdderT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Adder; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAdderParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::AdderFusion>(); + return prim.release(); } OnnxNodeRegistrar g_onnxAdderParser("adder_f", new OnnxAdderParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h index 59c13aa93c..31f6b131c7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_adder_parser.h @@ -26,7 +26,8 @@ class OnnxAdderParser : public OnnxNodeParser { public: OnnxAdderParser() : OnnxNodeParser("Adder") {} ~OnnxAdderParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc index b901e49cb0..fcc96f29b8 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.cc @@ -16,35 +16,23 @@ #include "tools/converter/parser/onnx/onnx_argmax_parser.h" #include <memory> +#include "ops/fusion/arg_max_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxArgMaxParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ArgMaxParser"; - - auto attr = std::make_unique<schema::ArgMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxArgMaxParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::ArgMaxFusion>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_axis(onnx_node_attr.i()); } else if (attribute_name == "keepdims") { - attr->keepDims = static_cast<bool>(onnx_node_attr.i()); + prim->set_keep_dims(static_cast<bool>(onnx_node_attr.i())); } } 
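Note: these ONNX parsers share one idiom: iterate onnx_node.attribute() and dispatch on the attribute name. For the ArgMax case above, the equivalent standalone loop looks like this (assumes onnx_node is the onnx::NodeProto in scope; defaults follow the ONNX spec):

int64_t axis = 0;       // ONNX ArgMax default axis
bool keep_dims = true;  // ONNX default: keepdims = 1
for (const auto &attr : onnx_node.attribute()) {
  if (attr.name() == "axis") {
    axis = attr.i();
  } else if (attr.name() == "keepdims") {
    keep_dims = static_cast<bool>(attr.i());
  }
}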
- auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_ArgMax; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxArgMaxParser("ArgMax", new OnnxArgMaxParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h index 65f888e107..4dea29b724 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_argmax_parser.h @@ -27,7 +27,7 @@ class OnnxArgMaxParser : public OnnxNodeParser { OnnxArgMaxParser() : OnnxNodeParser("ArgMax") {} ~OnnxArgMaxParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc index bb23706f17..5d189fe06c 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.cc @@ -18,539 +18,194 @@ #include <memory> #include <numeric> #include <functional> +#include "ops/fusion/add_fusion.h" +#include "ops/fusion/mul_fusion.h" +#include "ops/fusion/div_fusion.h" +#include "ops/fusion/sub_fusion.h" +#include "ops/fusion/exp_fusion.h" +#include "ops/equal.h" +#include "ops/less.h" +#include "ops/greater.h" +#include "ops/floor.h" +#include "ops/abs.h" +#include "ops/cos.h" +#include "ops/ceil.h" +#include "ops/log.h" +#include "ops/atan.h" +#include "ops/asin.h" +#include "ops/logical_and.h" +#include "ops/logical_not.h" +#include "ops/logical_or.h" +#include "ops/neg.h" +#include "ops/round.h" +#include "ops/tan.h" +#include "ops/sqrt.h" +#include "ops/fusion/pow_fusion.h" +#include "ops/minimum.h" +#include "ops/maximum.h" +#include "ops/eltwise.h" +#include "ops/sin.h" +#include "ops/reciprocal.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxAddParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx AddParser"; - auto attr = std::make_unique<schema::AddT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Add; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::AddFusion>(); + return prim.release(); } -lite::PrimitiveC *OnnxSubParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SubParser"; - auto attr = std::make_unique<schema::SubT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if 
(primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Sub; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxSubParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::SubFusion>(); + return prim.release(); } -lite::PrimitiveC *OnnxMulParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx MulParser"; - auto attr = std::make_unique<schema::MulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Mul; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxDivParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::DivFusion>(); + return prim.release(); } -lite::PrimitiveC *OnnxDivParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx DivParser"; - auto attr = std::make_unique<schema::DivT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Div; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxMulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::MulFusion>(); + return prim.release(); } -lite::PrimitiveC *OnnxPowParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx PowParser"; - auto attr = std::make_unique<schema::PowerT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->scale = 1.0f; - attr->shift = 0.0f; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Power; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxEqualParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Equal>(); + return prim.release(); } -lite::PrimitiveC *OnnxEqualParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx EqualParser"; - auto attr = std::make_unique<schema::EqualT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Equal; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxLessParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = 
std::make_unique<ops::Less>(); + return prim.release(); } -lite::PrimitiveC *OnnxLessParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LessParser"; - auto attr = std::make_unique<schema::LessT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Less; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxGreaterParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Greater>(); + return prim.release(); } -lite::PrimitiveC *OnnxGreaterParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx GreaterParser"; - auto attr = std::make_unique<schema::GreaterT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Greater; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxFloorParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Floor>(); + return prim.release(); } -lite::PrimitiveC *OnnxMinParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx MinParser"; - auto attr = std::make_unique<schema::MinimumT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Minimum; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAbsParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Abs>(); + return prim.release(); } -lite::PrimitiveC *OnnxEltwiseParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx EltwiseParser"; - auto attr = std::make_unique<schema::EltwiseT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxExpParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::ExpFusion>(); - if (onnx_node.op_type() == "Sum") { - attr->mode = schema::EltwiseMode_SUM; - } else if (onnx_node.op_type() == "Max") { - attr->mode = schema::EltwiseMode_MAXIMUM; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Eltwise; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} + prim->set_base(-1.0); + prim->set_scale(1.0); + prim->set_shift(0.0); -lite::PrimitiveC *OnnxFloorParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const 
onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx FloorParser"; - auto attr = std::make_unique<schema::FloorT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Floor; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } -lite::PrimitiveC *OnnxAbsParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx AbsParser"; - auto attr = std::make_unique<schema::AbsT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Abs; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxCosParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Cos>(); + return prim.release(); } -lite::PrimitiveC *OnnxNegParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx NegParser"; - auto attr = std::make_unique<schema::NegT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Neg; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxCeilParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Ceil>(); + return prim.release(); } -lite::PrimitiveC *OnnxExpParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ExpParser"; - auto attr = std::make_unique<schema::ExpT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Exp; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxLogParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Log>(); + return prim.release(); } -lite::PrimitiveC *OnnxCosParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx CosParser"; - auto attr = std::make_unique<schema::CosT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Cos; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAtanParser::Parse(const onnx::GraphProto &onnx_graph, const 
onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Atan>(); + return prim.release(); } -lite::PrimitiveC *OnnxSinParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SinParser"; - auto attr = std::make_unique<schema::SinT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Sin; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAsinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Asin>(); + return prim.release(); } -lite::PrimitiveC *OnnxSqrtParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SqrtParser"; - auto attr = std::make_unique<schema::SqrtT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Sqrt; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxAndParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LogicalAnd>(); + return prim.release(); } -lite::PrimitiveC *OnnxCeilParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx CeilParser"; - auto attr = std::make_unique<schema::CeilT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Ceil; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxOrParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LogicalOr>(); + return prim.release(); } -lite::PrimitiveC *OnnxLogParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LogParser"; - auto attr = std::make_unique<schema::LogT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Log; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxNotParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LogicalNot>(); + return prim.release(); } -lite::PrimitiveC *OnnxTanParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TanParser"; - auto attr = std::make_unique<schema::TanT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = 
std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Tan; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxNegParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Neg>(); + return prim.release(); } -lite::PrimitiveC *OnnxAtanParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx AtanParser"; - auto attr = std::make_unique<schema::AtanT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Atan; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxRoundParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Round>(); + return prim.release(); } -lite::PrimitiveC *OnnxAsinParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - auto attr = std::make_unique<schema::AsinT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Asin; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxSinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Sin>(); + return prim.release(); } -lite::PrimitiveC *OnnxTanhParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TanhParser"; - auto attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->type = schema::ActivationType_TANH; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxTanParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Tan>(); + return prim.release(); } -lite::PrimitiveC *OnnxSignParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TanhParser"; - auto attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->type = schema::ActivationType_SIGN; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxSqrtParser::Parse(const onnx::GraphProto 
&onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Sqrt>(); + return prim.release(); } -lite::PrimitiveC *OnnxAndParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx AndParser"; - auto attr = std::make_unique<schema::LogicalAndT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalAnd; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxPowParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::PowFusion>(); + + prim->set_scale(1.0); + prim->set_shift(0.0); + + return prim.release(); } -lite::PrimitiveC *OnnxOrParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx OrParser"; - auto attr = std::make_unique<schema::LogicalOrT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalOr; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxMinParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Minimum>(); + return prim.release(); } -lite::PrimitiveC *OnnxNotParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx NotParser"; - auto attr = std::make_unique<schema::LogicalNotT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalNot; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxMaxParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Maximum>(); + return prim.release(); } -lite::PrimitiveC *OnnxRoundParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx RoundParser"; - auto attr = std::make_unique<schema::RoundT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; +ops::PrimitiveC *OnnxEltwiseParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Eltwise>(); + + if (onnx_node.op_type() == "Sum") { + prim->set_mode(mindspore::EltwiseMode::SUM); + } else { + MS_LOG(ERROR) << "unsupported Eltwise type"; return nullptr; } - primitive->value.type = schema::PrimitiveType_Round; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } -lite::PrimitiveC 
*OnnxReciprocalParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ReciprocalParser"; - auto attr = std::make_unique<schema::ReciprocalT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Reciprocal; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxReciprocalParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Reciprocal>(); + return prim.release(); } + OnnxNodeRegistrar g_onnxAddParser("Add", new OnnxAddParser()); OnnxNodeRegistrar g_onnxInt8AddParser("Int8Add", new OnnxAddParser()); OnnxNodeRegistrar g_onnxSubParser("Sub", new OnnxSubParser()); @@ -562,7 +217,7 @@ OnnxNodeRegistrar g_onnxLessParser("Less", new OnnxLessParser()); OnnxNodeRegistrar g_onnxGreaterParser("Greater", new OnnxGreaterParser()); OnnxNodeRegistrar g_onnxMinParser("Min", new OnnxMinParser()); OnnxNodeRegistrar g_onnxSumParser("Sum", new OnnxEltwiseParser()); -OnnxNodeRegistrar g_onnxMaxParser("Max", new OnnxEltwiseParser()); +OnnxNodeRegistrar g_onnxMaxParser("Max", new OnnxMaxParser()); OnnxNodeRegistrar g_onnxFloorParser("Floor", new OnnxFloorParser()); OnnxNodeRegistrar g_onnxAbsParser("Abs", new OnnxAbsParser()); OnnxNodeRegistrar g_onnxNegParser("Neg", new OnnxNegParser()); @@ -575,8 +230,6 @@ OnnxNodeRegistrar g_onnxLogParser("Log", new OnnxLogParser()); OnnxNodeRegistrar g_onnxTanParser("Tan", new OnnxTanParser()); OnnxNodeRegistrar g_onnxAtanParser("Atan", new OnnxAtanParser()); OnnxNodeRegistrar g_onnxAsinParser("Asin", new OnnxAsinParser()); -OnnxNodeRegistrar g_onnxTanhParser("Tanh", new OnnxTanhParser()); -OnnxNodeRegistrar g_onnxSignParser("Sign", new OnnxTanhParser()); OnnxNodeRegistrar g_onnxAndParser("And", new OnnxAndParser()); OnnxNodeRegistrar g_onnxOrParser("Or", new OnnxOrParser()); OnnxNodeRegistrar g_onnxNotParser("Not", new OnnxNotParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h index 7fc62cc306..557c91bcb4 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_arithmetic_operation_parser.h @@ -26,203 +26,224 @@ class OnnxAddParser : public OnnxNodeParser { public: OnnxAddParser() : OnnxNodeParser("Add") {} ~OnnxAddParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxSubParser : public OnnxNodeParser { public: OnnxSubParser() : OnnxNodeParser("Sub") {} ~OnnxSubParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxMulParser : public OnnxNodeParser { public: OnnxMulParser() : OnnxNodeParser("Mul") {} ~OnnxMulParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const 
onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxDivParser : public OnnxNodeParser { public: OnnxDivParser() : OnnxNodeParser("Div") {} ~OnnxDivParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxPowParser : public OnnxNodeParser { public: OnnxPowParser() : OnnxNodeParser("Power") {} ~OnnxPowParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxEqualParser : public OnnxNodeParser { public: OnnxEqualParser() : OnnxNodeParser("Equal") {} ~OnnxEqualParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxLessParser : public OnnxNodeParser { public: OnnxLessParser() : OnnxNodeParser("Less") {} ~OnnxLessParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxGreaterParser : public OnnxNodeParser { public: OnnxGreaterParser() : OnnxNodeParser("Greater") {} ~OnnxGreaterParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxMinParser : public OnnxNodeParser { public: OnnxMinParser() : OnnxNodeParser("Min") {} ~OnnxMinParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxMaxParser : public OnnxNodeParser { + public: + OnnxMaxParser() : OnnxNodeParser("Max") {} + ~OnnxMaxParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxEltwiseParser : public OnnxNodeParser { public: OnnxEltwiseParser() : OnnxNodeParser("Eltwise") {} ~OnnxEltwiseParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxFloorParser : public OnnxNodeParser { public: OnnxFloorParser() : OnnxNodeParser("Floor") {} ~OnnxFloorParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxAbsParser : public OnnxNodeParser { public: OnnxAbsParser() : OnnxNodeParser("Abs") {} ~OnnxAbsParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const 
onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxNegParser : public OnnxNodeParser { public: OnnxNegParser() : OnnxNodeParser("Neg") {} ~OnnxNegParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxExpParser : public OnnxNodeParser { public: OnnxExpParser() : OnnxNodeParser("Exp") {} ~OnnxExpParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxCosParser : public OnnxNodeParser { public: OnnxCosParser() : OnnxNodeParser("Cos") {} ~OnnxCosParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxSinParser : public OnnxNodeParser { public: OnnxSinParser() : OnnxNodeParser("Sin") {} ~OnnxSinParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxSqrtParser : public OnnxNodeParser { public: OnnxSqrtParser() : OnnxNodeParser("Sqrt") {} ~OnnxSqrtParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxCeilParser : public OnnxNodeParser { public: OnnxCeilParser() : OnnxNodeParser("Ceil") {} ~OnnxCeilParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxLogParser : public OnnxNodeParser { public: OnnxLogParser() : OnnxNodeParser("Log") {} ~OnnxLogParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxTanParser : public OnnxNodeParser { public: OnnxTanParser() : OnnxNodeParser("Tan") {} ~OnnxTanParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxAtanParser : public OnnxNodeParser { public: OnnxAtanParser() : OnnxNodeParser("Atan") {} ~OnnxAtanParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxAsinParser : public OnnxNodeParser { public: OnnxAsinParser() : 
OnnxNodeParser("Asin") {} ~OnnxAsinParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; -class OnnxTanhParser : public OnnxNodeParser { - public: - OnnxTanhParser() : OnnxNodeParser("Tanh") {} - ~OnnxTanhParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; - -class OnnxSignParser : public OnnxNodeParser { - public: - OnnxSignParser() : OnnxNodeParser("Sign") {} - ~OnnxSignParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxAndParser : public OnnxNodeParser { public: OnnxAndParser() : OnnxNodeParser("And") {} ~OnnxAndParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxOrParser : public OnnxNodeParser { public: OnnxOrParser() : OnnxNodeParser("Or") {} ~OnnxOrParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxNotParser : public OnnxNodeParser { public: OnnxNotParser() : OnnxNodeParser("Not") {} ~OnnxNotParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxRoundParser : public OnnxNodeParser { public: OnnxRoundParser() : OnnxNodeParser("Round") {} ~OnnxRoundParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; class OnnxReciprocalParser : public OnnxNodeParser { public: OnnxReciprocalParser() : OnnxNodeParser("Reciprocal") {} ~OnnxReciprocalParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc index 3ea9a670ed..dc5d9ae50f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.cc @@ -16,35 +16,22 @@ #include "tools/converter/parser/onnx/onnx_batchnorm_parser.h" #include <memory> +#include "ops/fused_batch_norm.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxBatchNormParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx BatchNormParser"; - auto attr = std::make_unique<schema::FusedBatchNormT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } 
+ops::PrimitiveC *OnnxBatchNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::FusedBatchNorm>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { if (onnx_node_attr.name() == "epsilon") { - attr->epsilon = onnx_node_attr.f(); + prim->set_epsilon(onnx_node_attr.f()); } else if (onnx_node_attr.name() == "momentum") { - attr->momentum = onnx_node_attr.f(); - } else if (onnx_node_attr.name() == "spatial") { - attr->spatial = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_momentum(onnx_node_attr.f()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_FusedBatchNorm; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxBatchNormParser("BatchNormalization", new OnnxBatchNormParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h index 18f2b7ee3c..fff6fcd4a2 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_batchnorm_parser.h @@ -27,7 +27,7 @@ class OnnxBatchNormParser : public OnnxNodeParser { OnnxBatchNormParser() : OnnxNodeParser("BatchNormalization") {} ~OnnxBatchNormParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc index 935c62f3e7..2337522d72 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.cc @@ -16,28 +16,13 @@ #include "tools/converter/parser/onnx/onnx_biasadd_parser.h" #include <memory> +#include "ops/bias_add.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxBiasAddParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx BiasAddParser"; - auto attr = std::make_unique<schema::BiasAddT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->axis = {1}; - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_BiasAdd; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxBiasAddParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::BiasAdd>(); + return prim.release(); } OnnxNodeRegistrar g_onnxBiasAddParser("BiasAdd", new OnnxBiasAddParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h index 01b15db53e..265ff970fe 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_biasadd_parser.h @@ -27,7 +27,7 @@ class 
OnnxBiasAddParser : public OnnxNodeParser { OnnxBiasAddParser() : OnnxNodeParser("BiasAdd") {} ~OnnxBiasAddParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc index 1a2a93cc07..a54dfc9273 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.cc @@ -17,17 +17,13 @@ #include "tools/converter/parser/onnx/onnx_cast_parser.h" #include "tools/converter/parser/onnx/onnx_model_parser.h" #include <memory> +#include "ops/cast.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxCastParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx CastParser"; - auto attr = std::make_unique<schema::CastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + +ops::PrimitiveC *OnnxCastParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Cast>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); @@ -36,17 +32,11 @@ lite::PrimitiveC *OnnxCastParser::ParseLitePrimitive(const onnx::GraphProto &onn if (dst_type == kNumberTypeInt64) { dst_type = kNumberTypeInt32; } - attr->dstT = static_cast<int>(dst_type); + prim->AddAttr("to", MakeValue(static_cast<int32_t>(dst_type))); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Cast; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxCastParser("Cast", new OnnxCastParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h index 45389ce215..3bf67beb25 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_cast_parser.h @@ -27,7 +27,7 @@ class OnnxCastParser : public OnnxNodeParser { OnnxCastParser() : OnnxNodeParser("Cast") {} ~OnnxCastParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc index 3012b91c04..0cf2c5baa0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.cc @@ -16,35 +16,25 @@ #include "tools/converter/parser/onnx/onnx_clip_parser.h" #include <memory> +#include "ops/clip.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxClipParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ClipParser"; 
- auto attr = std::make_unique<schema::ClipT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->max = -1; - attr->min = -1; +ops::PrimitiveC *OnnxClipParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Clip>(); + + prim->set_min(-1); + prim->set_max(-1); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "max") { - attr->max = onnx_node_attr.f(); + prim->set_max(onnx_node_attr.f()); } else if (attribute_name == "min") { - attr->min = onnx_node_attr.f(); + prim->set_min(onnx_node_attr.f()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Clip; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxClipParser("Clip", new OnnxClipParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h index bd6dcb8d75..44c58fe04c 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_clip_parser.h @@ -27,7 +27,7 @@ class OnnxClipParser : public OnnxNodeParser { OnnxClipParser() : OnnxNodeParser("Clip") {} ~OnnxClipParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc index 4c83fa4992..8c51746057 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.cc @@ -16,32 +16,21 @@ #include "tools/converter/parser/onnx/onnx_concat_parser.h" #include <memory> +#include "ops/concat.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxConcatParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ConcatParser"; - auto attr = std::make_unique<schema::ConcatT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxConcatParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Concat>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_axis(onnx_node_attr.i()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Concat; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxConcatParser("Concat", new OnnxConcatParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h 
b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h index ccab17ca15..fc12edd90f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_concat_parser.h @@ -27,7 +27,7 @@ class OnnxConcatParser : public OnnxNodeParser { OnnxConcatParser() : OnnxNodeParser("Concat") {} ~OnnxConcatParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc index dc1c930706..340518c13f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.cc @@ -16,53 +16,51 @@ #include "tools/converter/parser/onnx/onnx_constant_of_shape_parser.h" #include <memory> +#include <vector> #include "tools/converter/parser/onnx/onnx_model_parser.h" +#include "ops/constant_of_shape.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxConstantOfShapeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ConstantOfShapeParser"; - auto attr = std::make_unique<schema::ConstantOfShapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxConstantOfShapeParser::Parse(const onnx::GraphProto &onnx_graph, + const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::ConstantOfShape>(); + int data_type = 0; + std::vector<float> values; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "value") { switch (onnx_node_attr.type()) { case onnx::AttributeProto_AttributeType_FLOAT: - attr->dataType = OnnxModelParser::GetDataTypeFromOnnx(onnx::TensorProto_DataType_FLOAT); - attr->value.push_back(onnx_node_attr.f()); + data_type = OnnxModelParser::GetDataTypeFromOnnx(onnx::TensorProto_DataType_FLOAT); + values.push_back(onnx_node_attr.f()); break; case onnx::AttributeProto_AttributeType_INT: - attr->dataType = OnnxModelParser::GetDataTypeFromOnnx(onnx::TensorProto_DataType_INT32); - attr->value.push_back(static_cast<float>(onnx_node_attr.i())); + data_type = OnnxModelParser::GetDataTypeFromOnnx(onnx::TensorProto_DataType_INT32); + values.push_back(static_cast<float>(onnx_node_attr.i())); break; case onnx::AttributeProto_AttributeType_TENSOR: { const auto &tensor = onnx_node_attr.t(); - auto ret = GetTensorDataFromOnnx(tensor, &attr->value, &attr->dataType); + auto ret = GetTensorDataFromOnnx(tensor, &values, &data_type); if (ret != RET_OK) { MS_LOG(ERROR) << "get data from tensor failed"; return nullptr; } } break; default: - MS_LOG(ERROR) << "The data type is not supported."; + MS_LOG(ERROR) << "Datatype : " << onnx_node_attr.type() << " is not supported."; return nullptr; } } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; + if (values.empty()) { + values = {0}; } - primitive->value.type = schema::PrimitiveType_ConstantOfShape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + 
prim->set_value(values); + prim->set_data_type(static_cast<int64_t>(data_type)); + + return prim.release(); } OnnxNodeRegistrar g_onnxConstantOfShapeParser("ConstantOfShape", new OnnxConstantOfShapeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.h index 09e5d4a1b5..2cabe1e0be 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_of_shape_parser.h @@ -27,7 +27,7 @@ class OnnxConstantOfShapeParser : public OnnxNodeParser { OnnxConstantOfShapeParser() : OnnxNodeParser("ConstantOfShape") {} ~OnnxConstantOfShapeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc index 51b8b04da0..253570c78e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.cc @@ -15,14 +15,16 @@ */ #include "tools/converter/parser/onnx/onnx_constant_parser.h" -#include <memory> #include <vector> +#include <memory> #include <algorithm> #include "tools/converter/parser/onnx/onnx_model_parser.h" +#include "ops/constant.h" +#include "src/param_value_lite.h" namespace mindspore { namespace lite { -STATUS OnnxConstantParser::AddDataInfoAttr(const onnx::TensorProto &onnx_const_tensor, lite::PrimitiveC *primitive_c) { +STATUS OnnxConstantParser::AddDataInfoAttr(const onnx::TensorProto &onnx_const_tensor, ops::PrimitiveC *prim) { ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); if (param_value == nullptr) { MS_LOG(ERROR) << "new a paramValueLite failed."; @@ -46,24 +48,13 @@ STATUS OnnxConstantParser::AddDataInfoAttr(const onnx::TensorProto &onnx_const_t MS_LOG(ERROR) << "get value failed."; return RET_ERROR; } - primitive_c->set_attr("const_data", param_value); + prim->set_attr("const_data", param_value); return RET_OK; } -lite::PrimitiveC *OnnxConstantParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ConstantParser"; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Constant; - auto primitive_c = PrimitiveC::Create(primitive.release()); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "create primitiveC failed."; - return nullptr; - } +ops::PrimitiveC *OnnxConstantParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Constant>(); + for (const auto &attr : onnx_node.attribute()) { if (attr.name() == "sparse_value") { MS_LOG(WARNING) << "sparse_value"; @@ -71,18 +62,16 @@ lite::PrimitiveC *OnnxConstantParser::ParseLitePrimitive(const onnx::GraphProto } if (attr.name() == "value") { const auto &const_tensor = attr.t(); - if (AddDataInfoAttr(const_tensor, primitive_c) != RET_OK) { + if (AddDataInfoAttr(const_tensor, prim.get()) != RET_OK) { MS_LOG(ERROR) << "add basic attr failed."; - delete primitive_c; return nullptr; } } else { 
MS_LOG(ERROR) << "processing Constant op attr " << attr.name() << " not implemented"; - delete primitive_c; return nullptr; } } - return primitive_c; + return prim.release(); } OnnxNodeRegistrar g_onnxConstantParser("Constant", new OnnxConstantParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h index d58736bf91..6147e16392 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_constant_parser.h @@ -27,8 +27,9 @@ class OnnxConstantParser : public OnnxNodeParser { OnnxConstantParser() : OnnxNodeParser("Constant") {} ~OnnxConstantParser() override = default; - STATUS AddDataInfoAttr(const onnx::TensorProto &onnx_const_tensor, lite::PrimitiveC *primitive_c); - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + STATUS AddDataInfoAttr(const onnx::TensorProto &onnx_const_tensor, ops::PrimitiveC *prim); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc index d146f43af4..f8ccdceb1a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.cc @@ -18,111 +18,57 @@ #include <algorithm> #include <memory> #include <vector> +#include <string> +#include "ops/fusion/conv2d_fusion.h" namespace mindspore::lite { -bool OnnxConvParser::ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, - schema::PrimitiveT *primitive) { - MS_LOG(DEBUG) << "onnx DepthwiseConvParser"; - if (attr == nullptr || primitive == nullptr) { - MS_LOG(ERROR) << "input parameter is nullptr"; - return false; - } - auto depthwiseConv2DParam = std::make_unique<schema::DepthwiseConv2DT>(); - if (depthwiseConv2DParam == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return false; - } - depthwiseConv2DParam->format = attr->format; - depthwiseConv2DParam->channelIn = attr->channelIn; - depthwiseConv2DParam->channelMultiplier = attr->channelOut / attr->channelIn; - depthwiseConv2DParam->kernelW = attr->kernelW; - depthwiseConv2DParam->kernelH = attr->kernelH; - depthwiseConv2DParam->strideW = attr->strideW; - depthwiseConv2DParam->strideH = attr->strideH; - depthwiseConv2DParam->padMode = attr->padMode; - depthwiseConv2DParam->padUp = attr->padUp; - depthwiseConv2DParam->padDown = attr->padDown; - depthwiseConv2DParam->padLeft = attr->padLeft; - depthwiseConv2DParam->padRight = attr->padRight; - depthwiseConv2DParam->dilateW = attr->dilateW; - depthwiseConv2DParam->dilateH = attr->dilateH; - depthwiseConv2DParam->activationType = attr->activationType; - - primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - primitive->value.value = depthwiseConv2DParam.release(); - return true; -} - -lite::PrimitiveC *OnnxConvParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ConvParser"; - auto attr = std::make_unique<schema::Conv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->strideH = 1; - attr->strideW = 1; - attr->dilateH = 1; - attr->dilateW = 1; - attr->group = 1; - attr->padMode = schema::PadMode_NOTSET; - attr->format = 
schema::Format::Format_NCHW; - - // set opdef each attr params +STATUS ParseVecAttr(const onnx::NodeProto &onnx_node, std::vector<int64_t> *kernels, std::vector<int64_t> *strides, + std::vector<int64_t> *dilation, std::vector<int64_t> *pads) { for (const auto &onnx_node_attr : onnx_node.attribute()) { - if (onnx_node_attr.name() == "group") { - attr->group = static_cast<int32_t>(onnx_node_attr.i()); - } else if (onnx_node_attr.name() == "dilations") { + if (onnx_node_attr.name() == "dilations") { if (onnx_node_attr.ints().size() != 2) { MS_LOG(ERROR) << "dilations size " << onnx_node_attr.ints().size() << " is not 2"; - return nullptr; + return RET_ERROR; } - attr->dilateH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->dilateW = static_cast<int32_t>(onnx_node_attr.ints(1)); + dilation->push_back(onnx_node_attr.ints(0)); + dilation->push_back(onnx_node_attr.ints(1)); } else if (onnx_node_attr.name() == "kernels") { if (onnx_node_attr.ints().size() != 2) { MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2"; - return nullptr; + return RET_ERROR; } - attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1)); + kernels->push_back(onnx_node_attr.ints(0)); + kernels->push_back(onnx_node_attr.ints(1)); } else if (onnx_node_attr.name() == "kernel_shape") { if (onnx_node_attr.ints().size() != 2) { MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2"; - return nullptr; + return RET_ERROR; } - attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "auto_pad") { - attr->padMode = GetOnnxPadMode(onnx_node_attr); + kernels->push_back(onnx_node_attr.ints(0)); + kernels->push_back(onnx_node_attr.ints(1)); } else if (onnx_node_attr.name() == "pads") { if (onnx_node_attr.ints().size() != 4) { MS_LOG(ERROR) << "pads size " << onnx_node_attr.ints().size() << " is not 4"; - return nullptr; + return RET_ERROR; } - attr->padUp = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->padLeft = static_cast<int32_t>(onnx_node_attr.ints(1)); - attr->padDown = static_cast<int32_t>(onnx_node_attr.ints(2)); - attr->padRight = static_cast<int32_t>(onnx_node_attr.ints(3)); + pads->push_back(onnx_node_attr.ints(0)); + pads->push_back(onnx_node_attr.ints(2)); + pads->push_back(onnx_node_attr.ints(1)); + pads->push_back(onnx_node_attr.ints(3)); } else if (onnx_node_attr.name() == "strides") { if (onnx_node_attr.ints().size() != 2) { MS_LOG(ERROR) << "strides size " << onnx_node_attr.ints().size() << " is not 2"; - return nullptr; - } - attr->strideH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->strideW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "order") { - if (onnx_node_attr.s() == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s(); - return nullptr; + return RET_ERROR; } + strides->push_back(onnx_node_attr.ints(0)); + strides->push_back(onnx_node_attr.ints(1)); } } + return RET_OK; +} +STATUS GetConvChannel(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node, int64_t group, + int64_t *channel_out, int64_t *channel_in) { const auto &onnx_conv_weight = onnx_node.input(1); if (onnx_node.op_type() == "Conv") { auto node_iter = @@ -137,8 +83,8 @@ lite::PrimitiveC *OnnxConvParser::ParseLitePrimitive(const onnx::GraphProto &onn for (int 
i = 0; i < size; ++i) { weight_shape.emplace_back((*node_iter).dims(i)); } - attr->channelOut = weight_shape[0]; - attr->channelIn = weight_shape[1] * attr->group; + *channel_out = weight_shape[0]; + *channel_in = weight_shape[1] * group; } } else { auto node_iter = @@ -146,7 +92,7 @@ lite::PrimitiveC *OnnxConvParser::ParseLitePrimitive(const onnx::GraphProto &onn [onnx_conv_weight](const onnx::NodeProto &proto) { return proto.output(0) == onnx_conv_weight; }); if (node_iter == onnx_graph.node().end()) { MS_LOG(ERROR) << "can not find node: " << onnx_conv_weight; - return nullptr; + return RET_ERROR; } std::vector<int> dims; auto iter = std::find_if((*node_iter).attribute().begin(), (*node_iter).attribute().end(), @@ -154,34 +100,85 @@ lite::PrimitiveC *OnnxConvParser::ParseLitePrimitive(const onnx::GraphProto &onn if (iter != (*node_iter).attribute().end()) { if (iter->ints().begin() == nullptr || iter->ints().end() == nullptr) { MS_LOG(ERROR) << "dims insert failed"; - return nullptr; + return RET_ERROR; } dims.insert(dims.begin(), iter->ints().begin(), iter->ints().end()); } - attr->channelOut = dims.at(0); - attr->channelIn = dims.at(3) * attr->group; + *channel_out = dims.at(0); + *channel_in = dims.at(3) * group; } - if (onnx_node.op_type() == "ConvRelu" || onnx_node.op_type() == "Int8ConvRelu") { - attr->activationType = schema::ActivationType_RELU; + return RET_OK; +} + +ops::PrimitiveC *OnnxConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Conv2DFusion>(); + + prim->set_pad({0, 0, 0, 0}); + mindspore::Format format = mindspore::Format::NCHW; + mindspore::PadMode pad_mode = mindspore::PadMode::PAD; + int64_t channel_out = 1, channel_in = 1, group = 1; + std::vector<int64_t> kernels, strides, dilation, pads; + + for (const auto &onnx_node_attr : onnx_node.attribute()) { + if (onnx_node_attr.name() == "group") { + group = onnx_node_attr.i(); + } else if (onnx_node_attr.name() == "auto_pad") { + pad_mode = GetOnnxPadMode(onnx_node_attr); + } else if (onnx_node_attr.name() == "order") { + if (onnx_node_attr.s() == "NHWC") { + format = mindspore::Format::NHWC; + } else { + MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s(); + return nullptr; + } + } + } + prim->set_format(format); + prim->set_pad_mode(pad_mode); + prim->set_group(group); + + if (ParseVecAttr(onnx_node, &kernels, &strides, &dilation, &pads) != RET_OK) { + return nullptr; + } + if (dilation.empty()) { + prim->set_dilation({1, 1}); } else { - attr->activationType = schema::ActivationType_NO_ACTIVATION; + prim->set_dilation(dilation); + } + if (pads.empty()) { + prim->set_pad_list({0, 0, 0, 0}); + } else { + prim->set_pad_list(pads); + } + if (!kernels.empty()) { + prim->set_kernel_size(kernels); + } + if (!strides.empty()) { + prim->set_stride(strides); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; + // get channel_out and channel_in + if (GetConvChannel(onnx_graph, onnx_node, group, &channel_out, &channel_in) != RET_OK) { return nullptr; } - if (attr->group == attr->channelIn && attr->channelIn == attr->channelOut) { - if (!ParseGroupConvolution(attr, primitive.get())) { - MS_LOG(ERROR) << "Convert Convolution to Depthwise failed"; - return nullptr; - } + prim->set_in_channel(channel_in); 
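+ // in_channel/out_channel are recovered from the weight initializer by GetConvChannel above: + // ONNX "Conv" weights are laid out [C_out, C_in / group, kH, kW], so the input channel + // count is restored as weight_shape[1] * group.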
+ prim->set_out_channel(channel_out); + + // parse activationType + if (onnx_node.op_type() == "ConvRelu" || onnx_node.op_type() == "Int8ConvRelu") { + prim->set_activation_type(mindspore::ActivationType::RELU); } else { - primitive->value.type = schema::PrimitiveType_Conv2D; - primitive->value.value = attr.release(); + prim->set_activation_type(mindspore::ActivationType::NO_ACTIVATION); } - return PrimitiveC::Create(primitive.release()); + + if (group == channel_in && channel_in == channel_out) { + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); + } + + return prim.release(); } OnnxNodeRegistrar g_onnxConvParser("Conv", new OnnxConvParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h index 9f9987e1ae..baee30e253 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_parser.h @@ -18,8 +18,10 @@ #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONV_PARSER_H #include <memory> +#include <vector> #include "tools/converter/parser/onnx/onnx_node_parser.h" #include "tools/converter/parser/onnx/onnx_node_parser_registry.h" +#include "ops/primitive_c.h" namespace mindspore { namespace lite { @@ -28,11 +30,11 @@ class OnnxConvParser : public OnnxNodeParser { OnnxConvParser() : OnnxNodeParser("Conv") {} ~OnnxConvParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; - - private: - static bool ParseGroupConvolution(const std::unique_ptr<schema::Conv2DT> &attr, schema::PrimitiveT *primitive); + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; + +STATUS ParseVecAttr(const onnx::NodeProto &onnx_node, std::vector<int64_t> *kernels, std::vector<int64_t> *strides, + std::vector<int64_t> *dilation, std::vector<int64_t> *pads); } // namespace lite } // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_CONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc new file mode 100644 index 0000000000..62fb554c3f --- /dev/null +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.cc @@ -0,0 +1,99 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tools/converter/parser/onnx/onnx_conv_transpose_parser.h" +#include <vector> +#include <memory> +#include <algorithm> +#include "ops/fusion/conv2d_transpose_fusion.h" +#include "tools/converter/parser/onnx/onnx_conv_parser.h" + +namespace mindspore { +namespace lite { +ops::PrimitiveC *OnnxDeConvParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Conv2dTransposeFusion>(); + + prim->set_pad({0, 0, 0, 0}); + mindspore::PadMode pad_mode = mindspore::PadMode::PAD; + std::vector<int64_t> kernel, dilate, stride, pads, output_padding_h, output_padding_w; + int64_t group = 1; + for (const auto &onnx_node_attr : onnx_node.attribute()) { + if (onnx_node_attr.name() == "group") { + group = onnx_node_attr.i(); + } else if (onnx_node_attr.name() == "auto_pad") { + pad_mode = GetOnnxPadMode(onnx_node_attr); + } else if (onnx_node_attr.name() == "order" && onnx_node_attr.s() != "NHWC") { + MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s().c_str(); + return nullptr; + } + if (onnx_node_attr.name() == "output_padding") { + output_padding_h.push_back(onnx_node_attr.ints(0)); + output_padding_w.push_back(onnx_node_attr.ints(1)); + prim->set_output_padding_h(output_padding_h); + prim->set_output_padding_w(output_padding_w); + } + } + prim->set_format(mindspore::Format::NCHW); + prim->set_group(group); + prim->set_pad_mode(pad_mode); + + if (ParseVecAttr(onnx_node, &kernel, &stride, &dilate, &pads) != RET_OK) { + return nullptr; + } + if (!dilate.empty()) { + prim->set_dilation(dilate); + } + if (!pads.empty()) { + prim->set_pad_list(pads); + } + if (!kernel.empty()) { + prim->set_kernel_size(kernel); + } + if (!stride.empty()) { + prim->set_stride(stride); + } + + const auto &onnx_conv_weight = onnx_node.input(1); + auto node_iter = + std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(), + [onnx_conv_weight](const onnx::TensorProto &proto) { return proto.name() == onnx_conv_weight; }); + if (node_iter == onnx_graph.initializer().end()) { + MS_LOG(ERROR) << "not find node: " << onnx_conv_weight.c_str(); + return nullptr; + } + std::vector<int> weight_shape; + auto size = (*node_iter).dims_size(); + weight_shape.reserve(size); + for (int i = 0; i < size; ++i) { + weight_shape.emplace_back((*node_iter).dims(i)); + } + if (weight_shape.size() != 4) { + MS_LOG(ERROR) << "weight_shape.size() should be 4, but is " << weight_shape.size(); + return nullptr; + } + prim->set_in_channel(weight_shape[0]); + prim->set_out_channel(weight_shape[1] * group); + + if (group != 1 && weight_shape[1] == 1) { + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); + } + + return prim.release(); +} + +OnnxNodeRegistrar g_onnxDeConvParser("ConvTranspose", new OnnxDeConvParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.h new file mode 100644 index 0000000000..4a5a505263 --- /dev/null +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_conv_transpose_parser.h @@ -0,0 +1,35 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H + +#include <memory> +#include "tools/converter/parser/onnx/onnx_node_parser.h" +#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" + +namespace mindspore { +namespace lite { +class OnnxDeConvParser : public OnnxNodeParser { + public: + OnnxDeConvParser() : OnnxNodeParser("DeConv") {} + ~OnnxDeConvParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; +} // namespace lite +} // namespace mindspore +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc deleted file mode 100644 index c25c778964..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.cc +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/converter/parser/onnx/onnx_converter.h" -#include "tools/converter/parser/onnx/onnx_model_parser.h" - -namespace mindspore { -namespace lite { -OnnxConverter::OnnxConverter() { modelParser = new OnnxModelParser(); } - -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h index 3344ba7b13..4e253ed308 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_converter.h @@ -21,14 +21,21 @@ #include <memory> #include "tools/converter/converter.h" #include "tools/converter/graphdef_transform.h" +#include "tools/converter/parser/onnx/onnx_model_parser.h" namespace mindspore { namespace lite { class OnnxConverter : public Converter { public: - OnnxConverter(); + OnnxConverter() = default; ~OnnxConverter() override = default; + + FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) override { + OnnxModelParser parser; + return parser.Parse(model_file, weight_file, quant_type); + } }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc deleted file mode 100644 index e44948df62..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.cc +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
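With the model parser now invoked inline, a usage sketch of the new entry point (the call site and argument values are illustrative; only BuildFuncGraph's signature comes from this patch):

    OnnxConverter converter;
    // ONNX keeps weights inside the model file, so the weight path can stay empty.
    FuncGraphPtr graph = converter.BuildFuncGraph("model.onnx", "", schema::QuantType_QUANT_NONE);
    if (graph == nullptr) {
      MS_LOG(ERROR) << "parse onnx model failed";
    }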
- */ - -#include "tools/converter/parser/onnx/onnx_deconv_parser.h" -#include <vector> -#include <memory> -#include <algorithm> - -namespace mindspore { -namespace lite { -bool OnnxDeConvParser::ParseGroupDeConvolution(const std::unique_ptr<schema::DeConv2DT> &attr, - schema::PrimitiveT *primitive) { - if (attr == nullptr || attr->group != attr->channelOut || primitive == nullptr) { - MS_LOG(ERROR) << "input parameter is nullptr"; - return false; - } - auto deDepthwiseConv2DParam = std::make_unique<schema::DeDepthwiseConv2DT>(); - if (deDepthwiseConv2DParam == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return false; - } - deDepthwiseConv2DParam->format = attr->format; - deDepthwiseConv2DParam->channelIn = attr->channelIn; - deDepthwiseConv2DParam->channelMultiplier = attr->channelOut / attr->channelIn; - deDepthwiseConv2DParam->kernelW = attr->kernelW; - deDepthwiseConv2DParam->kernelH = attr->kernelH; - deDepthwiseConv2DParam->strideW = attr->strideW; - deDepthwiseConv2DParam->strideH = attr->strideH; - deDepthwiseConv2DParam->padMode = attr->padMode; - deDepthwiseConv2DParam->padUp = attr->padUp; - deDepthwiseConv2DParam->padDown = attr->padDown; - deDepthwiseConv2DParam->padLeft = attr->padLeft; - deDepthwiseConv2DParam->padRight = attr->padRight; - deDepthwiseConv2DParam->dilateW = attr->dilateW; - deDepthwiseConv2DParam->dilateH = attr->dilateH; - deDepthwiseConv2DParam->activationType = attr->activationType; - - primitive->value.type = schema::PrimitiveType_DeDepthwiseConv2D; - primitive->value.value = deDepthwiseConv2DParam.release(); - return true; -} - -int OnnxDeConvParser::ParseParameters(const onnx::NodeProto &onnx_node, - const std::unique_ptr<schema::DeConv2DT> &attr) { - attr->padMode = schema::PadMode_NOTSET; - attr->group = 1; - attr->strideW = 1; - attr->strideH = 1; - attr->dilateW = 1; - attr->dilateH = 1; - - for (const auto &onnx_node_attr : onnx_node.attribute()) { - if (onnx_node_attr.name() == "group") { - attr->group = static_cast<int32_t>(onnx_node_attr.i()); - } else if (onnx_node_attr.name() == "dilations") { - if (onnx_node_attr.ints().size() != 2) { - MS_LOG(ERROR) << "dilations size " << onnx_node_attr.ints().size() << " is not 2"; - return RET_ERROR; - } - attr->dilateH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->dilateW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "kernels") { - if (onnx_node_attr.ints().size() != 2) { - MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2"; - return RET_ERROR; - } - attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "kernel_shape") { - if (onnx_node_attr.ints().size() != 2) { - MS_LOG(ERROR) << "kernel_shape size " << onnx_node_attr.ints().size() << " is not 2"; - return RET_ERROR; - } - attr->kernelH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->kernelW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "auto_pad") { - attr->padMode = GetOnnxPadMode(onnx_node_attr); - } else if (onnx_node_attr.name() == "pads") { - if (onnx_node_attr.ints().size() != 4) { - MS_LOG(ERROR) << "pads size " << onnx_node_attr.ints().size() << " is not 4"; - return RET_ERROR; - } - attr->padUp = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->padLeft = static_cast<int32_t>(onnx_node_attr.ints(1)); - attr->padDown = static_cast<int32_t>(onnx_node_attr.ints(2)); - attr->padRight = 
static_cast<int32_t>(onnx_node_attr.ints(3)); - } else if (onnx_node_attr.name() == "strides") { - if (onnx_node_attr.ints().size() != 2) { - MS_LOG(ERROR) << "strides size " << onnx_node_attr.ints().size() << " is not 2"; - return RET_ERROR; - } - attr->strideH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->strideW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } else if (onnx_node_attr.name() == "order") { - if (onnx_node_attr.s() == "NHWC") { - attr->format = schema::Format::Format_NHWC; - } else { - MS_LOG(ERROR) << "Unsupported format: " << onnx_node_attr.s().c_str(); - return RET_ERROR; - } - } else if (onnx_node_attr.name() == "output_padding") { - attr->outputPaddingH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->outputPaddingW = static_cast<int32_t>(onnx_node_attr.ints(1)); - } - } - return RET_OK; -} - -lite::PrimitiveC *OnnxDeConvParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx DeConvParser"; - auto attr = std::make_unique<schema::DeConv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - auto status = ParseParameters(onnx_node, attr); - if (status != RET_OK) { - MS_LOG(ERROR) << "Parse parameters failed."; - return nullptr; - } - - const auto &onnx_conv_weight = onnx_node.input(1); - auto node_iter = - std::find_if(onnx_graph.initializer().begin(), onnx_graph.initializer().end(), - [onnx_conv_weight](const onnx::TensorProto &proto) { return proto.name() == onnx_conv_weight; }); - if (node_iter == onnx_graph.initializer().end()) { - MS_LOG(ERROR) << "not find node: " << onnx_conv_weight.c_str(); - return nullptr; - } - std::vector<int> weight_shape; - auto size = (*node_iter).dims_size(); - weight_shape.reserve(size); - for (int i = 0; i < size; ++i) { - weight_shape.emplace_back((*node_iter).dims(i)); - } - if (weight_shape.size() != 4) { - MS_LOG(ERROR) << "weight_shape.size() should be 4, but is " << weight_shape.size(); - return nullptr; - } - attr->channelIn = weight_shape[0]; - attr->channelOut = weight_shape[1] * attr->group; - - attr->format = schema::Format::Format_NCHW; - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - if (attr->group != 1) { - if (!ParseGroupDeConvolution(attr, primitive.get())) { - MS_LOG(ERROR) << "Convert DeConvolution to DeDepthwise failed, generalized group deconv hasn't support"; - return nullptr; - } - } else { - primitive->value.type = schema::PrimitiveType_DeConv2D; - primitive->value.value = attr.release(); - } - - return PrimitiveC::Create(primitive.release()); -} - -OnnxNodeRegistrar g_onnxDeConvParser("ConvTranspose", new OnnxDeConvParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h deleted file mode 100644 index a5f6c582cc..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_deconv_parser.h +++ /dev/null @@ -1,40 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
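Side by side, the depthwise special case that ParseGroupDeConvolution handled above collapses into a single attribute in the new parser; a condensed sketch of the two mappings:

    // Old IR: group == channelOut            -> schema::DeDepthwiseConv2DT with
    //         channelMultiplier = channelOut / channelIn
    // New IR: group != 1 && weight_shape[1] == 1
    //         -> ops::Conv2dTransposeFusion + AddAttr(ops::kIsDepthWise, MakeValue<bool>(true))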
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H - -#include <memory> -#include "tools/converter/parser/onnx/onnx_node_parser.h" -#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class OnnxDeConvParser : public OnnxNodeParser { - public: - OnnxDeConvParser() : OnnxNodeParser("DeConv") {} - ~OnnxDeConvParser() override = default; - - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; - - private: - bool ParseGroupDeConvolution(const std::unique_ptr<schema::DeConv2DT> &attr, schema::PrimitiveT *primitive); - - int ParseParameters(const onnx::NodeProto &onnx_node, const std::unique_ptr<schema::DeConv2DT> &attr); -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_DECONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc index 8ee5081967..b66d00c5e7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.cc @@ -16,32 +16,21 @@ #include "tools/converter/parser/onnx/onnx_depth_to_space_parser.h" #include <memory> +#include "ops/depth_to_space.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxDepthToSpaceParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx DepthToSpaceParser"; - auto attr = std::make_unique<schema::DepthToSpaceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxDepthToSpaceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::DepthToSpace>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "blocksize") { - attr->blockSize = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_block_size(onnx_node_attr.i()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_DepthToSpace; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxDepthToSpaceParser("DepthToSpace", new OnnxDepthToSpaceParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h index a53623b799..3b32e96d40 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_depth_to_space_parser.h @@ -27,7 +27,7 @@ class OnnxDepthToSpaceParser : public OnnxNodeParser { OnnxDepthToSpaceParser() : 
OnnxNodeParser("DepthToSpace") {} ~OnnxDepthToSpaceParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc index b29778372e..5e28f2c672 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.cc @@ -16,32 +16,21 @@ #include "tools/converter/parser/onnx/onnx_dropout_parser.h" #include <memory> +#include "ops/dropout.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxDropoutParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx DropoutParser"; - auto attr = std::make_unique<schema::DropoutT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxDropoutParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Dropout>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "ratio") { - attr->ratio = static_cast<float>(onnx_node_attr.f()); + prim->set_keep_prob(onnx_node_attr.f()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Dropout; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxDropoutParser("Dropout", new OnnxDropoutParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h index c2c3ca0083..be6d33da5d 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_dropout_parser.h @@ -27,7 +27,7 @@ class OnnxDropoutParser : public OnnxNodeParser { OnnxDropoutParser() : OnnxNodeParser("Dropout") {} ~OnnxDropoutParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc deleted file mode 100644 index bcb77b8f1b..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.cc +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/onnx/onnx_elu_parser.h" -#include <memory> - -namespace mindspore { -namespace lite { -lite::PrimitiveC *OnnxEluParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx EluParser"; - auto attr = std::make_unique<schema::EluT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto &attribute_name = onnx_node_attr.name(); - if (attribute_name == "alpha") { - attr->alpha = onnx_node_attr.f(); - } - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Elu; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -OnnxNodeRegistrar g_onnxEluParser("Elu", new OnnxEluParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h deleted file mode 100644 index 68a7037aed..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_elu_parser.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
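Every parser deleted in this patch repeats the same four-step schema dance that the unified API removes; condensed from the Elu parser above:

    // Old flow, per op:
    //   1. auto attr = std::make_unique<schema::EluT>();        // op-specific attr table
    //   2. fill attr fields from onnx_node.attribute()
    //   3. wrap in schema::PrimitiveT with value.type = schema::PrimitiveType_Elu
    //   4. return PrimitiveC::Create(primitive.release());
    // New flow: construct the ops:: primitive directly and release it.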
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ELU_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ELU_PARSER_H - -#include "tools/converter/parser/onnx/onnx_node_parser.h" -#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class OnnxEluParser : public OnnxNodeParser { - public: - OnnxEluParser() : OnnxNodeParser("Elu") {} - ~OnnxEluParser() override = default; - - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_ELU_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.cc index d8de36ab97..54d912f538 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.cc @@ -15,26 +15,13 @@ */ #include "tools/converter/parser/onnx/onnx_erf_parser.h" #include <memory> +#include "ops/erf.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxErfParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ErfParser"; - auto attr = std::make_unique<schema::ErfT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Erf; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxErfParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Erf>(); + return prim.release(); } OnnxNodeRegistrar g_onnx_erf_parser("Erf", new OnnxErfParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.h index 532337e4ec..bfdeb4d82e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_erf_parser.h @@ -26,7 +26,7 @@ class OnnxErfParser : public OnnxNodeParser { OnnxErfParser() : OnnxNodeParser("Erf") {} ~OnnxErfParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc index 9c76c278e1..53bb0065cc 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.cc @@ -17,19 +17,14 @@ #include "tools/converter/parser/onnx/onnx_expand_parser.h" #include <memory> #include <vector> +#include "ops/broadcast_to.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxExpandParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ExpandParser"; - auto attr = std::make_unique<schema::BroadcastToT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } 
+ops::PrimitiveC *OnnxExpandParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::BroadcastTo>(); - std::vector<int> dst_shape; + std::vector<int64_t> dst_shape; const auto &onnx_expand_power = onnx_node.input(1); auto node_iter = std::find_if(onnx_graph.node().begin(), onnx_graph.node().end(), @@ -47,15 +42,9 @@ lite::PrimitiveC *OnnxExpandParser::ParseLitePrimitive(const onnx::GraphProto &o } } } - attr->dst_shape = dst_shape; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_BroadcastTo; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_shape(dst_shape); + + return prim.release(); } OnnxNodeRegistrar g_onnxExpandSpaceParser("Expand", new OnnxExpandParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h index 7178aa2044..bb43d24b48 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_expand_parser.h @@ -27,7 +27,7 @@ class OnnxExpandParser : public OnnxNodeParser { OnnxExpandParser() : OnnxNodeParser("Expand") {} ~OnnxExpandParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc index 60be9c822e..a8599c5ee6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.cc @@ -16,37 +16,13 @@ #include "tools/converter/parser/onnx/onnx_flatten_parser.h" #include <memory> +#include "ops/flatten.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxFlattenParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx FlattenParser"; - auto attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - int axis = 1; - for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto &attribute_name = onnx_node_attr.name(); - if (attribute_name == "axis") { - axis = static_cast<int>(onnx_node_attr.i()); - } - } - for (int i = 0; i < axis; ++i) { - attr->shape.emplace_back(0); - } - attr->shape.emplace_back(-1); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Reshape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxFlattenParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Flatten>(); + return prim.release(); } OnnxNodeRegistrar g_onnxFlattenParser("Flatten", new OnnxFlattenParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h index 
1b368f6705..8211f751f1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_flatten_parser.h @@ -27,7 +27,7 @@ class OnnxFlattenParser : public OnnxNodeParser { OnnxFlattenParser() : OnnxNodeParser("Fatten") {} ~OnnxFlattenParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc index 3d02ca7d0d..81f7fd8f26 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.cc @@ -16,33 +16,23 @@ #include "tools/converter/parser/onnx/onnx_gather_parser.h" #include <memory> +#include "ops/gather.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxGatherParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx GatherParser"; - auto attr = std::make_unique<schema::GatherT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxGatherParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Gather>(); + int32_t axis = 0; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = static_cast<int32_t>(onnx_node_attr.i()); + axis = static_cast<int32_t>(onnx_node_attr.i()); } } + prim->AddAttr("axis", MakeValue(axis)); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Gather; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxGatherParser("Gather", new OnnxGatherParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h index a1768bd398..f213c3643c 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gather_parser.h @@ -27,7 +27,7 @@ class OnnxGatherParser : public OnnxNodeParser { OnnxGatherParser() : OnnxNodeParser("Gather") {} ~OnnxGatherParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.cc index 884a34daff..82e7e2b719 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.cc @@ -17,37 +17,30 @@ #include "tools/converter/parser/onnx/onnx_gemm_parser.h" #include <vector> #include <memory> +#include "ops/make_tuple.h" namespace mindspore { namespace lite { -lite::PrimitiveC 
*OnnxGemmParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx IdentityParser"; +ops::PrimitiveC *OnnxGemmParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::MakeTuple>(); + auto node_parser = OnnxNodeParserRegistry::GetInstance()->GetNodeParser("MatMul"); if (node_parser == nullptr) { MS_LOG(ERROR) << "parse node " << onnx_node.op_type() << " failed."; return nullptr; } - auto *matmul_primitive = node_parser->ParseLitePrimitive(onnx_graph, onnx_node); + auto *matmul_primitive = node_parser->Parse(onnx_graph, onnx_node); + prim->AddAttr("MatMul", std::shared_ptr<ops::PrimitiveC>(matmul_primitive)); node_parser = OnnxNodeParserRegistry::GetInstance()->GetNodeParser("BiasAdd"); if (node_parser == nullptr) { MS_LOG(ERROR) << "parse node " << onnx_node.op_type() << " failed."; return nullptr; } + auto *bias_add_primitive = node_parser->Parse(onnx_graph, onnx_node); + prim->AddAttr("BiasAdd", std::shared_ptr<ops::PrimitiveC>(bias_add_primitive)); - auto *bias_add_primitive = node_parser->ParseLitePrimitive(onnx_graph, onnx_node); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_MakeTuple; - auto primitve_c = PrimitiveC::Create(primitive.release()); - primitve_c->set_attr("MatMul", std::shared_ptr<lite::PrimitiveC>(matmul_primitive)); - primitve_c->set_attr("BiasAdd", std::shared_ptr<lite::PrimitiveC>(bias_add_primitive)); - return primitve_c; + return prim.release(); } OnnxNodeRegistrar g_onnxGemmParser("Gemm", new OnnxGemmParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.h index 4424d2ea6b..948deca088 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_gemm_parser.h @@ -27,7 +27,7 @@ class OnnxGemmParser : public OnnxNodeParser { OnnxGemmParser() : OnnxNodeParser("Gemm") {} ~OnnxGemmParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc index ec51aeef93..a172795242 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.cc @@ -20,17 +20,14 @@ #include <vector> #include <algorithm> #include "src/param_value_lite.h" +#include "ops/constant.h" namespace mindspore { namespace lite { -STATUS OnnxGivenTensorFillParser::ParseInt8GivenIntTensorFill(const onnx::NodeProto &onnx_node, - lite::PrimitiveC *primitive_c, +STATUS OnnxGivenTensorFillParser::ParseInt8GivenIntTensorFill(const onnx::NodeProto &onnx_node, ops::PrimitiveC *prim, const std::vector<int> &shape) { ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - if (param_value == nullptr) { - MS_LOG(ERROR) << "new a paramValueLite failed."; - return RET_ERROR; - } + int data_count = std::accumulate(shape.begin(), shape.end(), 1, 
std::multiplies<int>()); auto iter = std::find_if(onnx_node.attribute().begin(), onnx_node.attribute().end(), [](const onnx::AttributeProto &attr) { return attr.name() == "values"; }); @@ -57,18 +54,14 @@ STATUS OnnxGivenTensorFillParser::ParseInt8GivenIntTensorFill(const onnx::NodePr param_value->set_format(schema::Format_NUM_OF_FORMAT); param_value->set_tensor_type(kNumberTypeInt64); param_value->SetTensorData(param_data, data_size); - primitive_c->set_attr("const_data", param_value); + prim->set_attr("const_data", param_value); return RET_OK; } -STATUS OnnxGivenTensorFillParser::ParseInt8GivenTensorFill(const onnx::NodeProto &onnx_node, - lite::PrimitiveC *primitive_c, +STATUS OnnxGivenTensorFillParser::ParseInt8GivenTensorFill(const onnx::NodeProto &onnx_node, ops::PrimitiveC *prim, const std::vector<int> &shape) { ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - if (param_value == nullptr) { - MS_LOG(ERROR) << "new a paramValueLite failed."; - return RET_ERROR; - } + int data_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int64_t>()); auto iter = std::find_if(onnx_node.attribute().begin(), onnx_node.attribute().end(), [](const onnx::AttributeProto &attr) { return attr.name() == "values"; }); @@ -89,20 +82,13 @@ STATUS OnnxGivenTensorFillParser::ParseInt8GivenTensorFill(const onnx::NodeProto param_value->set_format(schema::Format_NUM_OF_FORMAT); param_value->set_tensor_type(kNumberTypeUInt8); param_value->SetTensorData(param_data, data_count); - primitive_c->set_attr("const_data", param_value); + prim->set_attr("const_data", param_value); return RET_OK; } +ops::PrimitiveC *OnnxGivenTensorFillParser::Parse(const onnx::GraphProto &onnx_graph, + const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Constant>(); -lite::PrimitiveC *OnnxGivenTensorFillParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx GivenTensorFillParser"; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Constant; - auto primitive_c = PrimitiveC::Create(primitive.release()); std::vector<int64_t> shape_vector; auto iter = std::find_if(onnx_node.attribute().begin(), onnx_node.attribute().end(), [](const onnx::AttributeProto &attr) { return attr.name() == "shape"; }); @@ -113,17 +99,18 @@ lite::PrimitiveC *OnnxGivenTensorFillParser::ParseLitePrimitive(const onnx::Grap std::transform(shape_vector.begin(), shape_vector.end(), std::back_inserter(shape), [](const int64_t &val) { return static_cast<int32_t>(val); }); if (onnx_node.op_type() == "Int8GivenIntTensorFill") { - if (ParseInt8GivenIntTensorFill(onnx_node, primitive_c, shape) != RET_OK) { + if (ParseInt8GivenIntTensorFill(onnx_node, prim.get(), shape) != RET_OK) { MS_LOG(ERROR) << "given tensor fill parse failed."; return nullptr; } } else if (onnx_node.op_type() == "Int8GivenTensorFill") { - if (ParseInt8GivenTensorFill(onnx_node, primitive_c, shape) != RET_OK) { + if (ParseInt8GivenTensorFill(onnx_node, prim.get(), shape) != RET_OK) { MS_LOG(ERROR) << "given tensor fill parse failed."; return nullptr; } } - return primitive_c; + + return prim.release(); } OnnxNodeRegistrar g_onnxInt8GivenIntTensorFillParser("Int8GivenIntTensorFill", new OnnxGivenTensorFillParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.h 
b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.h index 4a55f5659f..5a77282ff7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_given_tensor_fill_parser.h @@ -28,11 +28,12 @@ class OnnxGivenTensorFillParser : public OnnxNodeParser { OnnxGivenTensorFillParser() : OnnxNodeParser("GivenTensorFill") {} ~OnnxGivenTensorFillParser() override = default; - STATUS ParseInt8GivenIntTensorFill(const onnx::NodeProto &onnx_node, lite::PrimitiveC *primitive_c, + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + + STATUS ParseInt8GivenIntTensorFill(const onnx::NodeProto &onnx_node, ops::PrimitiveC *prim, const std::vector<int> &shape); - STATUS ParseInt8GivenTensorFill(const onnx::NodeProto &onnx_node, lite::PrimitiveC *primitive_c, + STATUS ParseInt8GivenTensorFill(const onnx::NodeProto &onnx_node, ops::PrimitiveC *prim, const std::vector<int> &shape); - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.cc index 048f2f6521..63e1091629 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.cc @@ -15,28 +15,15 @@ */ #include "tools/converter/parser/onnx/onnx_identity_parser.h" -#include <memory> #include <vector> +#include <memory> +#include "ops/identity.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxIdentityParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx IdentityParser"; - auto attr = std::make_unique<schema::IdentityT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Identity; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxIdentityParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Identity>(); + return prim.release(); } OnnxNodeRegistrar g_onnxIdentityParser("Identity", new OnnxIdentityParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.h index 14dad740a9..4dea7165c1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_identity_parser.h @@ -27,7 +27,7 @@ class OnnxIdentityParser : public OnnxNodeParser { OnnxIdentityParser() : OnnxNodeParser("Identity") {} ~OnnxIdentityParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.cc index b975afe0a6..0241933180 100644 
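The Identity parser above, and the If and Loop parsers below it, all reduce to the same two lines; a generic sketch of that shared shape (the helper and its name are hypothetical, not part of the patch):

    // Hypothetical helper illustrating the pattern behind attribute-free parsers.
    template <typename OpT>
    ops::PrimitiveC *ParseNoAttrOp() {
      auto prim = std::make_unique<OpT>();
      return prim.release();
    }
    // e.g. ParseNoAttrOp<ops::Identity>() behaves like OnnxIdentityParser::Parse.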
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.cc @@ -17,26 +17,15 @@ #include "tools/converter/parser/onnx/onnx_if_parser.h" #include <memory> #include "tools/converter/parser/onnx/onnx_model_parser.h" +#include "ops/if.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxIfParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx IfParser"; - auto attr = std::make_unique<schema::IfT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_If; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxIfParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::If>(); + return prim.release(); } + OnnxNodeRegistrar g_onnxIfParser("If", new OnnxIfParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.h index 8b3fc245db..2fe114bfbf 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_if_parser.h @@ -27,7 +27,7 @@ class OnnxIfParser : public OnnxNodeParser { OnnxIfParser() : OnnxNodeParser("If") {} ~OnnxIfParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.cc index 7cfe142bb7..1bc1a1f1a1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.cc @@ -16,32 +16,21 @@ #include "tools/converter/parser/onnx/onnx_instance_norm_parser.h" #include <memory> +#include "ops/instance_norm.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxInstanceNormParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx InstanceNormParser"; - auto attr = std::make_unique<schema::InstanceNormT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxInstanceNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::InstanceNorm>(); if (!onnx_node.attribute().empty()) { auto onnx_node_attr = onnx_node.attribute().at(0); if (onnx_node_attr.name() == "epsilon") { - attr->epsilon = onnx_node_attr.f(); + prim->set_epsilon(onnx_node_attr.f()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_InstanceNorm; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar 
g_onnxInstanceNormParser("InstanceNormalization", new OnnxInstanceNormParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.h index 9979c36dab..155409bd09 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_instance_norm_parser.h @@ -27,7 +27,7 @@ class OnnxInstanceNormParser : public OnnxNodeParser { OnnxInstanceNormParser() : OnnxNodeParser("InstanceNorm") {} ~OnnxInstanceNormParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.cc index ab1fbd1ef2..44aa53bd8f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.cc @@ -17,26 +17,15 @@ #include "tools/converter/parser/onnx/onnx_loop_parser.h" #include <memory> #include "tools/converter/parser/onnx/onnx_model_parser.h" +#include "ops/while.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxLoopParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LoopParser"; - auto attr = std::make_unique<schema::WhileT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_While; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxLoopParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::While>(); + return prim.release(); } + OnnxNodeRegistrar g_onnxLoopParser("Loop", new OnnxLoopParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.h index f584a994a8..700be8baf2 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_loop_parser.h @@ -27,7 +27,7 @@ class OnnxLoopParser : public OnnxNodeParser { OnnxLoopParser() : OnnxNodeParser("Loop") {} ~OnnxLoopParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc index 773d81cf37..12a2bdf2d0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.cc @@ -16,34 +16,25 @@ #include "tools/converter/parser/onnx/onnx_lp_norm_parser.h" #include <memory> +#include "ops/lp_normalization.h" -namespace mindspore::lite { -lite::PrimitiveC 
*OnnxLpNormParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LpNormParser"; - auto attr = std::make_unique<schema::LpNormalizationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +namespace mindspore { +namespace lite { +ops::PrimitiveC *OnnxLpNormParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LpNormalization>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = onnx_node_attr.i(); + prim->set_axis(onnx_node_attr.i()); } else if (attribute_name == "p") { - attr->p = onnx_node_attr.i(); + prim->set_p(onnx_node_attr.i()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LpNormalization; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxLpNormParser("LpNormalization", new OnnxLpNormParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h index 9fa92f8be6..1beef0a78b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lp_norm_parser.h @@ -27,7 +27,7 @@ class OnnxLpNormParser : public OnnxNodeParser { OnnxLpNormParser() : OnnxNodeParser("LpNorm") {} ~OnnxLpNormParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc index 83a2bb08c5..0fa2db27aa 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.cc @@ -16,29 +16,26 @@ #include "tools/converter/parser/onnx/onnx_lrn_parser.h" #include <memory> +#include "ops/lrn.h" -namespace mindspore::lite { -lite::PrimitiveC *OnnxLrnParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LrnParser"; - auto attr = std::make_unique<schema::LocalResponseNormalizationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +namespace mindspore { +namespace lite { +ops::PrimitiveC *OnnxLrnParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LRN>(); - int32_t size = 0; + int64_t size = 0; + float alpha = 0; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "alpha") { - attr->alpha = onnx_node_attr.f(); + alpha = onnx_node_attr.f(); } else if (attribute_name == "beta") { - attr->beta = onnx_node_attr.f(); + prim->set_beta(onnx_node_attr.f()); } else if (attribute_name == "bias") { - attr->bias = onnx_node_attr.f(); + prim->set_bias(onnx_node_attr.f()); } else if 
(attribute_name == "size") { - size = static_cast<int32_t>(onnx_node_attr.i()); - attr->depth_radius = size / 2; + size = onnx_node_attr.i(); + prim->set_depth_radius(size / 2); } } @@ -46,18 +43,13 @@ lite::PrimitiveC *OnnxLrnParser::ParseLitePrimitive(const onnx::GraphProto &onnx MS_LOG(ERROR) << "Divide-by-zero error."; return nullptr; } - attr->alpha /= size; + alpha /= size; + prim->set_alpha(alpha); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LocalResponseNormalization; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxLrnxParser("Lrn", new OnnxLrnParser()); OnnxNodeRegistrar g_onnxLRNxParser("LRN", new OnnxLrnParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h index 347d13cb17..3fae8c0977 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lrn_parser.h @@ -27,7 +27,7 @@ class OnnxLrnParser : public OnnxNodeParser { OnnxLrnParser() : OnnxNodeParser("Lrn") {} ~OnnxLrnParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc index 09716c1318..b90684448e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.cc @@ -16,33 +16,33 @@ #include "tools/converter/parser/onnx/onnx_lstm_parser.h" #include <memory> +#include "ops/lstm.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxLstmParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx LstmParser"; - auto attr = std::make_unique<schema::LstmT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxLstmParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::LSTM>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { if (onnx_node_attr.name() == "direction") { const auto &direction = onnx_node_attr.s(); - attr->bidirection = direction == "bidirectional"; + bool bidirectional = direction == "bidirectional"; + prim->set_bidirectional(bidirectional); + if (bidirectional) { + prim->set_num_directions(2); + } else { + prim->set_num_directions(1); + } + } else if (onnx_node_attr.name() == "hidden_size") { + prim->set_hidden_size(onnx_node_attr.i()); + } else if (onnx_node_attr.name() == "clip") { + prim->set_dropout(onnx_node_attr.f()); + } else if (onnx_node_attr.name() == "activations") { + prim->set_has_bias(true); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Lstm; - primitive->value.value = attr.release(); - return 
PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxLstmParser("LSTM", new OnnxLstmParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.h index 3be45c5b7e..5262188065 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_lstm_parser.h @@ -27,7 +27,7 @@ class OnnxLstmParser : public OnnxNodeParser { OnnxLstmParser() : OnnxNodeParser("LSTM") {} ~OnnxLstmParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc index 18fd66169f..dbb798bf1a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.cc @@ -16,26 +16,21 @@ #include "tools/converter/parser/onnx/onnx_matmul_parser.h" #include <memory> +#include "ops/mat_mul.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxMatmulParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx MatMulParser"; - auto attr = std::make_unique<schema::MatMulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxMatmulParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::MatMul>(); float alpha = 1.0f; float beta = 1.0f; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "transA") { - attr->transposeA = static_cast<bool>(onnx_node_attr.i()); + prim->set_transpose_a(static_cast<bool>(onnx_node_attr.i())); } else if (attribute_name == "transB") { - attr->transposeB = static_cast<bool>(onnx_node_attr.i()); + prim->set_transpose_b(static_cast<bool>(onnx_node_attr.i())); } else if (attribute_name == "alpha") { alpha = onnx_node_attr.f(); } else if (attribute_name == "beta") { @@ -47,14 +42,7 @@ lite::PrimitiveC *OnnxMatmulParser::ParseLitePrimitive(const onnx::GraphProto &o return nullptr; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_MatMul; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxMatmulParser("MatMul", new OnnxMatmulParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h index 22af92f4f3..9d9e7ac6fa 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_matmul_parser.h @@ -27,7 +27,7 @@ class OnnxMatmulParser : public OnnxNodeParser { OnnxMatmulParser() : OnnxNodeParser("MatMul") {} ~OnnxMatmulParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const 
onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc index e775793d8c..4986f123f9 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.cc @@ -16,12 +16,20 @@ #include "tools/converter/parser/onnx/onnx_model_parser.h" #include <algorithm> +#include <memory> #include <set> #include <unordered_map> #include <utility> #include "src/common/utils.h" #include "tools/common/graph_util.h" #include "tools/common/protobuf_utils.h" +#include "ops/return.h" +#include "ops/make_tuple.h" +#include "ops/tensor_list_stack.h" +#include "ops/tuple_get_item.h" +#include "ir/func_graph.h" +#include "src/param_value_lite.h" +#include "tools/converter/converter_flags.h" namespace mindspore { namespace lite { @@ -194,7 +202,9 @@ STATUS OnnxModelParser::ConvertNodes(const onnx::GraphProto &onnx_graph, const F if (status != RET_OK) { continue; } - auto primitive_c = node_parser->ParseLitePrimitive(onnx_graph, onnx_node); + + MS_LOG(INFO) << "parse op:" << onnx_node.op_type(); + auto primitive_c = node_parser->Parse(onnx_graph, onnx_node); if (primitive_c == nullptr) { MS_LOG(ERROR) << "parse node " << onnx_node.op_type() << " failed."; status = RET_ERROR; @@ -333,9 +343,9 @@ STATUS OnnxModelParser::ConvertGraphOutputs(const onnx::GraphProto &onnx_graph, std::vector<AnfNodePtr> return_inputs; if (onnx_graph.output_size() > 1) { std::vector<AnfNodePtr> make_tuple_inputs; - auto make_tuple_prim_ptr = GetMakeTuplePrim(); + auto make_tuple_prim_ptr = std::make_shared<ops::MakeTuple>(); if (make_tuple_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; + MS_LOG(ERROR) << "new MakeTuple failed"; return RET_NULL_PTR; } for (const auto &graph_out : onnx_graph.output()) { @@ -382,9 +392,9 @@ STATUS OnnxModelParser::BuildReturnNode(const FuncGraphPtr &anf_graph, const std MS_LOG(ERROR) << "parameter has null."; return RET_NULL_PTR; } - auto returnPrim = GetReturnPrim(); + auto returnPrim = std::make_shared<ops::Return>(); if (returnPrim == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto returnCnode = anf_graph->NewCNode(returnPrim, return_inputs); @@ -398,7 +408,7 @@ STATUS OnnxModelParser::BuildReturnNode(const FuncGraphPtr &anf_graph, const std } STATUS OnnxModelParser::BuildCNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &anf_graph, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, - std::vector<AnfNodePtr> *graph_inputs, lite::PrimitiveC *primitive_c, + std::vector<AnfNodePtr> *graph_inputs, ops::PrimitiveC *primitive_c, std::string loop_name) { if (primitive_c == nullptr || anf_graph == nullptr) { MS_LOG(ERROR) << "primitive_c is nullptr."; @@ -477,7 +487,7 @@ STATUS OnnxModelParser::BuildCNode(const onnx::NodeProto &onnx_node, const FuncG } } } - auto new_cnode = anf_graph->NewCNode(std::shared_ptr<lite::PrimitiveC>(primitive_c), op_inputs); + auto new_cnode = anf_graph->NewCNode(std::shared_ptr<ops::PrimitiveC>(primitive_c), op_inputs); if (new_cnode == nullptr) { MS_LOG(ERROR) << "new cnode error"; return RET_ERROR; @@ -506,9 +516,9 @@ STATUS OnnxModelParser::BuildOpOutputs(const onnx::NodeProto &onnx_node, const F std::vector<int64_t> shape_vector; auto type_ptr = 
TypeIdToType(kNumberTypeFloat32); abstract_list.emplace_back(std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector)); - auto tuple_get_item_prim_ptr = GetTupleGetItemPrim(); + auto tuple_get_item_prim_ptr = std::make_shared<ops::TupleGetItem>(); if (tuple_get_item_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; + MS_LOG(ERROR) << "new TupleGetItem failed"; return RET_NULL_PTR; } auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr); @@ -529,7 +539,7 @@ STATUS OnnxModelParser::BuildOpOutputs(const onnx::NodeProto &onnx_node, const F return RET_OK; } -STATUS OnnxModelParser::ConvertOpQuantParams(const onnx::NodeProto &onnx_node, lite::PrimitiveC *primitive_c) { +STATUS OnnxModelParser::ConvertOpQuantParams(const onnx::NodeProto &onnx_node, ops::PrimitiveC *primitive_c) { if (primitive_c == nullptr) { MS_LOG(ERROR) << "primitive_c is null, get quant params failed."; return RET_NULL_PTR; @@ -540,6 +550,7 @@ STATUS OnnxModelParser::ConvertOpQuantParams(const onnx::NodeProto &onnx_node, l return RET_ERROR; } // set input tensors + auto quant_params_holder = std::make_shared<QuantParamHolder>(); for (int i = 0; i < onnx_node.input_size(); ++i) { const auto &input_name = onnx_node.input(i); std::vector<schema::QuantParamT> quant_params; @@ -548,7 +559,7 @@ STATUS OnnxModelParser::ConvertOpQuantParams(const onnx::NodeProto &onnx_node, l MS_LOG(ERROR) << "set input tensor quant param failed."; return status; } - primitive_c->AddInputQuantParam(quant_params); + quant_params_holder->AddInputQuantParam(quant_params); } // set out tensors for (int i = 0; i < onnx_node.output_size(); ++i) { @@ -559,8 +570,9 @@ STATUS OnnxModelParser::ConvertOpQuantParams(const onnx::NodeProto &onnx_node, l MS_LOG(ERROR) << "set output tensor quant param failed."; return status; } - primitive_c->AddOutputQuantParam(quant_params); + quant_params_holder->AddOutputQuantParam(quant_params); } + primitive_c->AddAttr("quant_params", quant_params_holder); return RET_OK; } @@ -705,20 +717,19 @@ ParameterPtr CreateConstParamter(const FuncGraphPtr &anf_graph, int val) { return const_node; } -ValueNodePtr CreateValueNode(void *attr, const PrimitiveType &op_type) { - if (attr == nullptr) { - MS_LOG(ERROR) << "attr is nullptr"; +ValueNodePtr CreateValueNode(const PrimitiveType &op_type) { + auto node_type = schema::EnumNamePrimitiveType(op_type); + auto op_primc_fns = ops::OpPrimCRegister::GetInstance().GetPrimCMap(); + if (op_primc_fns.find(node_type) == op_primc_fns.end()) { + MS_LOG(ERROR) << "have no func to create primitive."; return nullptr; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = op_type; - primitive->value.value = attr; - auto primitive_c = PrimitiveC::Create(primitive.release()); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "create primitivec nullptr"; + auto prim = op_primc_fns[node_type](); + if (prim == nullptr) { + MS_LOG(ERROR) << "cannot create primitive."; return nullptr; } - return NewValueNode(std::shared_ptr<PrimitiveC>(primitive_c)); + return NewValueNode(prim); } STATUS AddIterNumsUpdateEdge(const FuncGraphPtr &anf_graph, std::vector<AnfNodePtr> *return_new_inputs, @@ -729,9 +740,11 @@ STATUS AddIterNumsUpdateEdge(const FuncGraphPtr &anf_graph, std::vector<AnfNodeP return RET_NULL_PTR; } // trip_cout need -1 after every iteration - auto attr = std::make_unique<schema::SubT>(); - auto sub_value_node = CreateValueNode(attr.release(), schema::PrimitiveType_Sub); - + auto sub_value_node = 
CreateValueNode(schema::PrimitiveType_SubFusion); + if (sub_value_node == nullptr) { + MS_LOG(ERROR) << "create sub failed."; + return RET_NULL_PTR; + } auto &trip_cout_paramter = anf_nodes_map.at(trip_cout_name); if (trip_cout_paramter == nullptr) { MS_LOG(ERROR) << "trip_cout_paramter found failed"; @@ -762,13 +775,13 @@ STATUS OnnxModelParser::AddTensorListStackNode(const AnfNodePtr &root_while_node auto output_size = onnx_node.output_size(); auto &loop_output_name = onnx_node.output(output_size - act_outputs_num + j); auto &while_output_node = control_nodes_map_[loop_node_name]->at(loop_output_name); - auto stack_attr = std::make_unique<schema::TensorListStackT>(); - if (stack_attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + auto tensor_list_stack_prim = std::make_shared<ops::TensorListStack>(); + if (tensor_list_stack_prim == nullptr) { + MS_LOG(ERROR) << "create stack failed"; return RET_ERROR; } - stack_attr->numElements = -1; - auto stack_value_node = CreateValueNode(stack_attr.release(), schema::PrimitiveType_TensorListStack); + tensor_list_stack_prim->set_num_elements(-1); + auto stack_value_node = NewValueNode(tensor_list_stack_prim); std::vector<AnfNodePtr> stack_inputs = {stack_value_node, while_output_node, stack_elem_node}; auto tensorlist_stack_cnode = root_anf_graph->NewCNode(stack_inputs); if (tensorlist_stack_cnode == nullptr) { @@ -811,8 +824,11 @@ STATUS OnnxModelParser::AddTensorArrayEdge(const FuncGraphPtr &anf_graph, std::v item_index_parameter->set_abstract(root_item_index_parameter->abstract()); body_graph_inputs->emplace_back(item_index_parameter); // item index++ edge - auto add_attr = std::make_unique<schema::AddT>(); - auto add_value_node = CreateValueNode(add_attr.release(), schema::PrimitiveType_Add); + auto add_value_node = CreateValueNode(schema::PrimitiveType_AddFusion); + if (add_value_node == nullptr) { + MS_LOG(ERROR) << "create add failed."; + return RET_NULL_PTR; + } auto add_one_input = CreateConstParamter(anf_graph, 1); add_one_input->set_name(loop_node_name + "_const_placeholder_1"); std::vector<AnfNodePtr> add_inputs = {add_value_node, item_index_parameter, add_one_input}; @@ -842,11 +858,14 @@ STATUS OnnxModelParser::AddTensorArrayEdge(const FuncGraphPtr &anf_graph, std::v subgraph_tensor_array_input->set_name(loop_node_name + "_scan_outputs_tensorarray"); subgraph_tensor_array_input->set_abstract(abstract_tensor); body_graph_inputs->emplace_back(subgraph_tensor_array_input); - auto set_item_attr = std::make_unique<schema::TensorListSetItemT>(); // skip trip_count ,cond_out,loop_var,no_loop_var,place_holder, output auto loop_output_idx = return_new_inputs->size() - act_output_num + i; auto loop_output_node = (*return_new_inputs)[loop_output_idx]; - auto set_item_value_node = CreateValueNode(set_item_attr.release(), schema::PrimitiveType_TensorListSetItem); + auto set_item_value_node = CreateValueNode(schema::PrimitiveType_TensorListSetItem); + if (set_item_value_node == nullptr) { + MS_LOG(ERROR) << "create tensor list set item failed."; + return RET_NULL_PTR; + } std::vector<AnfNodePtr> set_item_inputs = {set_item_value_node, subgraph_tensor_array_input, item_index_parameter, loop_output_node}; auto tensorlist_setitem_cnode = anf_graph->NewCNode(set_item_inputs); @@ -973,8 +992,7 @@ STATUS OnnxModelParser::BuildCondGraph(const FuncGraphPtr &cond_graph, const Anf if (i == 0) { auto zero_parameter = CreateConstParamter(cond_graph, 0); zero_parameter->set_name(root_while_node->fullname_with_scope() + "_const_0"); - auto attr = 
std::make_unique<schema::LessT>(); - auto less_value_node = CreateValueNode(attr.release(), schema::PrimitiveType_Less); + auto less_value_node = CreateValueNode(schema::PrimitiveType_Less); std::vector<AnfNodePtr> less_inputs = {less_value_node, zero_parameter, input_paramter}; less_cnode = cond_graph->NewCNode(less_inputs); if (less_cnode == nullptr) { @@ -986,8 +1004,7 @@ STATUS OnnxModelParser::BuildCondGraph(const FuncGraphPtr &cond_graph, const Anf less_cnode->set_fullname_with_scope(cond_graph_name + "_less_cnode"); } if (i == 1) { - auto attr = std::make_unique<schema::LogicalAndT>(); - auto and_value_node = CreateValueNode(attr.release(), schema::PrimitiveType_LogicalAnd); + auto and_value_node = CreateValueNode(schema::PrimitiveType_LogicalAnd); std::vector<AnfNodePtr> and_inputs = {and_value_node, less_cnode, input_paramter}; auto and_cnode = cond_graph->NewCNode(and_inputs); if (and_cnode == nullptr) { @@ -1008,7 +1025,7 @@ STATUS OnnxModelParser::BuildCondGraph(const FuncGraphPtr &cond_graph, const Anf STATUS OnnxModelParser::ConvertSpecialOnnxNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &anf_graph, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, - lite::PrimitiveC *primitive_c) { + ops::PrimitiveC *primitive_c) { if (primitive_c == nullptr || anf_graph == nullptr) { MS_LOG(ERROR) << "imitive_c is nullptr."; return RET_NULL_PTR; @@ -1026,7 +1043,7 @@ STATUS OnnxModelParser::ConvertSpecialOnnxNode(const onnx::NodeProto &onnx_node, STATUS OnnxModelParser::ConvertOnnxGemmNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &anf_graph, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, - lite::PrimitiveC *primitive_c) { + ops::PrimitiveC *primitive_c) { if (primitive_c == nullptr || anf_graph == nullptr) { MS_LOG(ERROR) << "parameter has nullptr."; return RET_NULL_PTR; @@ -1054,7 +1071,7 @@ STATUS OnnxModelParser::ConvertOnnxGemmNode(const onnx::NodeProto &onnx_node, co STATUS OnnxModelParser::BuildCNodeForGemm(const onnx::NodeProto &onnx_node, const FuncGraphPtr &anf_graph, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, - lite::PrimitiveC *primitive_c, const std::string &name) { + ops::PrimitiveC *primitive_c, const std::string &name) { if (primitive_c == nullptr || anf_graph == nullptr) { MS_LOG(ERROR) << "parameter has nullptr."; return RET_NULL_PTR; @@ -1065,14 +1082,16 @@ STATUS OnnxModelParser::BuildCNodeForGemm(const onnx::NodeProto &onnx_node, cons MS_LOG(ERROR) << "op parse failed."; return RET_NULL_PTR; } - auto prim_ptr = value->cast<std::shared_ptr<lite::PrimitiveC>>(); + auto prim_ptr = value->cast<std::shared_ptr<ops::PrimitiveC>>(); if (prim_ptr == nullptr) { - MS_LOG(ERROR) << "p parse failed."; + MS_LOG(ERROR) << "primitive parse failed."; return RET_NULL_PTR; } auto type_ptr = TypeIdToType(kTypeUnknown); std::vector<int64_t> shape_vector; std::vector<AnfNodePtr> op_inputs; + auto quant_params_holder = std::make_shared<QuantParamHolder>(); + auto quant_params_holder_origin = primitive_c->GetAttr("quant_params")->cast<QuantParamHolderPtr>(); if (name == "MatMul") { for (int i = 0; i < 2; ++i) { if (anf_nodes_map->find(onnx_node.input(i)) == anf_nodes_map->end()) { @@ -1080,10 +1099,10 @@ STATUS OnnxModelParser::BuildCNodeForGemm(const onnx::NodeProto &onnx_node, cons return RET_ERROR; } else { op_inputs.push_back(anf_nodes_map->at(onnx_node.input(i))); - prim_ptr->AddInputQuantParam(primitive_c->input_quant_params().at(i)); + 
quant_params_holder->AddInputQuantParam(quant_params_holder_origin->input_quant_params().at(i)); } } - prim_ptr->AddOutputQuantParam(std::vector<schema::QuantParamT>(1)); + quant_params_holder->AddOutputQuantParam(std::vector<schema::QuantParamT>(1)); auto new_cnode = anf_graph->NewCNode(prim_ptr, op_inputs); if (new_cnode == nullptr) { MS_LOG(ERROR) << "new cnode error"; @@ -1100,9 +1119,9 @@ STATUS OnnxModelParser::BuildCNodeForGemm(const onnx::NodeProto &onnx_node, cons } op_inputs.push_back(anf_nodes_map->at("Gemm_MatMul_" + onnx_node.output(0))); op_inputs.push_back(anf_nodes_map->at(onnx_node.input(2))); - prim_ptr->AddInputQuantParam(std::vector<schema::QuantParamT>(1)); - prim_ptr->AddInputQuantParam(primitive_c->input_quant_params().at(2)); - prim_ptr->AddOutputQuantParam(primitive_c->output_quant_params().front()); + quant_params_holder->AddInputQuantParam(std::vector<schema::QuantParamT>(1)); + quant_params_holder->AddInputQuantParam(quant_params_holder_origin->input_quant_params().at(2)); + quant_params_holder->AddOutputQuantParam(quant_params_holder_origin->output_quant_params().front()); auto new_cnode = anf_graph->NewCNode(prim_ptr, op_inputs); if (new_cnode == nullptr) { MS_LOG(ERROR) << "new cnode error"; diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h index b9b2da6c01..122fac9a49 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_model_parser.h @@ -41,11 +41,6 @@ class OnnxModelParser : public ModelParser { ~OnnxModelParser() override = default; - MetaGraphT *ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) override { - return nullptr; - } - FuncGraphPtr Parse(const std::string &model_file, const std::string &weight_file, const QuantType &quant_type) override; static TypeId GetDataTypeFromOnnx(onnx::TensorProto_DataType onnx_type); @@ -60,34 +55,35 @@ class OnnxModelParser : public ModelParser { STATUS ConvertOnnxGraph(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, std::vector<AnfNodePtr> *graph_inputs, const std::string &root_node_name); - STATUS ConvertConstTensors(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map); - STATUS ConvertGraphInputs(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *nodes_map); - STATUS ConvertGraphOutputs(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, - const std::unordered_map<std::string, AnfNodePtr> &anf_nodes_map); - STATUS BuildReturnNode(const FuncGraphPtr &func_graph_ptr, const std::vector<AnfNodePtr> &return_inputs); - STATUS BuildParameterNode(const ParameterPtr &parameter_node, const onnx::TensorProto &tensor); + static STATUS ConvertConstTensors(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map); + static STATUS ConvertGraphInputs(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *nodes_map); + static STATUS ConvertGraphOutputs(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &func_graph_ptr, + const std::unordered_map<std::string, AnfNodePtr> &anf_nodes_map); + static STATUS BuildReturnNode(const 
FuncGraphPtr &func_graph_ptr, const std::vector<AnfNodePtr> &return_inputs); + static STATUS BuildParameterNode(const ParameterPtr &parameter_node, const onnx::TensorProto &tensor); STATUS BuildParameterNodeForQuantParam(const void *data, const std::string &name, TypeId type); STATUS BuildCNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, std::vector<AnfNodePtr> *graph_inputs, - lite::PrimitiveC *primitive_c, std::string loop_name); - STATUS BuildOpOutputs(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, const CNodePtr &cnode); - STATUS ConvertSpecialOnnxNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, - lite::PrimitiveC *primitive_c); - STATUS ConvertOnnxGemmNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, lite::PrimitiveC *primitive_c); - STATUS BuildCNodeForGemm(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, - std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, lite::PrimitiveC *primitive_c, - const std::string &name); - STATUS ConvertOpQuantParams(const onnx::NodeProto &onnx_node, lite::PrimitiveC *primitive_c); + ops::PrimitiveC *primitive_c, std::string loop_name); + static STATUS BuildOpOutputs(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, const CNodePtr &cnode); + static STATUS ConvertSpecialOnnxNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, + ops::PrimitiveC *primitive_c); + static STATUS ConvertOnnxGemmNode(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, + ops::PrimitiveC *primitive_c); + static STATUS BuildCNodeForGemm(const onnx::NodeProto &onnx_node, const FuncGraphPtr &func_graph_ptr, + std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, + ops::PrimitiveC *primitive_c, const std::string &name); + STATUS ConvertOpQuantParams(const onnx::NodeProto &onnx_node, ops::PrimitiveC *primitive_c); STATUS ParseQuantParam(const onnx::NodeProto &onnx_node); STATUS SetTensorQuantParam(const std::string &tensor_name, std::vector<QuantParamT> *quant_params); STATUS SetTensorQuantParamFromNode(const std::string &tensor_name, std::vector<QuantParamT> *quant_params); STATUS CopyTensorQuantParam(const std::string &tensor_name, QuantParamT *quant_param, bool scale_or_not); - bool IsSpecialOnnxNode(const onnx::NodeProto &onnx_node); + static bool IsSpecialOnnxNode(const onnx::NodeProto &onnx_node); STATUS ConvertLoopOnnxNode(const onnx::NodeProto &onnx_node, std::unordered_map<std::string, AnfNodePtr> *anf_nodes_map, const std::string &root_node_name); @@ -98,8 +94,8 @@ class OnnxModelParser : public ModelParser { int act_output_num); STATUS AddTensorListStackNode(const AnfNodePtr &root_while_node, const onnx::NodeProto &onnx_node, int act_output_num, int body_output_size); - STATUS BuildCondGraph(const FuncGraphPtr &cond_graph, const AnfNodePtr &root_while_node, int inputs_num, - const std::string &cond_graph_name); + static STATUS BuildCondGraph(const FuncGraphPtr &cond_graph, const AnfNodePtr &root_while_node, int inputs_num, + const std::string &cond_graph_name); 
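+  // Editorial sketch (not part of the original patch): with the IR unified, each
+  // OnnxNodeParser::Parse returns a raw ops::PrimitiveC* and BuildCNode takes
+  // ownership by wrapping it, roughly:
+  //   ops::PrimitiveC *prim = node_parser->Parse(onnx_graph, onnx_node);
+  //   auto cnode = anf_graph->NewCNode(std::shared_ptr<ops::PrimitiveC>(prim), op_inputs);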
STATUS ConvertIfSubgraph(const onnx::GraphProto &onnx_graph, const FuncGraphPtr &anf_graph, const std::string &subgrah_name, const std::string &if_node_name, const std::string &root_node_name); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc index 7da40b6f54..52bcd230b7 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.cc @@ -24,18 +24,36 @@ namespace mindspore { namespace lite { int OnnxNodeParser::opset_version_ = 0; -schema::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr) { +mindspore::PadMode OnnxNodeParser::GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr) { if (onnx_node_attr.s() == "NOTSET") { - return schema::PadMode_NOTSET; + return mindspore::PadMode::PAD; + } else if (onnx_node_attr.s() == "SAME_UPPER" || onnx_node_attr.s() == "SAME_LOWER") { + return mindspore::PadMode::SAME; + } else if (onnx_node_attr.s() == "VALID") { + return mindspore::PadMode::VALID; + } else { + MS_LOG(ERROR) << "unsupported padMode"; + return mindspore::PadMode::PAD; + } +} + +STATUS OnnxNodeParser::GetPadMode(const onnx::AttributeProto &onnx_node_attr, std::string *mode) { + if (onnx_node_attr.s() == "NOTSET") { + *mode = "NOTSET"; + return RET_OK; } else if (onnx_node_attr.s() == "SAME_UPPER") { - return schema::PadMode_SAME_UPPER; + *mode = "SAME_UPPER"; + return RET_OK; } else if (onnx_node_attr.s() == "SAME_LOWER") { - return schema::PadMode_SAME_LOWER; + *mode = "SAME_LOWER"; + return RET_OK; } else if (onnx_node_attr.s() == "VALID") { - return schema::PadMode_VALID; + *mode = "VALID"; + return RET_OK; } else { MS_LOG(ERROR) << "unsupported padMode"; - return schema::PadMode_NOTSET; + *mode = "NOTSET"; + return RET_ERROR; } } diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h index 222d972cf7..de73314005 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_node_parser.h @@ -20,13 +20,15 @@ #include <string> #include <utility> #include <vector> -#include "src/ops/primitive_c.h" #include "google/protobuf/message.h" #include "proto/onnx.pb.h" #include "include/errorcode.h" #include "src/common/log_adapter.h" #include "schema/inner/model_generated.h" #include "ir/dtype/type_id.h" +#include "ops/primitive_c.h" +#include "mindspore/core/utils/check_convert_utils.h" + namespace mindspore { namespace lite { class OnnxNodeParser { @@ -35,10 +37,9 @@ class OnnxNodeParser { virtual ~OnnxNodeParser() = default; - virtual lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) = 0; - - static STATUS GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor, std::vector<float> *value, int *type); + virtual ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + return nullptr; + } static STATUS set_opset_version(int version) { opset_version_ = version; @@ -47,7 +48,11 @@ class OnnxNodeParser { static int opset_version() { return opset_version_; } protected: - static schema::PadMode GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr); + static mindspore::PadMode GetOnnxPadMode(const onnx::AttributeProto &onnx_node_attr); + + static STATUS GetPadMode(const onnx::AttributeProto &onnx_node_attr, std::string *mode); + + STATUS 
GetTensorDataFromOnnx(const onnx::TensorProto &onnx_tensor, std::vector<float> *value, int *type); static void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.cc index 8c4979e0cd..b87e3382e8 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.cc @@ -16,35 +16,24 @@ #include "tools/converter/parser/onnx/onnx_non_max_suppression_parser.h" #include <memory> +#include "ops/non_max_suppression.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxNonMaxSuppressionParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx EluParser"; - auto attr = std::make_unique<schema::NonMaxSuppressionT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxNonMaxSuppressionParser::Parse(const onnx::GraphProto &onnx_graph, + const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::NonMaxSuppression>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "center_point_box") { if (onnx_node_attr.has_i()) { - attr->centerPointBox = onnx_node_attr.i(); + prim->set_center_point_box(onnx_node_attr.i()); } } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_NonMaxSuppression; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxNonMaxSuppressionParser("NonMaxSuppression", new OnnxNonMaxSuppressionParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.h index 3f20c01296..6c51e67912 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_non_max_suppression_parser.h @@ -27,7 +27,7 @@ class OnnxNonMaxSuppressionParser : public OnnxNodeParser { OnnxNonMaxSuppressionParser() : OnnxNodeParser("NonMaxSuppression") {} ~OnnxNonMaxSuppressionParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.cc index ce9fc1194d..f2167d94a6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.cc @@ -17,26 +17,15 @@ #include "tools/converter/parser/onnx/onnx_nonzero_parser.h" #include <memory> #include "tools/converter/parser/onnx/onnx_model_parser.h" +#include "ops/where.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxNonZeroParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx 
NonZeroParser"; - auto attr = std::make_unique<schema::WhereT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Where; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxNonZeroParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Where>(); + return prim.release(); } + OnnxNodeRegistrar g_onnxNonZeroParser("NonZero", new OnnxNonZeroParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.h index a07a58ff80..a8ed83448f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_nonzero_parser.h @@ -27,7 +27,7 @@ class OnnxNonZeroParser : public OnnxNodeParser { OnnxNonZeroParser() : OnnxNodeParser("NonZero") {} ~OnnxNonZeroParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.cc index 0ef4932291..6b40cf820d 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.cc @@ -16,33 +16,21 @@ #include "tools/converter/parser/onnx/onnx_onehot_parser.h" #include <memory> +#include "ops/one_hot.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxOneHotParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx OneHotParser"; - auto attr = std::make_unique<schema::OneHotT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxOneHotParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::OneHot>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_axis(onnx_node_attr.i()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_OneHot; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxOneHotParser("OneHot", new OnnxOneHotParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.h index 394502e130..9ed0a6278b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_onehot_parser.h @@ -27,7 +27,7 @@ class OnnxOneHotParser : public OnnxNodeParser { OnnxOneHotParser() : OnnxNodeParser("OneHot") {} 
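   // Editorial note: ONNX OneHot passes depth and values as node inputs rather
   // than attributes, so the Parse override below maps only the "axis" attribute.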
  ~OnnxOneHotParser() override = default;

-  lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override;
+  ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc
index 106a35e7c0..637aeab950 100644
--- a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc
+++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.cc
@@ -16,47 +16,41 @@
 #include "tools/converter/parser/onnx/onnx_pad_parser.h"
 #include <memory>
+#include <vector>
+#include "ops/fusion/pad_fusion.h"

 namespace mindspore {
 namespace lite {
-lite::PrimitiveC *OnnxPadParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph,
-                                                    const onnx::NodeProto &onnx_node) {
-  MS_LOG(DEBUG) << "onnx PadParser";
-  auto attr = std::make_unique<schema::PadT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return nullptr;
-  }
+ops::PrimitiveC *OnnxPadParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) {
+  auto prim = std::make_unique<ops::PadFusion>();
+  // Initialized to CONSTANT (the ONNX default mode) so an unrecognized "mode"
+  // string cannot feed an uninitialized value into set_padding_mode below.
+  mindspore::PaddingMode padding_mode = mindspore::PaddingMode::CONSTANT;
   for (const auto &onnx_node_attr : onnx_node.attribute()) {
     const auto &attribute_name = onnx_node_attr.name();
     if (attribute_name == "pads") {
       const int size = onnx_node_attr.ints_size();
-      attr->paddings.resize(size);
-      for (int i = 0; i < size / 2; ++i) {
-        attr->paddings[i * 2] = static_cast<int32_t>(onnx_node_attr.ints(i));
-        attr->paddings[i * 2 + 1] = static_cast<int32_t>(onnx_node_attr.ints(i + size / 2));
+      std::vector<std::vector<int64_t>> paddings(size / 2, std::vector<int64_t>(2, 0));
+      // begin1, begin2, begin3... end1, end2, end3... to
+      // begin1, end1, begin2, end2, begin3, end3...
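+      // Example (editorial, values assumed): a 2-D Pad with pads = {1, 2, 3, 4},
+      // i.e. {begin_h, begin_w, end_h, end_w}, becomes paddings = {{1, 3}, {2, 4}}.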
+ for (int i = 0; i < size / 2; i++) { + paddings[i][0] = static_cast<int64_t>(onnx_node_attr.ints(i)); + paddings[i][1] = static_cast<int64_t>(onnx_node_attr.ints(i + size / 2)); } + prim->set_paddings(paddings); } else if (attribute_name == "mode") { const auto &mode = onnx_node_attr.s(); if (mode == "constant") { - attr->paddingMode = schema::PaddingMode_CONSTANT; + padding_mode = mindspore::PaddingMode::CONSTANT; } else if (mode == "reflect") { - attr->paddingMode = schema::PaddingMode_REFLECT; + padding_mode = mindspore::PaddingMode::REFLECT; } else if (mode == "edge") { - attr->paddingMode = schema::PaddingMode_SYMMETRIC; + padding_mode = mindspore::PaddingMode::SYMMETRIC; } + prim->set_padding_mode(padding_mode); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Pad; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxPadParser("Pad", new OnnxPadParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h index 4cdb8a0223..641b35f39a 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pad_parser.h @@ -27,7 +27,7 @@ class OnnxPadParser : public OnnxNodeParser { OnnxPadParser() : OnnxNodeParser("Pad") {} ~OnnxPadParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc index cdfb103f58..4906179ba9 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.cc @@ -16,78 +16,129 @@ #include "tools/converter/parser/onnx/onnx_pool_parser.h" #include <memory> +#include <vector> +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/max_pool_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxPoolParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx PoolParser"; - auto attr = std::make_unique<schema::PoolingT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; +ops::PrimitiveC *OnnxAvgPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::AvgPoolFusion>(); + + prim->set_format(mindspore::Format::NCHW); + prim->set_pad_mode(mindspore::PadMode::PAD); + mindspore::RoundMode roundMode = mindspore::RoundMode::FLOOR; + std::vector<int64_t> kernels; + std::vector<int64_t> strides; + std::vector<int64_t> pads; + for (const auto &onnx_node_attr : onnx_node.attribute()) { + const auto &attribute_name = onnx_node_attr.name(); + if (attribute_name == "kernel_shape") { + if (onnx_node_attr.ints_size() == 2) { + kernels.push_back(onnx_node_attr.ints(0)); + kernels.push_back(onnx_node_attr.ints(1)); + prim->set_kernel_size(kernels); + } + } + if (attribute_name == "strides") { + if (onnx_node_attr.ints_size() == 2) { + 
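+        // Editorial note: ONNX lists 2-D pooling strides as (stride_h, stride_w);
+        // the order is kept as-is when filling the strides vector.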
strides.push_back(onnx_node_attr.ints(0)); + strides.push_back(onnx_node_attr.ints(1)); + } + } + if (attribute_name == "auto_pad") { + if (onnx_node_attr.s() == "SAME_UPPER") { + prim->set_pad_mode(mindspore::PadMode::SAME); + } else if (onnx_node_attr.s() == "SAME_LOWER") { + MS_LOG(ERROR) << "PadMode_SAME_LOWER is not supported now"; + return nullptr; + } + } + if (attribute_name == "pads") { + if (onnx_node_attr.ints_size() == 4) { + pads.push_back(onnx_node_attr.ints(0)); + pads.push_back(onnx_node_attr.ints(2)); + pads.push_back(onnx_node_attr.ints(1)); + pads.push_back(onnx_node_attr.ints(3)); + } + } + if (attribute_name == "ceil_mode") { + if (onnx_node_attr.i() == 0) { + roundMode = mindspore::RoundMode::FLOOR; + } else { + roundMode = mindspore::RoundMode::CEIL; + } + } + if (attribute_name == "dilations") { + MS_LOG(ERROR) << "pooling op not support dilations now"; + return nullptr; + } } + prim->set_round_mode(roundMode); - attr->format = schema::Format::Format_NCHW; - const auto &pool_type = onnx_node.op_type(); - if (pool_type == "MaxPool") { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - attr->global = false; - } else if (pool_type == "AveragePool") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - attr->global = false; - } else if (pool_type == "GlobalMaxPool") { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - attr->global = true; - } else if (pool_type == "GlobalAveragePool") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - attr->global = true; - } else if (pool_type == "Int8AveragePool") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - attr->global = false; + if (strides.empty()) { + strides.push_back(1); + strides.push_back(1); + } + prim->set_strides(strides); + if (pads.empty()) { + pads = {0, 0, 0, 0}; + } + prim->set_pad(pads); + if (onnx_node.op_type() == "GlobalAveragePool") { + prim->set_global(true); } else { - MS_LOG(ERROR) << "Pooling param`s PoolingMode is not MAX either AVE. 
MindSpore support MAX and AVE only."; - return nullptr; + prim->set_global(false); } - attr->roundMode = schema::RoundMode_FLOOR; - attr->strideW = 1; - attr->strideH = 1; + return prim.release(); +} + +ops::PrimitiveC *OnnxMaxPoolParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::MaxPoolFusion>(); + + prim->set_format(mindspore::Format::NCHW); + mindspore::RoundMode roundMode = mindspore::RoundMode::FLOOR; + std::vector<int64_t> kernels; + std::vector<int64_t> strides; + std::vector<int64_t> pads; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "kernel_shape") { if (onnx_node_attr.ints_size() == 2) { - attr->windowH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->windowW = static_cast<int32_t>(onnx_node_attr.ints(1)); + kernels.push_back(onnx_node_attr.ints(0)); + kernels.push_back(onnx_node_attr.ints(1)); + prim->set_kernel_size(kernels); } } if (attribute_name == "strides") { if (onnx_node_attr.ints_size() == 2) { - attr->strideH = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->strideW = static_cast<int32_t>(onnx_node_attr.ints(1)); + strides.push_back(onnx_node_attr.ints(0)); + strides.push_back(onnx_node_attr.ints(1)); } } if (attribute_name == "auto_pad") { if (onnx_node_attr.s() == "SAME_UPPER") { - attr->padMode = schema::PadMode_SAME_UPPER; + prim->set_pad_mode(mindspore::PadMode::SAME); } else if (onnx_node_attr.s() == "SAME_LOWER") { - attr->padMode = schema::PadMode_SAME_LOWER; + MS_LOG(ERROR) << "PadMode_SAME_LOWER is not supported now"; + return nullptr; } } if (attribute_name == "pads") { if (onnx_node_attr.ints_size() == 4) { - attr->padMode = schema::PadMode_CAFFE; - attr->padUp = static_cast<int32_t>(onnx_node_attr.ints(0)); - attr->padDown = static_cast<int32_t>(onnx_node_attr.ints(2)); - attr->padLeft = static_cast<int32_t>(onnx_node_attr.ints(1)); - attr->padRight = static_cast<int32_t>(onnx_node_attr.ints(3)); + prim->set_pad_mode(mindspore::PadMode::PAD); + pads.push_back(onnx_node_attr.ints(0)); + pads.push_back(onnx_node_attr.ints(2)); + pads.push_back(onnx_node_attr.ints(1)); + pads.push_back(onnx_node_attr.ints(3)); } } if (attribute_name == "ceil_mode") { if (onnx_node_attr.i() == 0) { - attr->roundMode = schema::RoundMode_FLOOR; + roundMode = mindspore::RoundMode::FLOOR; } else { - attr->roundMode = schema::RoundMode_CEIL; + roundMode = mindspore::RoundMode::CEIL; } } if (attribute_name == "dilations") { @@ -95,21 +146,29 @@ lite::PrimitiveC *OnnxPoolParser::ParseLitePrimitive(const onnx::GraphProto &onn return nullptr; } } + prim->set_round_mode(roundMode); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; + if (pads.empty()) { + pads = {0, 0, 0, 0}; } - primitive->value.type = schema::PrimitiveType_Pooling; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_pad(pads); + + if (strides.empty()) { + strides.push_back(1); + strides.push_back(1); + } + prim->set_strides(strides); + + prim->set_global(onnx_node.op_type() == "GlobalMaxPool"); + + return prim.release(); } -OnnxNodeRegistrar g_onnxMaxPoolParser("MaxPool", new OnnxPoolParser()); -OnnxNodeRegistrar g_onnxAveragePoolParser("AveragePool", new OnnxPoolParser()); -OnnxNodeRegistrar g_onnxGlobalAveragePoolParser("GlobalAveragePool", new OnnxPoolParser()); -OnnxNodeRegistrar 
g_onnxGlobalMaxPoolParser("GlobalMaxPool", new OnnxPoolParser()); -OnnxNodeRegistrar g_onnxInt8AveragePoolParser("Int8AveragePool", new OnnxPoolParser()); +OnnxNodeRegistrar g_onnxAveragePoolParser("AveragePool", new OnnxAvgPoolParser()); +OnnxNodeRegistrar g_onnxGlobalAveragePoolParser("GlobalAveragePool", new OnnxAvgPoolParser()); +OnnxNodeRegistrar g_onnxInt8AveragePoolParser("Int8AveragePool", new OnnxAvgPoolParser()); + +OnnxNodeRegistrar g_onnxMaxPoolParser("MaxPool", new OnnxMaxPoolParser()); +OnnxNodeRegistrar g_onnxGlobalMaxPoolParser("GlobalMaxPool", new OnnxMaxPoolParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h index 4d864358b7..0fc82ba857 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_pool_parser.h @@ -22,12 +22,20 @@ namespace mindspore { namespace lite { -class OnnxPoolParser : public OnnxNodeParser { +class OnnxAvgPoolParser : public OnnxNodeParser { public: - OnnxPoolParser() : OnnxNodeParser("Pool") {} - ~OnnxPoolParser() override = default; + OnnxAvgPoolParser() : OnnxNodeParser("AvgPool") {} + ~OnnxAvgPoolParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; +}; + +class OnnxMaxPoolParser : public OnnxNodeParser { + public: + OnnxMaxPoolParser() : OnnxNodeParser("MaxPool") {} + ~OnnxMaxPoolParser() override = default; + + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.cc index 3c73e29f75..5c00c3512d 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.cc @@ -16,35 +16,25 @@ #include "tools/converter/parser/onnx/onnx_quantize_parser.h" #include <memory> +#include "ops/quant_dtype_cast.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxQuantizeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx QuantizeDequantizeParser"; - auto attr = std::make_unique<schema::QuantDTypeCastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed."; - return nullptr; - } +ops::PrimitiveC *OnnxQuantizeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::QuantDTypeCast>(); + if (onnx_node.op_type() == "Int8Quantize") { - attr->srcT = kNumberTypeFloat32; - attr->dstT = kNumberTypeUInt8; + prim->set_src_t(kNumberTypeFloat32); + prim->set_dst_t(kNumberTypeUInt8); } else if (onnx_node.op_type() == "Int8Dequantize") { - attr->srcT = kNumberTypeUInt8; - attr->dstT = kNumberTypeFloat32; + prim->set_src_t(kNumberTypeUInt8); + prim->set_dst_t(kNumberTypeFloat32); } else { MS_LOG(ERROR) << "Unsupported nodeType: " << onnx_node.op_type().c_str(); return nullptr; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_QuantDTypeCast; - 
primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } OnnxNodeRegistrar g_onnxInt8QuantizeParser("Int8Quantize", new OnnxQuantizeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.h index fdaf0b158b..0b6cbc2898 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_quantize_parser.h @@ -27,7 +27,7 @@ class OnnxQuantizeParser : public OnnxNodeParser { OnnxQuantizeParser() : OnnxNodeParser("Quantize") {} ~OnnxQuantizeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc index 555e47e64c..9d30a348ce 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.cc @@ -16,26 +16,16 @@ #include "tools/converter/parser/onnx/onnx_range_parser.h" #include <memory> +#include "ops/range.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxRangeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx RangeParser"; - auto attr = std::make_unique<schema::RangeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->dType = 0; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Range; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxRangeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Range>(); + + prim->set_d_type(0); + + return prim.release(); } OnnxNodeRegistrar g_onnxRangeParser("Range", new OnnxRangeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h index cdc02d32c8..22f8ffecaa 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_range_parser.h @@ -27,7 +27,7 @@ class OnnxRangeParser : public OnnxNodeParser { OnnxRangeParser() : OnnxNodeParser("Range") {} ~OnnxRangeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc index f0180d6bfc..5b7f8f1fb4 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.cc @@ -16,56 +16,49 @@ #include "tools/converter/parser/onnx/onnx_reduce_parser.h" #include <memory> +#include <vector> +#include 
"ops/fusion/reduce_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxReduceParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ReduceParser"; - auto attr = std::make_unique<schema::ReduceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxReduceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::ReduceFusion>(); + + prim->set_keep_dims(true); - attr->keepDims = 1; + std::vector<int32_t> axes = {}; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axes") { const int &size = onnx_node_attr.ints_size(); for (int i = 0; i < size; ++i) { - attr->axes.push_back(onnx_node_attr.ints(i)); + axes.push_back(onnx_node_attr.ints(i)); } } else if (attribute_name == "keepdims") { - attr->keepDims = static_cast<bool>(onnx_node_attr.i()); + prim->set_keep_dims(static_cast<bool>(onnx_node_attr.i())); } } + prim->AddAttr("axes", MakeValue(axes)); + const auto &type = onnx_node.op_type(); if (type == "ReduceMean") { - attr->mode = schema::ReduceMode_ReduceMean; + prim->set_mode(mindspore::ReduceMode::Reduce_Mean); } else if (type == "ReduceMax") { - attr->mode = schema::ReduceMode_ReduceMax; + prim->set_mode(mindspore::ReduceMode::Reduce_Max); } else if (type == "ReduceMin") { - attr->mode = schema::ReduceMode_ReduceMin; + prim->set_mode(mindspore::ReduceMode::Reduce_Min); } else if (type == "ReduceSum") { - attr->mode = schema::ReduceMode_ReduceSum; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum); } else if (type == "ReduceProd") { - attr->mode = schema::ReduceMode_ReduceProd; + prim->set_mode(mindspore::ReduceMode::Reduce_Prod); } else if (type == "ReduceSumSquare") { - attr->mode = schema::ReduceMode_ReduceSumSquare; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum_Square); } else { - MS_LOG(ERROR) << "unsupported type"; + MS_LOG(ERROR) << "unsupported reduce type: " << type; return nullptr; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Reduce; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxReduceMeanParser("ReduceMean", new OnnxReduceParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h index 412200b227..95080675d8 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reduce_parser.h @@ -27,7 +27,7 @@ class OnnxReduceParser : public OnnxNodeParser { OnnxReduceParser() : OnnxNodeParser("Reduce") {} ~OnnxReduceParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc deleted file mode 100644 index 14aba17789..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.cc +++ 
/dev/null @@ -1,115 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/onnx/onnx_relu_parser.h" -#include <memory> -#include <vector> -#include "securec/include/securec.h" - -namespace mindspore { -namespace lite { -lite::PrimitiveC *OnnxReluParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ReluParser"; - auto attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - const auto &relu_type = onnx_node.op_type(); - if (relu_type == "Relu") { - MS_LOG(DEBUG) << "onnx ReluParser"; - attr->type = schema::ActivationType_RELU; - } else if (relu_type == "LeakyRelu") { - MS_LOG(DEBUG) << "onnx LeakyReluParser"; - attr->type = schema::ActivationType_LEAKY_RELU; - } - for (const auto &onnx_node_attr : onnx_node.attribute()) { - const auto &attribute_name = onnx_node_attr.name(); - if (attribute_name == "alpha") { - attr->alpha = onnx_node_attr.f(); - } - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -lite::PrimitiveC *OnnxPReluParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx PReluParser"; - if (onnx_node.input_size() != 2) { - MS_LOG(ERROR) << "input num should be 2"; - return nullptr; - } - auto attr = std::make_unique<schema::PReLUT>(); - std::vector<onnx::TensorProto> params; - const auto &input_name = onnx_node.input(1); - for (const auto &it : onnx_graph.initializer()) { - if (it.name() == input_name) { - params.push_back(it); - break; - } - } - - if (!params.empty()) { - const onnx::TensorProto *slope = &params[0]; - if (slope == nullptr) { - MS_LOG(ERROR) << "input error: params[0] is null"; - return nullptr; - } - if (slope->float_data_size() > 0) { - const int64_t slope_size = slope->float_data_size(); - for (int64_t i = 0; i < slope_size; i++) { - attr->slope.emplace_back(slope->float_data(i)); - } - attr->channelShared = slope_size == 1; - } else { - const auto slope_raw_data = reinterpret_cast<const float *>(slope->raw_data().data()); - const int64_t slope_size = slope->raw_data().size() / sizeof(float); - attr->slope.resize(slope_size); - if (memcpy_s(attr->slope.data(), slope_size * sizeof(float), slope_raw_data, slope_size * sizeof(float)) != EOK) { - MS_LOG(ERROR) << "memcpy_s failed"; - return nullptr; - } - attr->channelShared = slope_size == 1; - } - } else { - MS_LOG(WARNING) << "The slope pf prelu is null, which may cause errors."; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; 
- } - primitive->value.type = schema::PrimitiveType_PReLU; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -OnnxNodeRegistrar g_onnxReluParser("Relu", new OnnxReluParser()); -OnnxNodeRegistrar g_onnxLeakyReluParser("LeakyRelu", new OnnxReluParser()); -OnnxNodeRegistrar g_onnxPReluParser("PRelu", new OnnxPReluParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h deleted file mode 100644 index 0672da099b..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_relu_parser.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H - -#include "tools/converter/parser/onnx/onnx_node_parser.h" -#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class OnnxReluParser : public OnnxNodeParser { - public: - OnnxReluParser() : OnnxNodeParser("Relu") {} - ~OnnxReluParser() override = default; - - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; - -class OnnxPReluParser : public OnnxNodeParser { - public: - OnnxPReluParser() : OnnxNodeParser("Prelu") {} - ~OnnxPReluParser() override = default; - - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_RELU_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc index a9407cfa50..6e7c5cc8a3 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.cc @@ -17,41 +17,28 @@ #include "tools/converter/parser/onnx/onnx_reshape_parser.h" #include <vector> #include <memory> +#include "ops/reshape.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxReshapeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ReshapeParser"; - auto attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxReshapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Reshape>(); - attr->format = schema::Format_NCHW; - std::vector<int64_t> shape; + std::vector<int32_t> shape; shape.clear(); if (onnx_node.input_size() != 2) { for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if 
(attribute_name == "shape") { for (int i = 0; i < onnx_node_attr.ints_size(); ++i) { - shape.push_back(static_cast<int64_t>(onnx_node_attr.ints(i))); + shape.push_back(static_cast<int>(onnx_node_attr.ints(i))); } + prim->AddAttr("shape", MakeValue(shape)); } } } - attr->shape = shape; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Reshape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxReshapeParser("Reshape", new OnnxReshapeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h index 411329762a..cc5a252574 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_reshape_parser.h @@ -27,7 +27,7 @@ class OnnxReshapeParser : public OnnxNodeParser { OnnxReshapeParser() : OnnxNodeParser("Reshape") {} ~OnnxReshapeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.cc index 2f2d2f0333..7060a9e11b 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.cc @@ -15,76 +15,59 @@ */ #include "tools/converter/parser/onnx/onnx_resize_parser.h" -#include <map> -#include <memory> #include <string> #include <vector> +#include <map> +#include <memory> +#include "ops/resize.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxResizeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ResizeParser"; - auto attr = std::make_unique<schema::ResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxResizeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Resize>(); + + prim->set_format(mindspore::Format::NCHW); + prim->set_nearest_mode(mindspore::NearestMode::ROUND_HALF_DOWN); - attr->format = schema::Format_NCHW; - attr->nearestMode = schema::NearestMode_ROUND_HALF_DOWN; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "coordinate_transformation_mode") { - std::map<std::string, schema::CoordinateTransformMode> transform_map = { - {"half_pixel", schema::CoordinateTransformMode_HALF_PIXEL}, - {"pytorch_half_pixel", schema::CoordinateTransformMode_HALF_PIXEL}, - {"align_corners", schema::CoordinateTransformMode_ALIGN_CORNERS}, - {"asymmetric", schema::CoordinateTransformMode_ASYMMETRIC}, - {"tf_crop_and_resize", schema::CoordinateTransformMode_TF_CROP_AND_RESIZE}, - }; + std::map<std::string, mindspore::CoordinateTransformMode> transform_map = { + {"half_pixel", mindspore::CoordinateTransformMode::HALF_PIXEL}, + {"pytorch_half_pixel", mindspore::CoordinateTransformMode::HALF_PIXEL}, + {"align_corners", 
mindspore::CoordinateTransformMode::ALIGN_CORNERS}, + {"asymmetric", mindspore::CoordinateTransformMode::ASYMMETRIC}}; if (transform_map.find(onnx_node_attr.s()) != transform_map.end()) { - attr->coordinateTransformMode = transform_map[onnx_node_attr.s()]; + prim->set_coordinate_transform_mode(transform_map[onnx_node_attr.s()]); } else { - MS_LOG(ERROR) << "Unsupport coordinate transform mode: " << attribute_name; + MS_LOG(ERROR) << "Unsupported coordinate transform mode: " << onnx_node_attr.s(); return nullptr; } } else if (attribute_name == "cubic_coeff_a") { - attr->cubicCoeff = onnx_node_attr.f(); + prim->set_cubic_coeff(onnx_node_attr.f()); } else if (attribute_name == "exclude_outside") { - attr->excludeOutside = onnx_node_attr.i(); + prim->set_exclude_outside(onnx_node_attr.i()); } else if (attribute_name == "extrapolation_value") { - attr->extrapolationValue = onnx_node_attr.f(); + prim->set_extrapolation_value(onnx_node_attr.f()); } else if (attribute_name == "mode") { - attr->method = [&]() { - std::map<std::string, schema::ResizeMethod> resize_mode = { - {"nearest", schema::ResizeMethod_NEAREST}, - {"linear", schema::ResizeMethod_LINEAR}, - {"cubic", schema::ResizeMethod_CUBIC}, - }; - return resize_mode[onnx_node_attr.s()]; - }(); + std::map<std::string, mindspore::ResizeMethod> resize_mode = { + {"nearest", mindspore::ResizeMethod::NEAREST}, + {"linear", mindspore::ResizeMethod::LINEAR}, + {"cubic", mindspore::ResizeMethod::CUBIC}, + }; + prim->set_method(resize_mode[onnx_node_attr.s()]); } else if (attribute_name == "nearest_mode") { - attr->nearestMode = [&]() { - std::map<std::string, schema::NearestMode> nearest_mode = { - {"round_prefer_floor", schema::NearestMode_ROUND_HALF_DOWN}, - {"round_prefer_ceil", schema::NearestMode_ROUND_HALF_UP}, - {"floor", schema::NearestMode_FLOOR}, - {"ceil", schema::NearestMode_CEIL}, - }; - return nearest_mode[onnx_node_attr.s()]; - }(); + std::map<std::string, mindspore::NearestMode> nearest_mode = { + {"round_prefer_floor", mindspore::NearestMode::ROUND_HALF_DOWN}, + {"round_prefer_ceil", mindspore::NearestMode::ROUND_HALF_UP}, + {"floor", mindspore::NearestMode::FLOOR}, + {"ceil", mindspore::NearestMode::CEIL}, + }; + prim->set_nearest_mode(nearest_mode[onnx_node_attr.s()]); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Resize; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxResizeParser("Resize", new OnnxResizeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.h index 7bb19e84a8..fc19617032 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_resize_parser.h @@ -27,7 +27,7 @@ class OnnxResizeParser : public OnnxNodeParser { OnnxResizeParser() : OnnxNodeParser("Resize") {} ~OnnxResizeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc 
b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc index 052c72f703..61392618c0 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.cc @@ -16,26 +16,13 @@ #include "tools/converter/parser/onnx/onnx_shape_parser.h" #include <memory> +#include "ops/shape.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxShapeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx ShapeParser"; - auto attr = std::make_unique<schema::ShapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Shape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxShapeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Shape>(); + return prim.release(); } OnnxNodeRegistrar g_onnxShapeParser("Shape", new OnnxShapeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h index 3da6eed628..b78d4673b1 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_shape_parser.h @@ -27,7 +27,7 @@ class OnnxShapeParser : public OnnxNodeParser { OnnxShapeParser() : OnnxNodeParser("Shape") {} ~OnnxShapeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc deleted file mode 100644 index 956d8936fb..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.cc +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "tools/converter/parser/onnx/onnx_sigmoid_parser.h" -#include <memory> - -namespace mindspore { -namespace lite { -lite::PrimitiveC *OnnxSigmoidParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SigmoidParser"; - auto attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - attr->type = schema::ActivationType_SIGMOID; - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -OnnxNodeRegistrar g_onnxSigmoodParser("Sigmoid", new OnnxSigmoidParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h deleted file mode 100644 index c131af9fb7..0000000000 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_sigmoid_parser.h +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SIGMOID_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SIGMOID_PARSER_H - -#include "tools/converter/parser/onnx/onnx_node_parser.h" -#include "tools/converter/parser/onnx/onnx_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class OnnxSigmoidParser : public OnnxNodeParser { - public: - OnnxSigmoidParser() : OnnxNodeParser("Sigmoid") {} - ~OnnxSigmoidParser() override = default; - - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_ONNX_SIGMOID_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc index c28eba6ded..99024b3546 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.cc @@ -15,28 +15,23 @@ */ #include "tools/converter/parser/onnx/onnx_slice_parser.h" -#include <algorithm> #include <functional> #include <memory> #include <numeric> +#include <algorithm> #include <vector> #include <string> +#include "ops/strided_slice.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxSliceParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SliceParser"; - auto attr = std::make_unique<schema::StridedSliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxSliceParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::StridedSlice>(); - std::vector<int> starts; - std::vector<int> ends; - std::vector<int> axes; - std::vector<int> steps; + std::vector<int32_t> starts; + std::vector<int32_t> ends; + std::vector<int32_t> axes; + std::vector<int32_t> steps; constexpr int64_t int_32_max = INT32_MAX; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); @@ -66,16 +61,18 @@ lite::PrimitiveC *OnnxSliceParser::ParseLitePrimitive(const onnx::GraphProto &on } } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; + int size = -1; + if (!starts.empty()) { + size = static_cast<int>(starts.size()); + } else if (!ends.empty()) { + size = static_cast<int>(ends.size()); + } else if (!axes.empty()) { + size = static_cast<int>(axes.size()); + } else if (!steps.empty()) { + size = static_cast<int>(steps.size()); } - primitive->value.type = schema::PrimitiveType_StridedSlice; - primitive->value.value = attr.release(); - auto primitive_c = PrimitiveC::Create(primitive.release()); - if (starts.empty()) { - return primitive_c; + if (size == -1) { + return prim.release(); } if (axes.empty()) { for (size_t i = 0; i < starts.size(); ++i) { @@ -85,11 +82,13 @@ lite::PrimitiveC *OnnxSliceParser::ParseLitePrimitive(const onnx::GraphProto &on if (steps.empty()) { steps.assign(starts.size(), 1); } - primitive_c->set_attr("starts", MakeValue<std::vector<int>>(starts)); - primitive_c->set_attr("ends", MakeValue<std::vector<int>>(ends)); - primitive_c->set_attr("axes", MakeValue<std::vector<int>>(axes)); - primitive_c->set_attr("steps", MakeValue<std::vector<int>>(steps)); - return primitive_c; + + prim->AddAttr("starts", 
MakeValue(starts)); + prim->AddAttr("axes", MakeValue(axes)); + prim->AddAttr("ends", MakeValue(ends)); + prim->AddAttr("steps", MakeValue(steps)); + + return prim.release(); } OnnxNodeRegistrar g_onnxSliceParser("Slice", new OnnxSliceParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h index 210fd4f3a0..9608931e9f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_slice_parser.h @@ -29,7 +29,7 @@ class OnnxSliceParser : public OnnxNodeParser { OnnxSliceParser() : OnnxNodeParser("Slice") {} ~OnnxSliceParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc index 5facba0cc7..a6be2910df 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.cc @@ -16,41 +16,28 @@ #include "tools/converter/parser/onnx/onnx_softmax_parser.h" #include <memory> +#include "ops/softmax.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxSoftMaxParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SoftMaxParser"; - auto attr = std::make_unique<schema::SoftMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxSoftMaxParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Softmax>(); + int64_t axis; bool axis_is_def = true; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->axis = static_cast<int32_t>(onnx_node_attr.i()); + axis = onnx_node_attr.i(); axis_is_def = false; } } if (axis_is_def) { - if (OnnxNodeParser::opset_version() >= 13) { - attr->axis = -1; - } else { - attr->axis = 1; - } + axis = OnnxNodeParser::opset_version() >= 13 ? 
-1 : 1; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_SoftMax; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_axis({axis}); + + return prim.release(); } OnnxNodeRegistrar g_onnxSoftMaxParser("Softmax", new OnnxSoftMaxParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h index d60346f65f..ccbf24a341 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_softmax_parser.h @@ -27,7 +27,7 @@ class OnnxSoftMaxParser : public OnnxNodeParser { OnnxSoftMaxParser() : OnnxNodeParser("Softmax") {} ~OnnxSoftMaxParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc index d404fe7285..d28aebee93 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.cc @@ -16,33 +16,21 @@ #include "tools/converter/parser/onnx/onnx_space_to_depth_parser.h" #include <memory> +#include "ops/space_to_depth.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxSpaceToDepthParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SpaceToDepthParser"; - auto attr = std::make_unique<schema::SpaceToDepthT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxSpaceToDepthParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::SpaceToDepth>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "blocksize") { - attr->blockSize = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_block_size(onnx_node_attr.i()); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_SpaceToDepth; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxSpaceToDepthParser("SpaceToDepth", new OnnxSpaceToDepthParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h index daff7831a7..00798fcfee 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_space_to_depth_parser.h @@ -27,7 +27,7 @@ class OnnxSpaceToDepthParser : public OnnxNodeParser { OnnxSpaceToDepthParser() : OnnxNodeParser("SpaceToDepth") {} ~OnnxSpaceToDepthParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) 
override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.cc index 84a515eaa6..0e4d5cdaae 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.cc @@ -16,39 +16,35 @@ #include "tools/converter/parser/onnx/onnx_split_parser.h" #include <memory> +#include <vector> +#include <algorithm> +#include "ops/split.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxSplitParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SplitParser"; - auto attr = std::make_unique<schema::SplitT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxSplitParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Split>(); - attr->splitDim = 0; + prim->set_axis(0); + std::vector<int64_t> size_splits; + int64_t split_num = 0; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axis") { - attr->splitDim = static_cast<int32_t>(onnx_node_attr.i()); + prim->set_axis(onnx_node_attr.i()); } else if (attribute_name == "split") { - for (auto sizeSplit : onnx_node_attr.ints()) { - attr->sizeSplits.emplace_back(sizeSplit); - } - attr->numberSplit = onnx_node_attr.ints_size(); + size_splits.resize(onnx_node_attr.ints_size()); + std::copy(onnx_node_attr.ints().begin(), onnx_node_attr.ints().end(), size_splits.begin()); + prim->set_size_splits(size_splits); + split_num = onnx_node_attr.ints_size(); } } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; + if (split_num == 0) { + split_num = onnx_node.output_size(); } - primitive->value.type = schema::PrimitiveType_Split; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_output_num(split_num); + + return prim.release(); } OnnxNodeRegistrar g_onnxSplitParser("Split", new OnnxSplitParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.h index bd6fe288ec..d2d28e529c 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_split_parser.h @@ -27,7 +27,7 @@ class OnnxSplitParser : public OnnxNodeParser { OnnxSplitParser() : OnnxNodeParser("Split") {} ~OnnxSplitParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc index 2c8a14b56f..a62732864e 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.cc @@ -16,35 +16,26 @@ #include "tools/converter/parser/onnx/onnx_squeeze_parser.h" 
#include <memory> +#include <vector> +#include "ops/squeeze.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxSqueezeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx SqueezeParser"; - auto attr = std::make_unique<schema::SqueezeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Squeeze>(); + std::vector<int64_t> axis; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axes") { for (int i = 0; i < onnx_node_attr.ints().size(); ++i) { - attr->axis.emplace_back(onnx_node_attr.ints(i)); + axis.emplace_back(onnx_node_attr.ints(i)); } + prim->set_axis(axis); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Squeeze; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxSqueezeParser("Squeeze", new OnnxSqueezeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h index fef408d8bb..a53c35e0b6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_squeeze_parser.h @@ -27,7 +27,7 @@ class OnnxSqueezeParser : public OnnxNodeParser { OnnxSqueezeParser() : OnnxNodeParser("Squeeze") {} ~OnnxSqueezeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc index f875e312df..1896322a22 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.cc @@ -15,27 +15,15 @@ */ #include "tools/converter/parser/onnx/onnx_tile_parser.h" -#include <memory> #include <vector> +#include <memory> +#include "ops/fusion/tile_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxTileParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TileParser"; - auto attr = std::make_unique<schema::TileT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Tile; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *OnnxTileParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::TileFusion>(); + return prim.release(); } OnnxNodeRegistrar g_onnxTileParser("Tile", new OnnxTileParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h 
b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h index 1117c34bba..d03d4e290f 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_tile_parser.h @@ -27,7 +27,7 @@ class OnnxTileParser : public OnnxNodeParser { OnnxTileParser() : OnnxNodeParser("Tile") {} ~OnnxTileParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc index 67f7966f7c..08071157f9 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.cc @@ -16,33 +16,21 @@ #include "tools/converter/parser/onnx/onnx_topk_parser.h" #include <memory> +#include "ops/fusion/topk_fusion.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxTopkParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TopKParser"; - auto attr = std::make_unique<schema::TopKT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxTopkParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::TopKFusion>(); for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "k") { - attr->k = static_cast<int32_t>(onnx_node_attr.i()); + prim->AddAttr("k", MakeValue(static_cast<int32_t>(onnx_node_attr.i()))); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_TopK; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxTopkParser("TopK", new OnnxTopkParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.h index 4c593871d4..f075fa42f4 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_topk_parser.h @@ -27,7 +27,7 @@ class OnnxTopkParser : public OnnxNodeParser { OnnxTopkParser() : OnnxNodeParser("TopK") {} ~OnnxTopkParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc index c481abbc19..49fac0a1f6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.cc @@ -16,36 +16,26 @@ #include "tools/converter/parser/onnx/onnx_transpose_parser.h" #include <memory> +#include <vector> +#include "ops/transpose.h" namespace mindspore { namespace lite { -lite::PrimitiveC 
*OnnxTransposeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx TransposeParser"; - auto attr = std::make_unique<schema::TransposeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxTransposeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Transpose>(); + std::vector<int32_t> perm; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axes" || attribute_name == "perm") { - attr->perm.resize(onnx_node_attr.ints_size()); + perm.resize(onnx_node_attr.ints_size()); for (int i = 0; i < onnx_node_attr.ints_size(); ++i) { - attr->perm[i] = onnx_node_attr.ints(i); + perm[i] = onnx_node_attr.ints(i); } } } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Transpose; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->AddAttr("perm", MakeValue(perm)); + return prim.release(); } OnnxNodeRegistrar g_onnxTransposeParser("Transpose", new OnnxTransposeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h index 63f4b7f19e..4f6f76edce 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_transpose_parser.h @@ -27,7 +27,7 @@ class OnnxTransposeParser : public OnnxNodeParser { OnnxTransposeParser() : OnnxNodeParser("Transpose") {} ~OnnxTransposeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc index d01fc0e954..6fe3a53ac6 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.cc @@ -16,35 +16,26 @@ #include "tools/converter/parser/onnx/onnx_unsqueeze_parser.h" #include <memory> +#include <vector> +#include "ops/unsqueeze.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxUnSqueezeParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx UnSqueezeParser"; - auto attr = std::make_unique<schema::UnsqueezeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxUnSqueezeParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Unsqueeze>(); + std::vector<int64_t> axis; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "axes") { for (int i = 0; i < onnx_node_attr.ints().size(); ++i) { - attr->axis.emplace_back(onnx_node_attr.ints(i)); + axis.emplace_back(onnx_node_attr.ints(i)); } + prim->set_axis(axis); } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive 
== nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Unsqueeze; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } OnnxNodeRegistrar g_onnxUnsqueezeParser("Unsqueeze", new OnnxUnSqueezeParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h index 6e01f72d80..eb8074e2b4 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_unsqueeze_parser.h @@ -27,7 +27,7 @@ class OnnxUnSqueezeParser : public OnnxNodeParser { OnnxUnSqueezeParser() : OnnxNodeParser("Unsqueeze") {} ~OnnxUnSqueezeParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc index 1d645bfd40..e3ef10aaba 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.cc @@ -15,39 +15,32 @@ */ #include "tools/converter/parser/onnx/onnx_upsample_parser.h" +#include <string> +#include <vector> #include <memory> +#include "ops/resize.h" namespace mindspore { namespace lite { -lite::PrimitiveC *OnnxUpsampleParser::ParseLitePrimitive(const onnx::GraphProto &onnx_graph, - const onnx::NodeProto &onnx_node) { - MS_LOG(DEBUG) << "onnx UpsampleParser"; - auto attr = std::make_unique<schema::ResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *OnnxUpsampleParser::Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) { + auto prim = std::make_unique<ops::Resize>(); + + prim->set_method(mindspore::ResizeMethod::NEAREST); // use bilinear method - attr->method = schema::ResizeMethod_NEAREST; for (const auto &onnx_node_attr : onnx_node.attribute()) { const auto &attribute_name = onnx_node_attr.name(); if (attribute_name == "mode") { if (onnx_node_attr.s() != "nearest" && onnx_node_attr.s() != "linear") { - MS_LOG(ERROR) << "the upsample mode don't support now."; + MS_LOG(ERROR) << "the UpSample mode is not supported now."; return nullptr; } - attr->method = onnx_node_attr.s() == "nearest" ? schema::ResizeMethod_NEAREST : schema::ResizeMethod_LINEAR; + prim->set_method(onnx_node_attr.s() == "nearest" ? 
mindspore::ResizeMethod::NEAREST + : mindspore::ResizeMethod::LINEAR); } } - attr->coordinateTransformMode = schema::CoordinateTransformMode_ASYMMETRIC; - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "new primitive failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Resize; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ASYMMETRIC); + + return prim.release(); } OnnxNodeRegistrar g_onnxUpsampleParser("Upsample", new OnnxUpsampleParser()); diff --git a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.h b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.h index 7b8158dbb4..56ce858faf 100644 --- a/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.h +++ b/mindspore/lite/tools/converter/parser/onnx/onnx_upsample_parser.h @@ -27,7 +27,7 @@ class OnnxUpsampleParser : public OnnxNodeParser { OnnxUpsampleParser() : OnnxNodeParser("Upsample") {} ~OnnxUpsampleParser() override = default; - lite::PrimitiveC *ParseLitePrimitive(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; + ops::PrimitiveC *Parse(const onnx::GraphProto &onnx_graph, const onnx::NodeProto &onnx_node) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.cc index 6a27f2463c..857256908d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.cc @@ -19,74 +19,66 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/activation.h" +#include "ops/leaky_relu.h" namespace mindspore { namespace lite { -STATUS TFActivationParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ActivationParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ActivationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFActivationParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Activation>(); if (tf_op.op() == "Relu") { - attr->type = schema::ActivationType_RELU; + prim->set_activation_type(mindspore::ActivationType::RELU); } else if (tf_op.op() == "Relu6") { - attr->type = schema::ActivationType_RELU6; + prim->set_activation_type(mindspore::ActivationType::RELU6); } else if (tf_op.op() == "Sigmoid") { - attr->type = schema::ActivationType_SIGMOID; + prim->set_activation_type(mindspore::ActivationType::SIGMOID); } else if (tf_op.op() == "Tanh") { - attr->type = schema::ActivationType_TANH; - } else if (tf_op.op() == "LeakyRelu") { - attr->type = schema::ActivationType_LEAKY_RELU; + 
prim->set_activation_type(mindspore::ActivationType::TANH); } else if (tf_op.op() == "Selu") { - attr->type = schema::ActivationType_SELU; + prim->set_activation_type(mindspore::ActivationType::SELU); } else { MS_LOG(ERROR) << "unsupported activation type:" << tf_op.op(); - return RET_ERROR; + return nullptr; } - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - if (tf_op.op() == "LeakyRelu") { - auto attr_leaky_relu = std::make_unique<schema::LeakyReLUT>(); - tensorflow::AttrValue attr_value; - if (!TensorFlowUtils::FindAttrValue(tf_op, "alpha", &attr_value)) { - MS_LOG(ERROR) << "The attribute alpha should be specified."; - return RET_ERROR; - } - attr_leaky_relu->negativeSlope = attr_value.f(); - primitive->value.type = schema::PrimitiveType_LeakyReLU; - primitive->value.value = attr_leaky_relu.release(); + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + + return prim.release(); +} + +ops::PrimitiveC *TFLeakyReluParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::LeakyRelu>(); + + tensorflow::AttrValue attr_value; + if (!TensorFlowUtils::FindAttrValue(tf_op, "alpha", &attr_value)) { + MS_LOG(ERROR) << "The attribute alpha should be specified."; + return nullptr; } + prim->set_negative_slope(attr_value.f()); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); } + TFNodeRegistrar g_tfReluParser("Relu", new TFActivationParser()); TFNodeRegistrar g_tfRelu6Parser("Relu6", new TFActivationParser()); TFNodeRegistrar g_tfSigmoidParser("Sigmoid", new TFActivationParser()); TFNodeRegistrar g_tfTanhParser("Tanh", new TFActivationParser()); -TFNodeRegistrar g_tfLeakyReluParser("LeakyRelu", new TFActivationParser()); TFNodeRegistrar g_tfSeLUParser("Selu", new TFActivationParser()); +TFNodeRegistrar g_tfLeakyReluParser("LeakyRelu", new TFLeakyReluParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.h index 0c04e4744c..87dce5e6de 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_activation_parser.h @@ -29,8 +29,19 @@ class TFActivationParser : public TFNodeParser { TFActivationParser() = default; ~TFActivationParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLeakyReluParser : public TFNodeParser { + public: + TFLeakyReluParser() = default; + ~TFLeakyReluParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> 
&tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.cc index 5c9480c2f6..d21d4875c7 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.cc @@ -19,49 +19,32 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/arg_max_fusion.h" namespace mindspore { namespace lite { -STATUS TFArgMaxParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(DEBUG) << "TF ArgMaxParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ArgMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFArgMaxParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ArgMaxFusion>(); + tensorflow::AttrValue attr_value; auto axis_node = tf_node_map.at(tf_op.input(tf_op.input_size() - 1)); if (!TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { MS_LOG(ERROR) << "The attr value should be specified."; - return RET_ERROR; + return nullptr; } auto &axis_tensor = attr_value.tensor(); - attr->axis = axis_tensor.int_val(0); - attr->outMaxValue = false; - primitive->value.type = schema::PrimitiveType_ArgMax; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axis(axis_tensor.int_val(0)); + prim->set_out_max_value(false); + *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - return RET_OK; + + return prim.release(); } TFNodeRegistrar g_tfArgMaxParser("ArgMax", new TFArgMaxParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.h index a85e05f475..197afb60ba 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_argmax_parser.h @@ -28,8 +28,9 @@ class TFArgMaxParser : public TFNodeParser { TFArgMaxParser() = default; ~TFArgMaxParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git 
a/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.cc index 31dea2fbc2..85aa604d07 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.cc @@ -19,49 +19,32 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/arg_min_fusion.h" namespace mindspore { namespace lite { -STATUS TFArgMinParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(DEBUG) << "TF ArgMinParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ArgMinT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFArgMinParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ArgMinFusion>(); + tensorflow::AttrValue attr_value; auto axis_node = tf_node_map.at(tf_op.input(tf_op.input_size() - 1)); if (!TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { MS_LOG(ERROR) << "The attr value should be specified."; - return RET_ERROR; + return nullptr; } auto &axis_tensor = attr_value.tensor(); - attr->axis = axis_tensor.int_val(0); - attr->outMaxValue = false; - primitive->value.type = schema::PrimitiveType_ArgMin; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axis(axis_tensor.int_val(0)); + prim->set_out_max_value(false); + *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - return RET_OK; + + return prim.release(); } TFNodeRegistrar g_tfArgMinParser("ArgMin", new TFArgMinParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.h index 64f09ec74c..1827eee910 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_argmin_parser.h @@ -28,8 +28,9 @@ class TFArgMinParser : public TFNodeParser { TFArgMinParser() = default; ~TFArgMinParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.cc index 3ba08825e3..588bbd95e1 100644 --- 
a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.cc @@ -19,167 +19,427 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/add_fusion.h" +#include "ops/fusion/div_fusion.h" +#include "ops/greater.h" +#include "ops/greater_equal.h" +#include "ops/less.h" +#include "ops/less_equal.h" +#include "ops/equal.h" +#include "ops/maximum.h" +#include "ops/minimum.h" +#include "ops/fusion/mul_fusion.h" +#include "ops/not_equal.h" +#include "ops/fusion/sub_fusion.h" +#include "ops/squared_difference.h" +#include "ops/rsqrt.h" +#include "ops/round.h" +#include "ops/ceil.h" +#include "ops/fusion/exp_fusion.h" +#include "ops/floor.h" +#include "ops/floor_mod.h" +#include "ops/log.h" +#include "ops/sqrt.h" +#include "ops/cos.h" +#include "ops/sin.h" +#include "ops/square.h" +#include "ops/fusion/pow_fusion.h" +#include "ops/abs.h" namespace mindspore { namespace lite { -STATUS TFArithmeticParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ArithmeticParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - - if (tf_op.op() == "Add" || tf_op.op() == "AddV2") { - auto attr = std::make_unique<schema::AddT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Add; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Sub") { - auto attr = std::make_unique<schema::SubT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Sub; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Mul") { - auto attr = std::make_unique<schema::MulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Mul; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Div" || tf_op.op() == "RealDiv") { - auto attr = std::make_unique<schema::DivT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Div; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Maximum") { - auto attr = std::make_unique<schema::MaximumT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Maximum; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Minimum") { - auto attr = std::make_unique<schema::MinimumT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Minimum; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Greater") { - auto attr = std::make_unique<schema::GreaterT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Greater; - primitive->value.value = attr.release(); - } else if 
(tf_op.op() == "GreaterEqual") { - auto attr = std::make_unique<schema::GreaterEqualT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_GreaterEqual; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Less") { - auto attr = std::make_unique<schema::LessT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Less; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "LessEqual") { - auto attr = std::make_unique<schema::LessEqualT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_LessEqual; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "Equal") { - auto attr = std::make_unique<schema::EqualT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Equal; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "NotEqual") { - auto attr = std::make_unique<schema::NotEqualT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_NotEqual; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "FloorMod") { - auto attr = std::make_unique<schema::FloorModT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_FloorMod; - primitive->value.value = attr.release(); - } else if (tf_op.op() == "FloorDiv") { - auto attr = std::make_unique<schema::FloorDivT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_FloorDiv; - primitive->value.value = attr.release(); - } - - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; - } - status = AddOpInput(tf_op, 1, inputs); - return status; -} -TFNodeRegistrar g_tfAddParser("Add", new TFArithmeticParser()); -TFNodeRegistrar g_tfAddV2Parser("AddV2", new TFArithmeticParser()); -TFNodeRegistrar g_tfSubParser("Sub", new TFArithmeticParser()); -TFNodeRegistrar g_tfMulParser("Mul", new TFArithmeticParser()); -TFNodeRegistrar g_tfDivParser("Div", new TFArithmeticParser()); -TFNodeRegistrar g_tfFloorModParser("FloorMod", new TFArithmeticParser()); -TFNodeRegistrar g_tfFloorDivParser("FloorDiv", new TFArithmeticParser()); -TFNodeRegistrar g_tfRealDivParser("RealDiv", new TFArithmeticParser()); -TFNodeRegistrar g_tfMaximumParser("Maximum", new TFArithmeticParser()); -TFNodeRegistrar g_tfMinimumParser("Minimum", new TFArithmeticParser()); -TFNodeRegistrar g_tfGreaterParser("Greater", new TFArithmeticParser()); -TFNodeRegistrar g_tfGreaterEqualParser("GreaterEqual", new TFArithmeticParser()); -TFNodeRegistrar g_tfLessParser("Less", new TFArithmeticParser()); -TFNodeRegistrar g_tfLessEqualParser("LessEqual", new TFArithmeticParser()); -TFNodeRegistrar g_tfEqualParser("Equal", new TFArithmeticParser()); -TFNodeRegistrar g_tfNotEqualParser("NotEqual", new TFArithmeticParser()); +ops::PrimitiveC *TFAddParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, 
const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::AddFusion>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFSubParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::SubFusion>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFMulParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::MulFusion>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFDivParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::DivFusion>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFMaximumParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Maximum>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFMinimumParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Minimum>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFGreaterParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Greater>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFGreaterEqualParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::GreaterEqual>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + 
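Every binary parser above shares the same four-step body: construct the ops:: primitive, set *output_size to 1, register both operands through AddOpInput, and release the primitive to the caller. A member-template helper could collapse that boilerplate; the sketch below is illustrative only (the name ParseBinaryOp and its placement inside a TFNodeParser subclass are assumptions, not part of this patch):

// Hypothetical helper, not part of this change: the shared body of the
// binary arithmetic parsers above. Assumes it is declared where
// TFNodeParser::AddOpInput is reachable; PrimT is any default-constructible
// ops:: primitive type (e.g. ops::AddFusion, ops::SubFusion).
template <typename PrimT>
ops::PrimitiveC *ParseBinaryOp(const tensorflow::NodeDef &tf_op, std::vector<std::string> *inputs,
                               int *output_size) {
  auto prim = std::make_unique<PrimT>();
  *output_size = 1;  // every binary arithmetic op yields exactly one output tensor
  if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) {
    MS_LOG(ERROR) << "add op input failed";
    return nullptr;
  }
  return prim.release();
}

// A concrete parser would then reduce to a single statement, e.g.
//   return ParseBinaryOp<ops::AddFusion>(tf_op, inputs, output_size);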
+ops::PrimitiveC *TFLessParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Less>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFLessEqualParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::LessEqual>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFEqualParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Equal>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFNotEqualParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::NotEqual>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFSquaredDifferenceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::SquaredDifference>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFRsqrtParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Rsqrt>(); + + *output_size = 1; + for (int i = 0; i < tf_op.input_size(); i++) { + inputs->emplace_back(tf_op.input(i)); + } + + return prim.release(); +} + +ops::PrimitiveC *TFRoundParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Round>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFCeilParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Ceil>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFExpParser::Parse(const 
tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ExpFusion>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFFloorParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Floor>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFFloorModParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::FloorMod>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFLogParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Log>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFSqrtParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Sqrt>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFCosParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Cos>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFSinParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Sin>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFSquareParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Square>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFPowParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::PowFusion>(); + + *output_size = 1; + if 
(AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +ops::PrimitiveC *TFAbsParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Abs>(); + + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); +} + +TFNodeRegistrar g_tfAddParser("Add", new TFAddParser()); +TFNodeRegistrar g_tfAddV2Parser("AddV2", new TFAddParser()); +TFNodeRegistrar g_tfSubParser("Sub", new TFSubParser()); +TFNodeRegistrar g_tfMulParser("Mul", new TFMulParser()); +TFNodeRegistrar g_tfDivParser("Div", new TFDivParser()); +TFNodeRegistrar g_tfRealDivParser("RealDiv", new TFDivParser()); +TFNodeRegistrar g_tfMaximumParser("Maximum", new TFMaximumParser()); +TFNodeRegistrar g_tfMinimumParser("Minimum", new TFMinimumParser()); +TFNodeRegistrar g_tfGreaterParser("Greater", new TFGreaterParser()); +TFNodeRegistrar g_tfGreaterEqualParser("GreaterEqual", new TFGreaterEqualParser()); +TFNodeRegistrar g_tfLessParser("Less", new TFLessParser()); +TFNodeRegistrar g_tfLessEqualParser("LessEqual", new TFLessEqualParser()); +TFNodeRegistrar g_tfEqualParser("Equal", new TFEqualParser()); +TFNodeRegistrar g_tfNotEqualParser("NotEqual", new TFNotEqualParser()); +TFNodeRegistrar g_tfSquaredDifferenceParser("SquaredDifference", new TFSquaredDifferenceParser()); +TFNodeRegistrar g_tfRsqrtParser("Rsqrt", new TFRsqrtParser()); + +TFNodeRegistrar g_tfRoundParser("Round", new TFRoundParser()); +TFNodeRegistrar g_tfCosParser("Cos", new TFCosParser()); +TFNodeRegistrar g_tfSinParser("Sin", new TFSinParser()); +TFNodeRegistrar g_tfSquareParser("Square", new TFSquareParser()); +TFNodeRegistrar g_tfCeilParser("Ceil", new TFCeilParser()); +TFNodeRegistrar g_tfExpParser("Exp", new TFExpParser()); +TFNodeRegistrar g_tfFloorParser("Floor", new TFFloorParser()); +TFNodeRegistrar g_tfFloorModParser("FloorMod", new TFFloorModParser()); +TFNodeRegistrar g_tfLogParser("Log", new TFLogParser()); +TFNodeRegistrar g_tfSqrtParser("Sqrt", new TFSqrtParser()); +TFNodeRegistrar g_tfPowParser("Pow", new TFPowParser()); +TFNodeRegistrar g_tfAbsParser("Abs", new TFAbsParser()); + } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.h index 6b02b7e63d..305e761ace 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_parser.h @@ -15,6 +15,7 @@ */ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ARITHMETIC_PARSER_H_ #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ARITHMETIC_PARSER_H_ + #include <string> #include <memory> #include <map> @@ -23,13 +24,264 @@ namespace mindspore { namespace lite { -class TFArithmeticParser : public TFNodeParser { +class TFAddParser : public TFNodeParser { + public: + TFAddParser() = default; + ~TFAddParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFSubParser : public TFNodeParser { + public: + TFSubParser() = default; + ~TFSubParser() override = default; + + ops::PrimitiveC *Parse(const 
tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFMulParser : public TFNodeParser { + public: + TFMulParser() = default; + ~TFMulParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFDivParser : public TFNodeParser { + public: + TFDivParser() = default; + ~TFDivParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFMaximumParser : public TFNodeParser { + public: + TFMaximumParser() = default; + ~TFMaximumParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFMinimumParser : public TFNodeParser { + public: + TFMinimumParser() = default; + ~TFMinimumParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFGreaterParser : public TFNodeParser { + public: + TFGreaterParser() = default; + ~TFGreaterParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFGreaterEqualParser : public TFNodeParser { + public: + TFGreaterEqualParser() = default; + ~TFGreaterEqualParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLessParser : public TFNodeParser { + public: + TFLessParser() = default; + ~TFLessParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLessEqualParser : public TFNodeParser { + public: + TFLessEqualParser() = default; + ~TFLessEqualParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFEqualParser : public TFNodeParser { + public: + TFEqualParser() = default; + ~TFEqualParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFNotEqualParser : public TFNodeParser { + public: + TFNotEqualParser() = default; + ~TFNotEqualParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFSquaredDifferenceParser : public TFNodeParser { + public: + TFSquaredDifferenceParser() = default; + 
~TFSquaredDifferenceParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFRsqrtParser : public TFNodeParser { + public: + TFRsqrtParser() = default; + ~TFRsqrtParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFRoundParser : public TFNodeParser { + public: + TFRoundParser() = default; + ~TFRoundParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFCeilParser : public TFNodeParser { + public: + TFCeilParser() = default; + ~TFCeilParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFExpParser : public TFNodeParser { + public: + TFExpParser() = default; + ~TFExpParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFFloorParser : public TFNodeParser { + public: + TFFloorParser() = default; + ~TFFloorParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFFloorModParser : public TFNodeParser { + public: + TFFloorModParser() = default; + ~TFFloorModParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLogParser : public TFNodeParser { + public: + TFLogParser() = default; + ~TFLogParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFSqrtParser : public TFNodeParser { + public: + TFSqrtParser() = default; + ~TFSqrtParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFCosParser : public TFNodeParser { + public: + TFCosParser() = default; + ~TFCosParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFSinParser : public TFNodeParser { + public: + TFSinParser() = default; + ~TFSinParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFSquareParser : public TFNodeParser { + public: + TFSquareParser() = default; + ~TFSquareParser() 
override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFPowParser : public TFNodeParser { + public: + TFPowParser() = default; + ~TFPowParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFAbsParser : public TFNodeParser { public: - TFArithmeticParser() = default; - ~TFArithmeticParser() override = default; + TFAbsParser() = default; + ~TFAbsParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.cc deleted file mode 100644 index 4be0010701..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.cc +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "tools/converter/parser/tf/tf_arithmetic_self_parser.h" -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser_registry.h" -#include "tools/common/node_util.h" - -namespace mindspore { -namespace lite { - -STATUS TFArithmeticSelfParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ArithmeticParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - - int status = RET_ERROR; - if (tf_op.op() == "Ceil") { - status = CreateOperator<schema::CeilT>(primitive, schema::PrimitiveType_Ceil); - } else if (tf_op.op() == "Exp") { - status = CreateOperator<schema::ExpT>(primitive, schema::PrimitiveType_Exp); - } else if (tf_op.op() == "Floor") { - status = CreateOperator<schema::FloorT>(primitive, schema::PrimitiveType_Floor); - } else if (tf_op.op() == "Log") { - status = CreateOperator<schema::LogT>(primitive, schema::PrimitiveType_Log); - } else if (tf_op.op() == "Sqrt") { - status = CreateOperator<schema::SqrtT>(primitive, schema::PrimitiveType_Sqrt); - } else if (tf_op.op() == "Cos") { - status = CreateOperator<schema::CosT>(primitive, schema::PrimitiveType_Cos); - } else if (tf_op.op() == "Sin") { - status = CreateOperator<schema::SinT>(primitive, schema::PrimitiveType_Sin); - } else if (tf_op.op() == "Square") { - status = CreateOperator<schema::SquareT>(primitive, schema::PrimitiveType_Square); - } else if (tf_op.op() == "Pow") { - status = CreateOperator<schema::PowerT>(primitive, schema::PrimitiveType_Power); - } else if (tf_op.op() == "Abs") { - status = CreateOperator<schema::PowerT>(primitive, schema::PrimitiveType_Abs); - } else { - MS_LOG(ERROR) << "unsupported arithmetic self type:" << tf_op.op(); - return RET_ERROR; - } - if (status != RET_OK) { - return status; - } - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - - *output_size = 1; - status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - return status; -} -TFNodeRegistrar g_tfCosParser("Cos", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfSinParser("Sin", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfSquareParser("Square", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfCeilParser("Ceil", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfExpParser("Exp", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfFloorParser("Floor", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfLogParser("Log", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfSqrtParser("Sqrt", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfPowParser("Pow", new TFArithmeticSelfParser()); -TFNodeRegistrar g_tfAbsParser("Abs", new TFArithmeticSelfParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.h deleted file mode 100644 index 16acb588b7..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_arithmetic_self_parser.h 
+++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ARITHMETIC_SELF_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ARITHMETIC_SELF_PARSER_H_ -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser.h" - -namespace mindspore { -namespace lite { -class TFArithmeticSelfParser : public TFNodeParser { - public: - TFArithmeticSelfParser() = default; - ~TFArithmeticSelfParser() override = default; - - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ARITHMETIC_SELF_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.cc index 1cc480453c..99548a847a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.cc @@ -19,53 +19,33 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/assert.h" namespace mindspore { namespace lite { -STATUS TFAssertParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF AssertParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::AssertT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFAssertParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Assert>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "summarize", &attr_value)) { MS_LOG(ERROR) << "The summarize attr should be specified"; - return RET_ERROR; - } - attr->summarize = attr_value.i(); - - primitive->value.type = schema::PrimitiveType_Assert; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_summarize(attr_value.i()); *output_size = 0; // Assert has no output for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, 
inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + + return prim.release(); } + TFNodeRegistrar g_tfAssertParser("Assert", new TFAssertParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.h index 818cf15b5d..b1f3b1cc52 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_assert_parser.h @@ -28,8 +28,9 @@ class TFAssertParser : public TFNodeParser { TFAssertParser() = default; ~TFAssertParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.cc index cf3cdb09bb..f0c71afb97 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.cc @@ -19,53 +19,32 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/mat_mul.h" namespace mindspore { namespace lite { -STATUS TFBatchMatMulParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(DEBUG) << "TF BatchMatMulParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::MatMulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFBatchMatMulParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::MatMul>(); + tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "adj_x", &attr_value)) { MS_LOG(ERROR) << "The adj_x attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->transposeA = attr_value.b(); + prim->set_transpose_a(attr_value.b()); if (!TensorFlowUtils::FindAttrValue(tf_op, "adj_y", &attr_value)) { MS_LOG(ERROR) << "The adj_y attr should be specified"; - return RET_ERROR; - } - attr->transposeB = attr_value.b(); - primitive->value.type = schema::PrimitiveType_MatMul; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_transpose_b(attr_value.b()); + *output_size = 1; - for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, 
inputs); - if (status != RET_OK) { - return status; - } + for (int i = 0; i < tf_op.input_size(); i++) { + inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfBatchMatMulParser("BatchMatMul", new TFBatchMatMulParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.h index af5d7c3439..287e295700 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_batch_matmul_parser.h @@ -28,8 +28,9 @@ class TFBatchMatMulParser : public TFNodeParser { TFBatchMatMulParser() = default; ~TFBatchMatMulParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.cc index 75f8db75c4..129c905d9d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.cc @@ -19,45 +19,24 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/batch_to_space.h" namespace mindspore { namespace lite { -STATUS TFBatchToSpaceNDParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(WARNING) << "TF BatchToSpaceNDParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::BatchToSpaceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_BatchToSpace; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFBatchToSpaceNDParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::BatchToSpace>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + + return prim.release(); } TFNodeRegistrar g_tfBatchToSpaceNDParser("BatchToSpaceND", new TFBatchToSpaceNDParser()); TFNodeRegistrar g_tfBatchToSpaceParser("BatchToSpace", new TFBatchToSpaceNDParser()); diff --git 
a/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.h index c28f194088..3c5befaa57 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_batch_to_space_nd_parser.h @@ -28,8 +28,9 @@ class TFBatchToSpaceNDParser : public TFNodeParser { TFBatchToSpaceNDParser() = default; ~TFBatchToSpaceNDParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.cc index 5d0cb25ca2..4e02d7a276 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.cc @@ -19,46 +19,26 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fused_batch_norm.h" namespace mindspore { namespace lite { -STATUS TFBatchNormParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF BatchNormParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFBatchNormParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::FusedBatchNorm>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::FusedBatchNormT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } tensorflow::AttrValue attr_value; TensorFlowUtils::FindAttrValue(tf_op, "epsilon", &attr_value); - attr->epsilon = attr_value.f(); - - primitive->value.type = schema::PrimitiveType_FusedBatchNorm; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_epsilon(attr_value.f()); *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } + TFNodeRegistrar g_tfBatchNormParser("FusedBatchNormV3", new TFBatchNormParser()); TFNodeRegistrar g_tfFusedBatchNormParser("FusedBatchNorm", new TFBatchNormParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.h index 44b388afb5..15a9b283a3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_batchnorm_parser.h @@ -28,8 +28,9 @@ class TFBatchNormParser : public TFNodeParser { 
TFBatchNormParser() = default; ~TFBatchNormParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.cc index 52cb99a407..1e613ff621 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.cc @@ -19,47 +19,24 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/bias_add.h" namespace mindspore { namespace lite { -STATUS TFBiasAddParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF BiasAddParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::BiasAddT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - attr->axis = {1}; - - primitive->value.type = schema::PrimitiveType_BiasAdd; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFBiasAddParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::BiasAdd>(); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfBiasAddParser("BiasAdd", new TFBiasAddParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.h index 12fa610a01..fd5e0557fd 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_biasadd_parser.h @@ -29,8 +29,9 @@ class TFBiasAddParser : public TFNodeParser { TFBiasAddParser() = default; ~TFBiasAddParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace 
mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.cc index 27ac7c988f..92621ada31 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.cc @@ -19,47 +19,31 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/cast.h" namespace mindspore { namespace lite { -STATUS TFCastParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF CastParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFCastParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Cast>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::CastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } auto dst_type = TensorFlowUtils::ParseAttrDataType(tf_op, "DstT"); if (dst_type == kTypeUnknown) { MS_LOG(ERROR) << "Get attr DstT failed"; - return RET_ERROR; + return nullptr; } - attr->dstT = dst_type; + prim->AddAttr("to", MakeValue(static_cast<int32_t>(dst_type))); - primitive->value.type = schema::PrimitiveType_Cast; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfCastParser("Cast", new TFCastParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.h index 5f0bca39a0..267ad49abc 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_cast_parser.h @@ -28,8 +28,9 @@ class TFCastParser : public TFNodeParser { TFCastParser() = default; ~TFCastParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.cc index 0b87142e93..036c5e3dd4 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.cc @@ -19,65 +19,39 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" 
+#include "ops/concat.h" namespace mindspore { namespace lite { -STATUS TFConcatParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ConcatParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ConcatT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFConcatParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Concat>(); auto axis_node = GetConstInputNode(tf_node_map, tf_op.input(tf_op.input_size() - 1)); if (axis_node == nullptr) { MS_LOG(ERROR) << "get concat axis attr node failed"; - return RET_ERROR; + return nullptr; } tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { MS_LOG(ERROR) << "The value attr should be specified"; - return RET_ERROR; + return nullptr; } auto tensor_proto = attr_value.tensor(); - attr->axis = tensor_proto.int_val(0); - - if (!TensorFlowUtils::FindAttrValue(tf_op, "N", &attr_value)) { - MS_LOG(ERROR) << "The N attr should be specified"; - return RET_ERROR; - } - attr->n = (int32_t)attr_value.i(); - - primitive->value.type = schema::PrimitiveType_Concat; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axis(tensor_proto.int_val(0)); *output_size = 1; for (int i = 0; i < tf_op.input_size() - 1; ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } } - return RET_OK; + + return prim.release(); } + TFNodeRegistrar g_tfConcatV2Parser("ConcatV2", new TFConcatParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.h index ea9fccc142..130ac1ed88 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_concat_parser.h @@ -28,8 +28,9 @@ class TFConcatParser : public TFNodeParser { TFConcatParser() = default; ~TFConcatParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.cc index a775ec8786..28666d007e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.cc @@ -22,19 +22,34 @@ #include "schema/inner/model_generated.h" namespace mindspore { namespace lite { -namespace { -const uint32_t STRIDE_DEFAULT_VALUE = 1; -const uint32_t DILATION_DEFAULT_VALUE = 1; -} // namespace -STATUS TFConvBaseParser::ParseStrides(const tensorflow::NodeDef &node_def, const schema::Format &format, +STATUS TFConvBaseParser::ParseKernels(const tensorflow::NodeDef &node_def, const mindspore::Format &format, + std::vector<int64_t> *kernel) { + tensorflow::AttrValue attr_value; + if (!TensorFlowUtils::FindAttrValue(node_def, "value", &attr_value)) { + MS_LOG(ERROR) << "The kernels should be specified"; + return RET_PARAM_INVALID; + } + auto shape = attr_value.tensor().tensor_shape(); + if (shape.dim().size() != 4) { + MS_LOG(ERROR) << "Dims of Kernel should be 4."; + return RET_PARAM_INVALID; + } + kernel->at(0) = shape.dim(0).size(); + kernel->at(1) = shape.dim(1).size(); + kernel->at(2) = shape.dim(2).size(); + kernel->at(3) = shape.dim(3).size(); + return RET_OK; +} + +STATUS TFConvBaseParser::ParseStrides(const tensorflow::NodeDef &node_def, const mindspore::Format &format, std::vector<int64_t> *strides) { tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(node_def, "strides", &attr_value)) { - strides->at(0) = STRIDE_DEFAULT_VALUE; - strides->at(1) = STRIDE_DEFAULT_VALUE; + strides->at(0) = 1; + strides->at(1) = 1; } else { auto stride_list = attr_value.list(); - if (format == schema::Format_NHWC) { + if (format == mindspore::NHWC) { strides->at(0) = stride_list.i(1); strides->at(1) = stride_list.i(2); } else { @@ -45,15 +60,15 @@ STATUS TFConvBaseParser::ParseStrides(const tensorflow::NodeDef &node_def, const return RET_OK; } -STATUS TFConvBaseParser::ParseDilations(const tensorflow::NodeDef &node_def, const schema::Format &format, +STATUS TFConvBaseParser::ParseDilations(const tensorflow::NodeDef &node_def, const mindspore::Format &format, std::vector<int64_t> *dilations) { tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(node_def, "dilations", &attr_value)) { - dilations->at(0) = DILATION_DEFAULT_VALUE; - dilations->at(1) = DILATION_DEFAULT_VALUE; + dilations->at(0) = 1; + dilations->at(1) = 1; } else { auto dilation_list = attr_value.list(); - if (format == schema::Format_NHWC) { + if (format == mindspore::NHWC) { dilations->at(0) = dilation_list.i(1); dilations->at(1) = dilation_list.i(2); } else { @@ -64,39 +79,16 @@ STATUS TFConvBaseParser::ParseDilations(const tensorflow::NodeDef &node_def, con return RET_OK; } -STATUS TFConvBaseParser::ParseKernels(const tensorflow::NodeDef &node_def, const schema::Format &format, - std::vector<int64_t> *kernel) { - tensorflow::AttrValue attr_value; - if (!TensorFlowUtils::FindAttrValue(node_def, "value", &attr_value)) { - MS_LOG(ERROR) << "The kernels should be specified"; - return RET_PARAM_INVALID; - } - auto shape = attr_value.tensor().tensor_shape(); - if (shape.dim().size() != 4) { - MS_LOG(ERROR) << "Dims of Kernel should be 4."; - return RET_PARAM_INVALID; - } - kernel->at(0) = shape.dim(0).size(); - kernel->at(1) = shape.dim(1).size(); - kernel->at(2) = shape.dim(2).size(); - kernel->at(3) = shape.dim(3).size(); - return RET_OK; -} - -STATUS TFConvBaseParser::ParsePadMode(const tensorflow::NodeDef &node_def, schema::PadMode *pad_mode) { +mindspore::PadMode TFConvBaseParser::ParsePadMode(const tensorflow::NodeDef &node_def) { tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(node_def,
"padding", &attr_value)) { MS_LOG(ERROR) << "The attr padding should be specified"; - return RET_PARAM_INVALID; + return mindspore::PadMode::VALID; } - if (attr_value.s() == "VALID") { - *pad_mode = schema::PadMode_VALID; - } else if (attr_value.s() == "SAME") { - *pad_mode = schema::PadMode_SAME_UPPER; - } else { - *pad_mode = schema::PadMode_NOTSET; + if (attr_value.s() == "SAME") { + return mindspore::PadMode::SAME; } - return RET_OK; + return mindspore::PadMode::VALID; } } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.h index d03f4167bc..37d195f504 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_conv_base_parser.h @@ -27,11 +27,14 @@ class TFConvBaseParser : public TFNodeParser { public: TFConvBaseParser() = default; ~TFConvBaseParser() override = default; - STATUS ParseStrides(const tensorflow::NodeDef &node_def, const schema::Format &format, std::vector<int64_t> *strides); - STATUS ParseDilations(const tensorflow::NodeDef &node_def, const schema::Format &format, - std::vector<int64_t> *dilations); - STATUS ParseKernels(const tensorflow::NodeDef &node_def, const schema::Format &format, std::vector<int64_t> *kernel); - STATUS ParsePadMode(const tensorflow::NodeDef &node_def, schema::PadMode *pad_mode); + + static STATUS ParseStrides(const tensorflow::NodeDef &node_def, const mindspore::Format &format, + std::vector<int64_t> *strides); + static STATUS ParseDilations(const tensorflow::NodeDef &node_def, const mindspore::Format &format, + std::vector<int64_t> *dilations); + static STATUS ParseKernels(const tensorflow::NodeDef &node_def, const mindspore::Format &format, + std::vector<int64_t> *kernel); + static mindspore::PadMode ParsePadMode(const tensorflow::NodeDef &node_def); }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.cc deleted file mode 100644 index 01fcf51e09..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.cc +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -#include "tools/converter/parser/tf/tf_conv_depthwise_parser.h" -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser_registry.h" -#include "tools/converter/parser/tf/tf_util.h" - -namespace mindspore { -namespace lite { -STATUS TFConvDepthwiseParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(DEBUG) << "TF ConvDepthwiseParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::DepthwiseConv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - attr->format = TensorFlowUtils::ParseNodeFormat(tf_op); - if (attr->format == schema::Format_NCHW) { - MS_LOG(ERROR) << "TF Conv2D with data_format=NCHW is not supported now"; - return RET_ERROR; - } - - std::vector<int64_t> dilations(2); - auto status = ParseDilations(tf_op, attr->format, &dilations); - if (status != RET_OK) { - return status; - } - attr->dilateH = dilations[0]; - attr->dilateW = dilations[1]; - - std::vector<int64_t> strides(2); - status = ParseStrides(tf_op, attr->format, &strides); - if (status != RET_OK) { - return status; - } - attr->strideH = strides[0]; - attr->strideW = strides[1]; - - auto weight_node = GetConstInputNode(tf_node_map, tf_op.input(1)); - if (weight_node != nullptr) { - std::vector<int64_t> kernels(4); - status = ParseKernels(*weight_node, attr->format, &kernels); - if (status != RET_OK) { - return status; - } - attr->kernelH = kernels[0]; - attr->kernelW = kernels[1]; - attr->channelIn = kernels[2]; - attr->channelMultiplier = kernels[3]; - } else { - attr->kernelH = -1; - attr->kernelW = -1; - attr->channelIn = -1; - attr->channelMultiplier = -1; - MS_LOG(WARNING) << "parsing of kernelH/W channelIn/Out is delayed"; - } - - status = ParsePadMode(tf_op, &attr->padMode); - if (status != RET_OK) { - return status; - } - - primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - - *output_size = 1; - status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; - } - status = AddOpInput(tf_op, 1, inputs); // weights - return status; -} -TFNodeRegistrar g_tfConvDepthwiseParser("DepthwiseConv2dNative", new TFConvDepthwiseParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.h deleted file mode 100644 index d9fc3a428e..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_depthwise_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_CONV_DEPTHWISE_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_CONV_DEPTHWISE_PARSER_H_ - -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_conv_base_parser.h" -namespace mindspore { -namespace lite { -class TFConvDepthwiseParser : public TFConvBaseParser { - public: - TFConvDepthwiseParser() = default; - ~TFConvDepthwiseParser() override = default; - - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_CONV_DEPTHWISE_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.cc index 0e89662182..621fcee9de 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.cc @@ -20,88 +20,70 @@ #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" #include "tools/converter/parser/tf/tf_util.h" +#include "ops/fusion/conv2d_fusion.h" namespace mindspore { namespace lite { -STATUS TFConvParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ConvParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFConvParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Conv2DFusion>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::Conv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } + prim->set_pad({0, 0, 0, 0}); + prim->set_group(1); - attr->group = 1; - attr->format = TensorFlowUtils::ParseNodeFormat(tf_op); + auto format = TensorFlowUtils::ParseNodeFormat(tf_op); + prim->set_format(format); std::vector<int64_t> dilations(2); - auto status = ParseDilations(tf_op, attr->format, &dilations); - if (status != RET_OK) { - return status; + if (ParseDilations(tf_op, format, &dilations) != RET_OK) { + MS_LOG(ERROR) << "parse dilations failed"; + return nullptr; } - attr->dilateH = dilations[0]; - attr->dilateW = dilations[1]; + prim->set_dilation(dilations); std::vector<int64_t> strides(2); - status = ParseStrides(tf_op, attr->format, &strides); - if (status != RET_OK) { - return status; + if (ParseStrides(tf_op, format, &strides) != RET_OK) { + MS_LOG(ERROR) << "parse strides failed"; + return nullptr; } - attr->strideH = 
strides[0]; - attr->strideW = strides[1]; + prim->set_stride(strides); auto weight_node = GetConstInputNode(tf_node_map, tf_op.input(1)); if (weight_node != nullptr) { std::vector<int64_t> kernels(4); - status = ParseKernels(*weight_node, attr->format, &kernels); - if (status != RET_OK) { - return status; + if (ParseKernels(*weight_node, format, &kernels) != RET_OK) { + MS_LOG(ERROR) << "parse kernels failed"; + return nullptr; } - attr->kernelH = kernels[0]; - attr->kernelW = kernels[1]; - attr->channelIn = kernels[2]; - attr->channelOut = kernels[3]; + prim->set_kernel_size({kernels[0], kernels[1]}); + prim->set_out_channel(kernels[3]); + prim->set_in_channel(kernels[2]); } else { - attr->kernelH = -1; - attr->kernelW = -1; - attr->channelIn = -1; - attr->channelOut = -1; + prim->set_kernel_size({0, 0}); + prim->set_out_channel(1); + prim->set_in_channel(1); MS_LOG(WARNING) << "parsing of kernelH/W channelIn/Out is delayed"; } - status = ParsePadMode(tf_op, &attr->padMode); - if (status != RET_OK) { - return status; - } - - primitive->value.type = schema::PrimitiveType_Conv2D; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + auto pad_mode = ParsePadMode(tf_op); + prim->set_pad_mode(pad_mode); *output_size = 1; - status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + if (tf_op.op() == "DepthwiseConv2dNative") { + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); + prim->set_group(prim->get_in_channel()); + prim->set_out_channel(prim->get_in_channel()); } - status = AddOpInput(tf_op, 1, inputs); // weights - return status; + + return prim.release(); } + TFNodeRegistrar g_tfConvParser("Conv2D", new TFConvParser()); +TFNodeRegistrar g_tfConvDepthwiseParser("DepthwiseConv2dNative", new TFConvParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.h index ffcc403272..0a1fe286df 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_conv_parser.h @@ -28,8 +28,9 @@ class TFConvParser : public TFConvBaseParser { TFConvParser() = default; ~TFConvParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_converter.cc b/mindspore/lite/tools/converter/parser/tf/tf_converter.cc deleted file mode 100644 index 13aff62310..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_converter.cc +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "tools/converter/parser/tf/tf_converter.h" -#include "tools/converter/parser/tf/tf_model_parser.h" -namespace mindspore { -namespace lite { -TFConverter::TFConverter() { modelParser = new TFModelParser(); } -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_converter.h b/mindspore/lite/tools/converter/parser/tf/tf_converter.h index 6e1a685c05..108d65f6ed 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_converter.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_converter.h @@ -18,13 +18,21 @@ #include <string> #include <memory> #include "tools/converter/converter.h" +#include "tools/converter/parser/tf/tf_model_parser.h" + namespace mindspore { namespace lite { class TFConverter : public Converter { public: - TFConverter(); + TFConverter() = default; + + ~TFConverter() override = default; - ~TFConverter() = default; + FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) override { + TFModelParser parser; + return parser.Parse(model_file, weight_file, quant_type); + } }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.cc index a88f5de5b5..1f415af4c3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.cc @@ -19,81 +19,43 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/crop_and_resize.h" namespace mindspore { namespace lite { -STATUS TFCropAndResizeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF CropAndResizeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFCropAndResizeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::CropAndResize>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::CropAndResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } tensorflow::AttrValue attr_value; - - // extrapolation_value if (!TensorFlowUtils::FindAttrValue(tf_op, "extrapolation_value", &attr_value)) { MS_LOG(ERROR) << "The extrapolation_value attr should be specified"; - return RET_ERROR; + return nullptr; } + prim->set_extrapolation_value(attr_value.f()); - attr->extrapolation_value = attr_value.f(); - - // method if (!TensorFlowUtils::FindAttrValue(tf_op, "method", &attr_value)) { MS_LOG(ERROR) << "The method attr
should be specified"; - return RET_ERROR; + return nullptr; } if (attr_value.s() == "bilinear") { - attr->method = schema::ResizeMethod_LINEAR; + prim->set_method(mindspore::ResizeMethod::LINEAR); } else if (attr_value.s() == "nearest_neighbor") { - attr->method = schema::ResizeMethod_NEAREST; + prim->set_method(mindspore::ResizeMethod::NEAREST); } else { MS_LOG(ERROR) << "Do not support method: " << attr_value.s(); } - primitive->value.type = schema::PrimitiveType_CropAndResize; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 2, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; + for (int i = 0; i < 4; ++i) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input-" << i << " failed."; + return nullptr; + } } - status = AddOpInput(tf_op, 3, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - return status; + + return prim.release(); } TFNodeRegistrar g_tfCropAndResizeParser("CropAndResize", new TFCropAndResizeParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.h index 61645df1c5..7901418720 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_crop_and_resize_parser.h @@ -28,8 +28,9 @@ class TFCropAndResizeParser : public TFNodeParser { TFCropAndResizeParser() = default; ~TFCropAndResizeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc index fd3f3ce800..6e83079439 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.cc @@ -20,87 +20,60 @@ #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" #include "tools/converter/parser/tf/tf_util.h" +#include "ops/fusion/conv2d_transpose_fusion.h" namespace mindspore { namespace lite { -STATUS TFDeconvParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(DEBUG) << "TF DeConvParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; 
- } - auto attr = std::make_unique<schema::DeConv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFDeconvParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Conv2dTransposeFusion>(); - attr->group = 1; - attr->format = TensorFlowUtils::ParseNodeFormat(tf_op); + prim->set_group(1); + prim->set_pad({0, 0, 0, 0}); + auto format = TensorFlowUtils::ParseNodeFormat(tf_op); + prim->set_format(format); std::vector<int64_t> dilations(2); - auto status = ParseDilations(tf_op, attr->format, &dilations); - if (status != RET_OK) { - return status; + if (ParseDilations(tf_op, format, &dilations) != RET_OK) { + MS_LOG(ERROR) << "parse dilations failed"; + return nullptr; } - attr->dilateH = dilations[0]; - attr->dilateW = dilations[1]; + prim->set_dilation({dilations[0], dilations[1]}); std::vector<int64_t> strides(2); - status = ParseStrides(tf_op, attr->format, &strides); - if (status != RET_OK) { - return status; + if (ParseStrides(tf_op, format, &strides) != RET_OK) { + MS_LOG(ERROR) << "parse strides failed"; + return nullptr; } - attr->strideH = strides[0]; - attr->strideW = strides[1]; + prim->set_stride({strides[0], strides[1]}); auto weight_node = GetConstInputNode(tf_node_map, tf_op.input(1)); if (weight_node != nullptr) { std::vector<int64_t> kernels(4); - status = ParseKernels(*weight_node, attr->format, &kernels); - if (status != RET_OK) { - return status; + if (ParseKernels(*weight_node, format, &kernels) != RET_OK) { + MS_LOG(ERROR) << "parse kernels failed"; + return nullptr; } - attr->kernelH = kernels[0]; - attr->kernelW = kernels[1]; - attr->channelOut = kernels[2]; - attr->channelIn = kernels[3]; + prim->set_kernel_size({kernels[0], kernels[1]}); + prim->set_out_channel(kernels[2]); + prim->set_in_channel(kernels[3]); } else { - attr->kernelH = -1; - attr->kernelW = -1; - attr->channelIn = -1; - attr->channelOut = -1; + prim->set_kernel_size({-1, -1}); + prim->set_out_channel(-1); + prim->set_in_channel(-1); MS_LOG(WARNING) << "parsing of kernelH/W channelIn/Out is delayed"; } - status = ParsePadMode(tf_op, &attr->padMode); - if (status != RET_OK) { - return status; - } - - primitive->value.type = schema::PrimitiveType_DeConv2D; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_pad_mode(ParsePadMode(tf_op)); *output_size = 1; - status = AddOpInput(tf_op, 2, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 2, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); // weights - return status; + + return prim.release(); } TFNodeRegistrar g_tf_deconv_parser("Conv2DBackpropInput", new TFDeconvParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.h index 9d2fde610e..ccc472e811 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_deconv_parser.h @@ -28,8 +28,9 @@ class TFDeconvParser : public TFConvBaseParser { TFDeconvParser() = default; ~TFDeconvParser() override = default; - STATUS Parse(const 
tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.cc index 5e7474fc30..9ba07d9521 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.cc @@ -19,45 +19,29 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/dropout.h" namespace mindspore { namespace lite { -STATUS TFDropoutParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF DropoutParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::DropoutT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFDropoutParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Dropout>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "ratio", &attr_value)) { MS_LOG(ERROR) << "The ratio attr should be specified"; - return RET_ERROR; - } - attr->ratio = static_cast<int32_t>(attr_value.i()); - primitive->value.type = schema::PrimitiveType_Dropout; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_keep_prob(attr_value.i()); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; + } + + return prim.release(); } TFNodeRegistrar g_tfDropoutParser("Dropout", new TFDropoutParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.h index 62e992ac89..ab43592b48 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_dropout_parser.h @@ -28,8 +28,9 @@ class TFDropoutParser : public TFNodeParser { TFDropoutParser() = default; ~TFDropoutParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace 
lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.cc index 5a3785b4dc..35824aa655 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.cc @@ -23,27 +23,17 @@ namespace mindspore { namespace lite { -STATUS TFEnterParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF EnterParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - *primitiveC = new (std::nothrow) Enter(); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFEnterParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<Enter>(); *output_size = tf_op.input_size(); for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfEnterParser("Enter", new TFEnterParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.h index e1f2119e11..c8c1d25844 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_enter_parser.h @@ -29,8 +29,9 @@ class TFEnterParser : public TFNodeParser { TFEnterParser() = default; ~TFEnterParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.cc index 8c42b5b27b..79d0a5b3d3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.cc @@ -22,27 +22,17 @@ namespace mindspore { namespace lite { -STATUS TFExitParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ExitParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - *primitiveC = new (std::nothrow) Exit(); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFExitParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<Exit>(); *output_size = tf_op.input_size(); for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } 
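// NOTE (illustrative sketch, not part of this patch): every parser converted in
// this change follows the same contract -- construct the unified-IR primitive,
// push the op's TF input names, set *output_size, and return the released
// pointer, with nullptr replacing the old STATUS error codes. "Foo", ops::Foo
// and TFFooParser below are hypothetical names used only to show that shape.
ops::PrimitiveC *TFFooParser::Parse(const tensorflow::NodeDef &tf_op,
                                    const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                                    std::vector<std::string> *inputs, int *output_size) {
  // One ops::PrimitiveC object replaces the old schema::PrimitiveT/attr pair.
  auto prim = std::make_unique<ops::Foo>();
  *output_size = 1;
  // Record the TF input edge; failure is now signalled by returning nullptr.
  if (AddOpInput(tf_op, 0, inputs) != RET_OK) {
    MS_LOG(ERROR) << "add op input failed";
    return nullptr;
  }
  return prim.release();  // ownership transfers to the caller
}
TFNodeRegistrar g_tfFooParser("Foo", new TFFooParser());  // registry hookup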
TFNodeRegistrar g_tfExitParser("Exit", new TFExitParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.h index f53bb92b40..9cb2e780df 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_exit_parser.h @@ -29,8 +29,9 @@ class TFExitParser : public TFNodeParser { TFExitParser() = default; ~TFExitParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.cc index a2e6bb2d8e..e7764765e3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.cc @@ -19,56 +19,24 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/expand_dims.h" namespace mindspore { namespace lite { -STATUS TFExpandDimsParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ExpandDimsParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ExpandDimsT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - auto axis_node = GetConstInputNode(tf_node_map, tf_op.input(1)); - if (axis_node == nullptr) { - MS_LOG(ERROR) << "Find ExpandDims input axis failed"; - return RET_ERROR; - } - tensorflow::AttrValue attr_value; - if (TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { - const auto &tensor_proto = attr_value.tensor(); - if (tensor_proto.int_val_size() > 0) { - attr->dim = tensor_proto.int_val(0); - } else { - attr->dim = (reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()))[0]; - } - } +ops::PrimitiveC *TFExpandDimsParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ExpandDims>(); - primitive->value.type = schema::PrimitiveType_ExpandDims; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfExpandDimsParser("ExpandDims", new TFExpandDimsParser()); } // 
namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.h index 68744c40a1..1f258c150f 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_expand_dims_parser.h @@ -28,8 +28,9 @@ class TFExpandDimsParser : public TFNodeParser { TFExpandDimsParser() = default; ~TFExpandDimsParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.cc index 801a28e5a9..524458ce49 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.cc @@ -19,87 +19,22 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fill.h" namespace mindspore { namespace lite { -STATUS TFFillParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF FillParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::FillT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFFillParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Fill>(); - inputs->emplace_back(tf_op.input(1)); - // parse dims - tensorflow::AttrValue attr_value; - auto dims_node = GetConstInputNode(tf_node_map, tf_op.input(0)); - if (dims_node != nullptr) { - if (!TensorFlowUtils::FindAttrValue(*dims_node, "value", &attr_value)) { - MS_LOG(ERROR) << "fill dims input not have value attr"; - return RET_ERROR; - } - if (attr_value.value_case() != tensorflow::AttrValue::kTensor) { - MS_LOG(ERROR) << "The attrValue of value should have tensor type, actual: " << attr_value.value_case() - << ", node: " << tf_op.name().c_str(); - return RET_ERROR; - } - const tensorflow::TensorProto &dims_tensor = attr_value.tensor(); - if (dims_tensor.dtype() != tensorflow::DT_INT32) { - MS_LOG(ERROR) << "The dimsTensor dataType should be DT_INT32, actual : " << dims_tensor.dtype(); - return RET_ERROR; - } - const tensorflow::TensorShapeProto &dims_tensor_shape = dims_tensor.tensor_shape(); - size_t shape_size = 1; - for (int i = 0; i < dims_tensor_shape.dim_size(); i++) { - shape_size *= dims_tensor_shape.dim(i).size(); - } - size_t size = dims_tensor.int_val().size(); - if (size > 0) { - for (size_t i = 0; i < shape_size; i++) { - attr->dims.emplace_back(dims_tensor.int_val().Get(i)); - 
} - } else { - size = dims_tensor.tensor_content().length(); - if (size > 0) { - if (size != shape_size * sizeof(int32_t)) { - MS_LOG(ERROR) << "tensor size mismatch"; - return RET_ERROR; - } - attr->dims.resize(shape_size); - if (EOK != ::memcpy_s(attr->dims.data(), size, dims_tensor.tensor_content().data(), size)) { - MS_LOG(ERROR) << "Memcpy_s from dimsTensor to attr failed"; - return RET_ERROR; - } - } else { - MS_LOG(DEBUG) << "empty dims"; - } - } - } else { - inputs->emplace_back(tf_op.input(0)); + *output_size = 1; + if (AddOpInput(tf_op, 1, inputs) != RET_OK || AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - primitive->value.type = schema::PrimitiveType_Fill; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - *output_size = 1; - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfFillParser("Fill", new TFFillParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.h index f663cf3917..a6bed38f9e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_fill_parser.h @@ -29,8 +29,9 @@ class TFFillParser : public TFNodeParser { TFFillParser() = default; ~TFFillParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.cc index 07b796b29b..f9c1a8274d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.cc @@ -19,49 +19,22 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/gather_nd.h" namespace mindspore { namespace lite { -STATUS TFGatherNDParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF GatherNDParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::GatherT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_GatherNd; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFGatherNDParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + 
std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::GatherNd>(); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; - return status; + return nullptr; } - return status; + + return prim.release(); } TFNodeRegistrar g_tfGatherNDParser("GatherNd", new TFGatherNDParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.h index c082cff4df..b288fa4a79 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_gather_nd_parser.h @@ -28,8 +28,9 @@ class TFGatherNDParser : public TFNodeParser { TFGatherNDParser() = default; ~TFGatherNDParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.cc index aec1ba450a..248a255e09 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.cc @@ -19,83 +19,64 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/gather.h" namespace mindspore { namespace lite { -STATUS TFGatherParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF GatherParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::GatherT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFGatherParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Gather>(); + int batchDims = 0; tensorflow::AttrValue attr_value; if (TensorFlowUtils::FindAttrValue(tf_op, "batch_dims", &attr_value)) { - attr->batchDims = attr_value.i(); + batchDims = attr_value.i(); } + int32_t axis = 1; bool axis_is_set = false; if (tf_op.input_size() == 3) { axis_is_set = true; auto axis_node = GetConstInputNode(tf_node_map, tf_op.input(2)); if (axis_node == nullptr) { MS_LOG(ERROR) << "Find Gather input axis failed"; - return RET_ERROR; + return nullptr; } if (!TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { MS_LOG(ERROR) << "The value attr should be specified"; - return 
RET_ERROR; + return nullptr; } auto tensor_proto = attr_value.tensor(); if (tensor_proto.dtype() == tensorflow::DT_INT32) { if (tensor_proto.int_val_size() > 0) { - attr->axis = tensor_proto.int_val(0); + axis = tensor_proto.int_val(0); } else { - attr->axis = (reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()))[0]; + axis = (reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()))[0]; } } else if (tensor_proto.dtype() == tensorflow::DT_INT64) { if (tensor_proto.int64_val_size() > 0) { - attr->axis = tensor_proto.int64_val(0); + axis = tensor_proto.int64_val(0); } else { - attr->axis = (reinterpret_cast<const int64_t *>(tensor_proto.tensor_content().data()))[0]; + axis = (reinterpret_cast<const int64_t *>(tensor_proto.tensor_content().data()))[0]; } } else { MS_LOG(ERROR) << "axis must be int32 or int64"; - return RET_ERROR; + return nullptr; } } - if (attr->batchDims != 0 && !axis_is_set) { - attr->axis = attr->batchDims; - } - - primitive->value.type = schema::PrimitiveType_Gather; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + if (batchDims != 0 && !axis_is_set) { + axis = batchDims; } + prim->AddAttr("axis", MakeValue(axis)); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } TFNodeRegistrar g_tfGatherV2Parser("GatherV2", new TFGatherParser()); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.h index 03e4e31ecd..9285c6452a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_gather_parser.h @@ -28,8 +28,9 @@ class TFGatherParser : public TFNodeParser { TFGatherParser() = default; ~TFGatherParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_if_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_if_parser.cc index 9fd87b24fb..7fc4b29cd3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_if_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_if_parser.cc @@ -19,42 +19,20 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/if.h" namespace mindspore { namespace lite { -STATUS TFIfParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF IfParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { 
- MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::IfT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_If; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFIfParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::If>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfStatelessIfParser("StatelessIf", new TFIfParser()); TFNodeRegistrar g_tfIfParser("If", new TFIfParser()); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_if_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_if_parser.h index 77874abc09..7be09f4d33 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_if_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_if_parser.h @@ -29,8 +29,9 @@ class TFIfParser : public TFNodeParser { TFIfParser() = default; ~TFIfParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.cc index 08c6e27d0d..2ef5b9c76d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.cc @@ -19,44 +19,21 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/invert_permutation.h" namespace mindspore { namespace lite { -STATUS TFInvertPermutationParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SizeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_InvertPermutation; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFInvertPermutationParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = 
std::make_unique<ops::InvertPermutation>(); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; - return status; + return nullptr; } - return status; + return prim.release(); } TFNodeRegistrar g_tfInvertPermutationParser("InvertPermutation", new TFInvertPermutationParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.h index 9510cae454..e66d5a3f47 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_invert_permutation_parser.h @@ -29,8 +29,9 @@ class TFInvertPermutationParser : public TFNodeParser { TFInvertPermutationParser() = default; ~TFInvertPermutationParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.cc index 81a2e52ed5..d11954a1fe 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.cc @@ -19,40 +19,21 @@ #include <string> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" -#include "tools/common/node_util.h" +#include "ops/is_finite.h" namespace mindspore { namespace lite { -STATUS TFIsFiniteParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - - int status = CreateOperator<schema::IsFiniteT>(primitive, schema::PrimitiveType_IsFinite); - if (status != RET_OK) { - return status; - } - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFIsFiniteParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::IsFinite>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tf_is_finite_parser("IsFinite", new TFIsFiniteParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.h index 22975d6225..911c0181be 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_is_finite_parser.h @@ -29,8 +29,9 @@ 
class TFIsFiniteParser : public TFNodeParser { TFIsFiniteParser() = default; ~TFIsFiniteParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.cc index aeded4ce47..2d2762a369 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.cc @@ -18,43 +18,24 @@ #include <memory> #include <map> #include <vector> +#include "ops/lin_space.h" #include "tools/converter/parser/tf/tf_node_parser_registry.h" namespace mindspore { namespace lite { -STATUS TFLinSpaceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { +ops::PrimitiveC *TFLinSpaceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { MS_LOG(DEBUG) << "TF LinSpaceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::LinSpaceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_LinSpace; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + auto prim = std::make_unique<ops::LinSpace>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); ++i) { auto status = AddOpInput(tf_op, i, inputs); if (status != RET_OK) { - return status; + return nullptr; } } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfLinSpaceParser("LinSpace", new TFLinSpaceParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.h index 18af673e29..6dc2cb1c83 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_linspace_parser.h @@ -28,8 +28,9 @@ class TFLinSpaceParser : public TFNodeParser { TFLinSpaceParser() = default; ~TFLinSpaceParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.cc 
b/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.cc index 4dc87fc77f..d70ca41f5c 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.cc @@ -19,51 +19,53 @@ #include <string> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" -#include "tools/common/node_util.h" +#include "ops/logical_and.h" +#include "ops/logical_or.h" +#include "ops/logical_not.h" namespace mindspore { namespace lite { -STATUS TFLogicalParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF LogicalParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFLogicalAndParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::LogicalAnd>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; + *output_size = 1; + for (int i = 0; i < tf_op.input_size(); i++) { + inputs->emplace_back(tf_op.input(i)); } - int status = RET_ERROR; - if (tf_op.op() == "LogicalAnd") { - status = CreateOperator<schema::LogicalAndT>(primitive, schema::PrimitiveType_LogicalAnd); - } else if (tf_op.op() == "LogicalOr") { - status = CreateOperator<schema::LogicalOrT>(primitive, schema::PrimitiveType_LogicalOr); - } else if (tf_op.op() == "LogicalNot") { - status = CreateOperator<schema::LogicalNotT>(primitive, schema::PrimitiveType_LogicalNot); - } - if (status != RET_OK) { - return status; - } - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return prim.release(); +} + +ops::PrimitiveC *TFLogicalOrParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::LogicalOr>(); + + *output_size = 1; + for (int i = 0; i < tf_op.input_size(); i++) { + inputs->emplace_back(tf_op.input(i)); } + return prim.release(); +} + +ops::PrimitiveC *TFLogicalNotParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::LogicalNot>(); + *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } -TFNodeRegistrar g_tfLogicalNotParser("LogicalNot", new TFLogicalParser()); -TFNodeRegistrar g_tfLogicalOrParser("LogicalOr", new TFLogicalParser()); -TFNodeRegistrar g_tfLogicalAndParser("LogicalAnd", new TFLogicalParser()); + +TFNodeRegistrar g_tfLogicalAndParser("LogicalAnd", new TFLogicalAndParser()); +TFNodeRegistrar g_tfLogicalOrParser("LogicalOr", new TFLogicalOrParser()); +TFNodeRegistrar g_tfLogicalNotParser("LogicalNot", new TFLogicalNotParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.h index c06893f9fd..95437ced53 100644 
--- a/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_logical_parser.h @@ -24,13 +24,34 @@ namespace mindspore { namespace lite { -class TFLogicalParser : public TFNodeParser { +class TFLogicalAndParser : public TFNodeParser { public: - TFLogicalParser() = default; - ~TFLogicalParser() override = default; + TFLogicalAndParser() = default; + ~TFLogicalAndParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLogicalOrParser : public TFNodeParser { + public: + TFLogicalOrParser() = default; + ~TFLogicalOrParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFLogicalNotParser : public TFNodeParser { + public: + TFLogicalNotParser() = default; + ~TFLogicalNotParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.cc index a1199aaf03..7a24b104ca 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.cc @@ -22,27 +22,17 @@ namespace mindspore { namespace lite { -STATUS TFLoopCondParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF LoopCondParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - *primitiveC = new (std::nothrow) LoopCond(); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFLoopCondParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<LoopCond>(); *output_size = tf_op.input_size(); for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfLoopCondParser("LoopCond", new TFLoopCondParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.h index 4ec996c47d..fd4cc2358a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_loop_cond_parser.h @@ -29,8 +29,9 @@ class TFLoopCondParser : public TFNodeParser { TFLoopCondParser() = default; ~TFLoopCondParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC 
**primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.cc index a0660f7301..3541629aa8 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.cc @@ -19,52 +19,32 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/mat_mul.h" namespace mindspore { namespace lite { -STATUS TFMatMulParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF MatMulParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFMatMulParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::MatMul>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::MatMulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } tensorflow::AttrValue attr_value; if (TensorFlowUtils::FindAttrValue(tf_op, "transpose_a", &attr_value)) { - attr->transposeA = attr_value.b(); + prim->set_transpose_a(attr_value.b()); } if (TensorFlowUtils::FindAttrValue(tf_op, "transpose_b", &attr_value)) { - attr->transposeB = attr_value.b(); - } - - primitive->value.type = schema::PrimitiveType_MatMul; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + prim->set_transpose_b(attr_value.b()); } *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfMatMulParser("MatMul", new TFMatMulParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.h index 8335b96fa7..986332f6af 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_matmul_parser.h @@ -29,8 +29,9 @@ class TFMatMulParser : public TFNodeParser { TFMatMulParser() = default; ~TFMatMulParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + 
std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.cc index ced56a501f..2a38c4638d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.cc @@ -19,43 +19,21 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/merge.h" namespace mindspore { namespace lite { -STATUS TFMergeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF MergeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::MergeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Merge; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFMergeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Merge>(); *output_size = tf_op.input_size(); for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfMergeParser("Merge", new TFMergeParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.h index 400d1ce10c..dccd6f7260 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_merge_parser.h @@ -29,8 +29,9 @@ class TFMergeParser : public TFNodeParser { TFMergeParser() = default; ~TFMergeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_model_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_model_parser.cc index aef43e3886..33f386775d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_model_parser.cc @@ -25,15 +25,20 @@ #include "tools/common/protobuf_utils.h" #include "tools/converter/parser/tf/tf_node_parser_registry.h" #include "tools/optimizer/common/gllo_utils.h" +#include "ops/return.h" +#include "ops/make_tuple.h" +#include "ops/tuple_get_item.h" +#include "ir/anf.h" +#include "tools/converter/converter_flags.h" namespace mindspore { namespace lite { 
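// [Editor's note -- not part of the patch] The anonymous-namespace helpers below
// assume the TensorList variant metadata is serialized as a varint64 sequence
// followed by a TensorShapeProto:
//   [num_tensors][one varint per tensor][element_dtype][one trailing varint][shape proto bytes]
// This layout is inferred from the decode order in CheckStrView, not from any
// TensorFlow documentation.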
namespace {
-static const std::vector<schema::PrimitiveType> tensorListOutputOpList = {
-  schema::PrimitiveType_TensorListFromTensor,
-  schema::PrimitiveType_TensorListSetItem,
-  schema::PrimitiveType_TensorListReserve,
-};
+bool IsTensorListOp(const AnfNodePtr &anf_node) {
+  return opt::CheckPrimitiveType(anf_node, prim::kPrimTensorListFromTensor) ||
+         opt::CheckPrimitiveType(anf_node, prim::kPrimTensorListSetItem) ||
+         opt::CheckPrimitiveType(anf_node, prim::kPrimTensorListReserve);
+}
 AnfNodePtr GetAnfNode(const std::string &name, const std::unordered_map<std::string, AnfNodePtr> &anf_node_map) {
   AnfNodePtr ret = nullptr;
@@ -61,24 +66,125 @@ std::string GetOriginInputName(const tensorflow::NodeDef &node,
   }
   return tmp_node->name();
 }
+
+// Decodes the varint64 prefix of the TensorList metadata in place. On success,
+// *scratch holds the element dtype and *str_view points at the serialized
+// TensorShapeProto. str_view must be taken by pointer: the caller reads the
+// advanced view afterwards.
+STATUS CheckStrView(std::string_view *str_view, uint64_t *scratch) {
+  uint64_t tensor_count = 0;
+  if (!TensorFlowUtils::DecodeInt64(str_view, &tensor_count)) {
+    return RET_ERROR;
+  }
+  for (size_t i = 0; i < static_cast<size_t>(tensor_count); ++i) {
+    if (!TensorFlowUtils::DecodeInt64(str_view, scratch)) {
+      return RET_ERROR;
+    }
+  }
+  if (!TensorFlowUtils::DecodeInt64(str_view, scratch)) {  // element dtype
+    return RET_ERROR;
+  }
+  uint64_t unused = 0;
+  if (!TensorFlowUtils::DecodeInt64(str_view, &unused)) {
+    return RET_ERROR;
+  }
+  return RET_OK;
+}
+
+STATUS GetFloatValue(const tensorflow::TensorProto &tensor_proto, const tensorflow::TensorShapeProto &tensor_shape,
+                     ParamValueLitePtr param_value, int shape_size) {
+  auto tensor_data = new (std::nothrow) float[shape_size];
+  if (tensor_data == nullptr) {
+    MS_LOG(ERROR) << "new data failed";
+    return RET_ERROR;
+  }
+
+  if (tensor_proto.float_val_size() == 1) {
+    for (int i = 0; i < shape_size; i++) {
+      tensor_data[i] = tensor_proto.float_val(0);
+    }
+  }
+  if (tensor_proto.tensor_content().size() == shape_size * sizeof(float)) {
+    const auto addr = reinterpret_cast<const float *>(tensor_proto.tensor_content().data());
+    if (::memcpy_s(tensor_data, shape_size * sizeof(float), addr, shape_size * sizeof(float)) != EOK) {
+      MS_LOG(ERROR) << "memcpy_s failed";
+      delete[] tensor_data;
+      return RET_ERROR;
+    }
+  }
+  auto tensor_size = shape_size * sizeof(float);
+  param_value->SetTensorData(tensor_data, tensor_size);
+  return RET_OK;
+}
+
+STATUS GetInt32Value(const tensorflow::TensorProto &tensor_proto, const tensorflow::TensorShapeProto &tensor_shape,
+                     ParamValueLitePtr param_value, int shape_size) {
+  auto tensor_data = new (std::nothrow) int[shape_size];
+  if (tensor_data == nullptr) {
+    MS_LOG(ERROR) << "new data failed";
+    return RET_ERROR;
+  }
+
+  if (tensor_proto.int_val_size() == 1) {
+    for (int i = 0; i < shape_size; i++) {
+      tensor_data[i] = tensor_proto.int_val(0);
+    }
+  }
+  if (shape_size != 0 && tensor_proto.tensor_content().size() == shape_size * sizeof(int32_t)) {
+    const auto addr = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
+    if (::memcpy_s(tensor_data, shape_size * sizeof(int32_t), addr, shape_size * sizeof(int32_t)) != EOK) {
+      MS_LOG(ERROR) << "memcpy_s failed";
+      delete[] tensor_data;
+      return RET_ERROR;
+    }
+  }
+  auto tensor_size = shape_size * sizeof(int);
+  param_value->SetTensorData(tensor_data, tensor_size);
+  return RET_OK;
+}
+
+STATUS GetInt64Value(const tensorflow::TensorProto &tensor_proto, const tensorflow::TensorShapeProto &tensor_shape,
+                     ParamValueLitePtr param_value, int shape_size) {
+  param_value->set_tensor_type(kNumberTypeInt32);
+  auto *tensor_data = new (std::nothrow) int[shape_size];
+  if (tensor_data == nullptr) {
+    MS_LOG(ERROR) << "new data failed";
+    return RET_ERROR;
+  }
+  if (tensor_shape.dim_size() == 0) {  // scalar
+    const auto &origin_data = tensor_proto.int64_val();
+    for (int i = 0; i < tensor_proto.int64_val_size(); ++i) {
+      if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) {
+        MS_LOG(ERROR) << "int64 data " << origin_data[i] << " too big to fit into int32";
+        delete[] tensor_data;
+        return RET_ERROR;
+      } else {
+        tensor_data[i] = static_cast<int>(origin_data[i]);
+      }
+    }
+  } else {
+    const auto origin_data = reinterpret_cast<const int64_t *>(tensor_proto.tensor_content().data());
+    for (int i = 0; i < shape_size; ++i) {
+      if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) {
+        MS_LOG(WARNING) << "int64 data " << origin_data[i] << " too big to fit into int32";
+        tensor_data[i] = origin_data[i] > 0 ? INT32_MAX : INT32_MIN;
+      } else {
+        tensor_data[i] = static_cast<int>(origin_data[i]);
+      }
+    }
+  }
+  param_value->SetTensorData(tensor_data, shape_size * sizeof(int32_t));
+  return RET_OK;
+}
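// [Editor's sketch -- not part of the patch] The non-scalar branch above clamps
// out-of-range int64 values to the int32 limits, while the scalar branch errors
// out instead. A hypothetical helper expressing the clamping rule:
static inline int32_t ClampInt64ToInt32(int64_t v) {
  if (v > static_cast<int64_t>(INT32_MAX)) return INT32_MAX;  // clamp high
  if (v < static_cast<int64_t>(INT32_MIN)) return INT32_MIN;  // clamp low
  return static_cast<int32_t>(v);                             // in range: plain narrowing
}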
+
 } // namespace

 STATUS TFModelParser::ConvertConstVariant(const tensorflow::TensorProto &tensor_proto,
                                           const ParamValueLitePtr &param_value) {
-  MS_ASSERT(param_value != nullptr);
-  auto variant_size = tensor_proto.variant_val_size();
-  if (variant_size != 1) {
+  if (tensor_proto.variant_val_size() != 1) {
     MS_LOG(ERROR) << "only support variant_val_size == 1 now";
     return RET_ERROR;
   }
   auto &variant = tensor_proto.variant_val(0);
-  if (variant.type_name() != "tensorflow::TensorList") {
-    MS_LOG(ERROR) << "Only TensorList type is supported now";
-    return RET_NOT_SUPPORT;
-  }
-  if (variant.tensors_size() > 0) {
-    MS_LOG(ERROR) << "Only empty tensorlist is supported now";
-    return RET_NOT_SUPPORT;
+  // Keep the original semantics: only an empty TensorList is supported, and an
+  // unsupported variant must not fall through into the metadata parsing below.
+  if (variant.type_name() != "tensorflow::TensorList" || variant.tensors_size() > 0) {
+    MS_LOG(ERROR) << "Only an empty TensorList is supported now";
+    return RET_NOT_SUPPORT;
   }
   auto descriptor = variant.GetMetadata().descriptor;
   auto reflection = variant.GetMetadata().reflection;
@@ -91,36 +197,21 @@ STATUS TFModelParser::ConvertConstVariant(const tensorflow::TensorProto &tensor_
     MS_LOG(ERROR) << "field_descriptor is nullptr";
     return RET_ERROR;
   }
-  auto type = field_descriptor->type();
-  if (type != google::protobuf::FieldDescriptor::TYPE_BYTES) {
+  if (field_descriptor->type() != google::protobuf::FieldDescriptor::TYPE_BYTES) {
     MS_LOG(ERROR) << "metadata type is not TYPE_BYTES";
     return RET_ERROR;
   }
-  auto str = reflection->GetString(variant, field_descriptor);
-  std::string_view str_view(str);
+  // GetString returns by value; keep the string alive so str_view cannot dangle.
+  auto metadata_str = reflection->GetString(variant, field_descriptor);
+  std::string_view str_view(metadata_str);
   uint64_t scratch;
-  if (!TensorFlowUtils::DecodeInt64(&str_view, &scratch)) {
-    return RET_ERROR;
-  }
-  size_t num_invalid_tensors = static_cast<size_t>(scratch);
-  for (size_t i = 0; i < num_invalid_tensors; ++i) {
-    if (!TensorFlowUtils::DecodeInt64(&str_view, &scratch)) {
-      return RET_ERROR;
-    }
-  }
-  if (!TensorFlowUtils::DecodeInt64(&str_view, &scratch)) {
-    return RET_ERROR;
-  }
-  size_t element_dtype = static_cast<size_t>(scratch);
-  if (!TensorFlowUtils::DecodeInt64(&str_view, &scratch)) {
+  if (CheckStrView(&str_view, &scratch) != RET_OK) {
     return RET_ERROR;
   }
-  std::string element_shape_str = std::string(str_view.data(), str_view.size());
+  auto element_dtype = static_cast<size_t>(scratch);
+
   tensorflow::TensorShapeProto element_shape_proto;
-  element_shape_proto.ParseFromString(element_shape_str);
+  element_shape_proto.ParseFromString(std::string(str_view.data(),
str_view.size()));
   auto dim_size = element_shape_proto.dim_size();
-  // we encode element_dtype,shape.size,shape[i]... into data
-  auto tensor_data = new (std::nothrow) int[dim_size + 2];
+  auto tensor_data = new (std::nothrow) int[dim_size + 2];  // encode element_dtype,shape.size,shape[i]... into data
   if (tensor_data == nullptr) {
     MS_LOG(ERROR) << "tensor_data is nullptr";
     return RET_ERROR;
@@ -137,7 +228,6 @@ STATUS TFModelParser::ConvertConstVariant(const tensorflow::TensorProto &tensor_
       tensor_data[i + 2] = static_cast<int>(dim);
     }
   }
-
   std::vector<int> tensor_list_data(dim_size + 2);
   tensor_list_data[0] = TensorFlowUtils::GetTFDataType(tensorflow::DataType(element_dtype));
   tensor_list_data[1] = element_shape_proto.dim_size();
@@ -170,70 +260,19 @@ STATUS TFModelParser::ConvertConstVariant(const tensorflow::TensorProto &tensor_
     MS_LOG(ERROR) << "memcpy_s failed";
     return RET_NULL_PTR;
   }
-
   param_value->SetTensorData(tensor_data_ptr, tensor_list_data.size() * sizeof(int));
   return RET_OK;
 }

-STATUS TFModelParser::ConvertConstTensor(const tensorflow::NodeDef &node_def, const tensorflow::AttrValue &attr_value,
-                                         const TypeId &type, const ParameterPtr &parameter,
-                                         std::vector<int64_t> *shape_vector) {
-  MS_ASSERT(parameter != nullptr);
-  MS_ASSERT(shape_vector != nullptr);
-  const tensorflow::TensorProto &tensor_proto = attr_value.tensor();
-  const tensorflow::TensorShapeProto &tensor_shape = tensor_proto.tensor_shape();
-  int shape_size = 1;
-  shape_vector->clear();
-  for (int i = 0; i < tensor_shape.dim_size(); i++) {
-    shape_vector->push_back(tensor_shape.dim(i).size());
-    shape_size *= tensor_shape.dim(i).size();
-  }
-
-  int tensor_size;
-  auto param_value = std::make_shared<ParamValueLite>();
-  if (param_value == nullptr) {
-    MS_LOG(ERROR) << "param_value is nullptr";
-    return RET_ERROR;
-  }
-  param_value->set_tensor_type(type);
+STATUS TFModelParser::GetValueFromType(const tensorflow::TensorProto &tensor_proto,
+                                       const tensorflow::TensorShapeProto &tensor_shape, ParamValueLitePtr param_value,
+                                       const TypeId &type, int shape_size) {
   if (type == kNumberTypeFloat32 || type == kNumberTypeFloat) {
-    auto tensor_data = new (std::nothrow) float[shape_size];
-    if (tensor_proto.float_val_size() == 1) {
-      float value = tensor_proto.float_val(0);
-      for (int i = 0; i < shape_size; i++) {
-        tensor_data[i] = value;
-      }
-    }
-    if (tensor_proto.tensor_content().size() == shape_size * sizeof(float)) {
-      const auto addr = reinterpret_cast<const float *>(tensor_proto.tensor_content().data());
-      auto ret = ::memcpy_s(tensor_data, shape_size * sizeof(float), addr, shape_size * sizeof(float));
-      if (ret != EOK) {
-        MS_LOG(ERROR) << "memcpy_s failed";
-        delete[] tensor_data;
-        return RET_ERROR;
-      }
-    }
-    tensor_size = shape_size * sizeof(float);
-    param_value->SetTensorData(tensor_data, tensor_size);
+    return GetFloatValue(tensor_proto, tensor_shape, param_value, shape_size);
   } else if (type == kNumberTypeInt32 || type == kNumberTypeInt) {
-    auto tensor_data = new (std::nothrow) int[shape_size];
-    if (tensor_proto.int_val_size() == 1) {
-      int value = tensor_proto.int_val(0);
-      for (int i = 0; i < shape_size; i++) {
-        tensor_data[i] = value;
-      }
-    }
-    if (shape_size != 0 && tensor_proto.tensor_content().size() == shape_size * sizeof(int32_t)) {
-      const auto addr = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
-      auto ret = ::memcpy_s(tensor_data, shape_size * sizeof(int32_t), addr, shape_size * sizeof(int32_t));
-      if (ret != EOK) {
-        MS_LOG(ERROR) << "memcpy_s failed";
-        delete[]
tensor_data; - return RET_ERROR; - } - } - tensor_size = shape_size * sizeof(int); - param_value->SetTensorData(tensor_data, tensor_size); + return GetInt32Value(tensor_proto, tensor_shape, param_value, shape_size); + } else if (type == kNumberTypeInt64) { + return GetInt64Value(tensor_proto, tensor_shape, param_value, shape_size); } else if (type == kNumberTypeBool) { auto tensor_data = new (std::nothrow) int[shape_size]; if (tensor_proto.bool_val_size() == 1) { @@ -242,63 +281,56 @@ STATUS TFModelParser::ConvertConstTensor(const tensorflow::NodeDef &node_def, co tensor_data[i] = value; } } - tensor_size = shape_size * sizeof(int); + auto tensor_size = shape_size * sizeof(int); param_value->SetTensorData(tensor_data, tensor_size); } else if (type == kObjectTypeTensorType) { - auto status = ConvertConstVariant(tensor_proto, param_value); - if (status != RET_OK) { - return status; - } + return ConvertConstVariant(tensor_proto, param_value); } else if (type == kObjectTypeString) { auto tensor_data = new (std::nothrow) string; if (tensor_proto.string_val_size() == 1) { - string value = tensor_proto.string_val(0); - *tensor_data = value; + *tensor_data = tensor_proto.string_val(0); } else { MS_LOG(ERROR) << "string size bigger than one, not support."; return RET_ERROR; } - tensor_size = (*tensor_data).size(); + auto tensor_size = (*tensor_data).size(); param_value->SetTensorData(tensor_data, tensor_size); - } else if (type == kNumberTypeInt64) { - param_value->set_tensor_type(kNumberTypeInt32); - auto *tensor_data = new (std::nothrow) int[shape_size]; - if (tensor_data == nullptr) { - MS_LOG(ERROR) << "new data failed"; - return RET_ERROR; - } - if (tensor_shape.dim_size() == 0) { // scalar - const auto &origin_data = tensor_proto.int64_val(); - for (int i = 0; i < tensor_proto.int64_val_size(); ++i) { - if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) { - MS_LOG(ERROR) << "int64 data " << origin_data[i] << "too big to fit into int32"; - return RET_ERROR; - } else { - tensor_data[i] = static_cast<int>(origin_data[i]); - } - } - } else { - const auto origin_data = reinterpret_cast<const int64_t *>(tensor_proto.tensor_content().data()); - for (int i = 0; i < shape_size; ++i) { - if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) { - MS_LOG(WARNING) << "int64 data " << origin_data[i] << "too big to fit into int32"; - tensor_data[i] = origin_data[i] > 0 ? 
INT32_MAX : INT32_MIN; - } else { - tensor_data[i] = static_cast<int>(origin_data[i]); - } - } - } - param_value->SetTensorData(tensor_data, shape_size * sizeof(int32_t)); } else { MS_LOG(ERROR) << "Unsupported dataType: " << type; return RET_ERROR; } + return RET_OK; +} + +STATUS TFModelParser::ConvertConstTensor(const tensorflow::NodeDef &node_def, const tensorflow::AttrValue &attr_value, + const TypeId &type, const ParameterPtr &parameter, + std::vector<int64_t> *shape_vector) { + MS_ASSERT(parameter != nullptr); + MS_ASSERT(shape_vector != nullptr); + const tensorflow::TensorProto &tensor_proto = attr_value.tensor(); + const tensorflow::TensorShapeProto &tensor_shape = tensor_proto.tensor_shape(); + int shape_size = 1; + shape_vector->clear(); + for (int i = 0; i < tensor_shape.dim_size(); i++) { + shape_vector->push_back(tensor_shape.dim(i).size()); + shape_size *= tensor_shape.dim(i).size(); + } + auto param_value = std::make_shared<ParamValueLite>(); + if (param_value == nullptr) { + MS_LOG(ERROR) << "param_value is nullptr"; + return RET_ERROR; + } + param_value->set_tensor_type(type); + if (GetValueFromType(tensor_proto, tensor_shape, param_value, type, shape_size) != RET_OK) { + MS_LOG(ERROR) << "get value from type failed."; + return RET_ERROR; + } std::vector<int> param_shape(shape_vector->begin(), shape_vector->end()); param_value->set_tensor_shape(param_shape); if (TensorFlowUtils::FindAttrValue(node_def, "data_format", const_cast<tensorflow::AttrValue *>(&attr_value))) { auto format = mindspore::lite::TensorFlowUtils::ParseNodeFormat(node_def); - if (format == schema::Format_NUM_OF_FORMAT) { + if (format == mindspore::Format::NUM_OF_FORMAT) { MS_LOG(ERROR) << "Do not support data format: " << attr_value.s(); } param_value->set_format(format); @@ -338,10 +370,8 @@ STATUS TFModelParser::ConvertParameter(const tensorflow::NodeDef &node, const Pa } else { graph_input_names_.emplace_back(node.name()); // only root graph need set graph input names } - if (type == kNumberTypeInt64) { - type = kNumberTypeInt32; - } - auto type_ptr = TypeIdToType(type); + + auto type_ptr = TypeIdToType(type == kNumberTypeInt64 ? 
kNumberTypeInt32 : type);
   auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector);
   if (abstract_tensor == nullptr) {
     MS_LOG(ERROR) << "abstract_tensor is nullptr";
@@ -457,19 +487,110 @@ FuncGraphPtr TFModelParser::Parse(const std::string &modelFile, const std::strin
   return anf_root_graph_;
 }
+
+STATUS TFModelParser::ConvertSubgraphInputs(std::map<std::string, const tensorflow::NodeDef *> *tf_sub_node_map,
+                                            std::unordered_map<std::string, AnfNodePtr> *anf_sub_node_map,
+                                            const tensorflow::FunctionDef &tf_sub_fuction, CNodePtr cnode,
+                                            FuncGraphPtr sub_func_graph) {
+  std::vector<ParameterPtr> sub_graph_inputs;
+  auto &tf_sub_signature = tf_sub_fuction.signature();
+  auto &sub_graph_name = tf_sub_signature.name();
+  auto input_arg_size = tf_sub_signature.input_arg_size();
+  for (int j = 0; j < input_arg_size; j++) {
+    auto &input_arg = tf_sub_signature.input_arg(j);
+    auto parameter = sub_func_graph->add_parameter();
+    parameter->set_name(input_arg.name());
+    (*anf_sub_node_map)[input_arg.name()] = parameter;
+    auto root_inputs = cnode->inputs();
+    if (opt::CheckPrimitiveType(cnode, prim::kPrimWhile)) {
+      parameter->set_abstract(root_inputs[j + 1]->abstract());
+    } else {
+      parameter->set_abstract(root_inputs[j + 2]->abstract());
+    }
+    sub_graph_inputs.emplace_back(parameter);
+  }
+  for (int j = 0; j < tf_sub_fuction.node_def_size(); j++) {
+    auto &node_def = tf_sub_fuction.node_def(j);
+    (*tf_sub_node_map)[node_def.name()] = &node_def;
+  }
+  if (ConvertGraphInputsAndConsts(*tf_sub_node_map, sub_func_graph, anf_sub_node_map) != RET_OK) {
+    MS_LOG(ERROR) << "Convert subgraph consts failed";
+    return RET_ERROR;
+  }
+
+  // hardcode subgraph inputs name
+  for (size_t j = 0; j < sub_graph_inputs.size(); j++) {
+    sub_graph_inputs[j]->set_name(sub_graph_name + "_input_" + std::to_string(j) + "_parameter");
+  }
+
+  return RET_OK;
+}
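// [Editor's sketch -- not part of the patch] The j+1 / j+2 offsets above follow
// from the CNode input layout: input 0 is always the primitive value node, and
// an If node is assumed to carry its condition as input 1 before the captured
// values. A hypothetical helper making the mapping explicit:
static inline size_t SubgraphParamToCNodeInput(bool is_while, size_t j) {
  return is_while ? j + 1 : j + 2;  // While: skip primitive; If: skip primitive + condition
}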
+STATUS TFModelParser::ConvertSubgraphOutputs(std::map<std::string, const tensorflow::NodeDef *> *tf_sub_node_map,
+                                             const std::unordered_map<std::string, AnfNodePtr> &anf_sub_node_map,
+                                             const tensorflow::FunctionDef &tf_sub_fuction,
+                                             FuncGraphPtr sub_func_graph) {
+  auto &tf_sub_signature = tf_sub_fuction.signature();
+  auto &sub_graph_name = tf_sub_signature.name();
+
+  std::vector<AnfNodePtr> sub_output_nodes;
+  auto &subgraph_ret = tf_sub_fuction.ret();
+  for (auto &output_arg : tf_sub_signature.output_arg()) {
+    auto &signature_name = output_arg.name();
+    auto t = subgraph_ret.find(signature_name);  // look up once instead of twice
+    if (t == subgraph_ret.end()) {
+      MS_LOG(ERROR) << "can't find signature_name: " << signature_name;
+      return RET_ERROR;
+    }
+    MS_LOG(INFO) << "subret " << t->first << " " << t->second;
+    auto tf_output_name = TensorFlowUtils::GetFlattenNodeName(t->second);
+    AnfNodePtr anf_node = nullptr;
+    if (tf_sub_node_map->find(tf_output_name) == tf_sub_node_map->end()) {
+      anf_node = GetAnfNode(tf_output_name, anf_sub_node_map);
+    } else {
+      auto tf_real_name = GetOriginInputName(*tf_sub_node_map->at(tf_output_name), *tf_sub_node_map);
+      anf_node = GetAnfNode(tf_real_name, anf_sub_node_map);
+    }
+    if (anf_node == nullptr) {
+      MS_LOG(ERROR) << "can't find anf node, tf node flatten name: " << tf_output_name;
+      return RET_ERROR;
+    }
+    sub_output_nodes.push_back(anf_node);
+  }
+  if (MakeAnfGraphOutputs(&sub_output_nodes, sub_func_graph) != RET_OK) {
+    MS_LOG(ERROR) << "make anf graph outputs node error";
+    return RET_ERROR;
+  }
+
+  // hardcode subgraph outputs name
+  if (sub_output_nodes.size() == 1) {
+    if (utils::isa<CNodePtr>(sub_output_nodes[0])) {
+      sub_output_nodes[0]->cast<CNodePtr>()->set_fullname_with_scope(sub_graph_name + "_output_0_cnode");
+    } else if (utils::isa<ParameterPtr>(sub_output_nodes[0])) {
+      sub_output_nodes[0]->cast<ParameterPtr>()->set_name(sub_graph_name + "_output_0_parameter");
    }
+  } else {
+    for (size_t j = 1; j < sub_output_nodes.size(); j++) {
+      if (utils::isa<CNodePtr>(sub_output_nodes[j])) {
+        sub_output_nodes[j]->cast<CNodePtr>()->set_fullname_with_scope(sub_graph_name + "_output_" +
+                                                                       std::to_string(j - 1) + "_cnode");
+      } else if (utils::isa<ParameterPtr>(sub_output_nodes[j])) {
+        sub_output_nodes[j]->cast<ParameterPtr>()->set_name(sub_graph_name + "_output_" + std::to_string(j - 1) +
+                                                            "_parameter");
+      }
+    }
+  }
+
+  return RET_OK;
+}
+
 STATUS TFModelParser::ConvertSubgraph() {
-  auto graph_def_liarary = tf_root_graph_->library();
-  auto subgraph_size = graph_def_liarary.function_size();
-  std::map<CNodePtr, FuncGraphPtr> while_cond_map;
-  std::map<CNodePtr, FuncGraphPtr> while_body_map;
-  std::map<CNodePtr, FuncGraphPtr> if_then_map;
-  std::map<CNodePtr, FuncGraphPtr> if_else_map;
+  std::map<CNodePtr, FuncGraphPtr> while_cond_map, while_body_map, if_then_map, if_else_map;
   bool success_flag = true;
-  for (int i = 0; i < subgraph_size; i++) {
-    auto &tf_sub_fuction = graph_def_liarary.function(i);
+  for (int i = 0; i < tf_root_graph_->library().function_size(); i++) {
+    auto &tf_sub_fuction = tf_root_graph_->library().function(i);
     auto &tf_sub_signature = tf_sub_fuction.signature();
     auto input_arg_size = tf_sub_signature.input_arg_size();
     auto &sub_graph_name = tf_sub_signature.name();
     CNodePtr cnode = nullptr;
     if (function_while_map_.count(sub_graph_name)) {
@@ -488,39 +609,19 @@ STATUS TFModelParser::ConvertSubgraph() {
       continue;
     }
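// [Editor's sketch -- not part of the patch] With the IR unification, op-type
// dispatch moves from schema::PrimitiveType comparisons (opt::GetCNodeType) to
// prim-based predicates, as the next hunk shows for kPrimWhile. A hypothetical
// predicate in the same style as IsTensorListOp, assuming a kPrimIf constant
// exists alongside kPrimWhile:
static inline bool IsControlFlowOp(const AnfNodePtr &node) {
  return opt::CheckPrimitiveType(node, prim::kPrimWhile) ||
         opt::CheckPrimitiveType(node, prim::kPrimIf);  // kPrimIf is assumed, not used in this patch
}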
sub_func_graph, &anf_sub_node_map); @@ -535,40 +636,13 @@ STATUS TFModelParser::ConvertSubgraph() { continue; } - // convert subgraph outputs - std::vector<AnfNodePtr> sub_output_nodes; - auto &subgraph_ret = tf_sub_fuction.ret(); - auto &output_args = tf_sub_signature.output_arg(); - for (auto &output_arg : output_args) { - auto &signature_name = output_arg.name(); - if (subgraph_ret.find(signature_name) == subgraph_ret.end()) { - MS_LOG(ERROR) << "can't find signature_name: " << signature_name; - return RET_ERROR; - } - auto t = subgraph_ret.find(signature_name); - MS_LOG(INFO) << "subret " << t->first << " " << t->second; - auto tf_output_name = TensorFlowUtils::GetFlattenNodeName(t->second); - AnfNodePtr anf_node = nullptr; - if (tf_sub_node_map.find(tf_output_name) == tf_sub_node_map.end()) { - anf_node = GetAnfNode(tf_output_name, anf_sub_node_map); - } else { - auto tf_real_name = GetOriginInputName(*tf_sub_node_map[tf_output_name], tf_sub_node_map); - anf_node = GetAnfNode(tf_real_name, anf_sub_node_map); - } - if (anf_node == nullptr) { - MS_LOG(ERROR) << "can't find anf node,tf node flatten name" << tf_output_name; - return RET_ERROR; - } - sub_output_nodes.push_back(anf_node); - } - status = MakeAnfGraphOutputs(&sub_output_nodes, sub_func_graph); - if (status != RET_OK) { - MS_LOG(ERROR) << "cmake anf graph outputs node error"; - return status; + if (ConvertSubgraphOutputs(&tf_sub_node_map, anf_sub_node_map, tf_sub_fuction, sub_func_graph) != RET_OK) { + MS_LOG(ERROR) << "Convert subgraph outputs failed."; + return RET_ERROR; } // add while cond body function to while node input - if (op_type == PrimitiveType_While) { + if (opt::CheckPrimitiveType(cnode, prim::kPrimWhile)) { if (sub_graph_name.find("cond") != std::string::npos) { while_cond_map[cnode] = sub_func_graph; } else { @@ -581,49 +655,22 @@ STATUS TFModelParser::ConvertSubgraph() { if_else_map[cnode] = sub_func_graph; } } - - // hardcode subgraph inputs name - for (size_t j = 0; j < sub_graph_inputs.size(); j++) { - sub_graph_inputs[j]->set_name(sub_graph_name + "_input_" + std::to_string(j) + "_parameter"); - } - // hardcode subgraph outputs name - if (sub_output_nodes.size() == 1) { - if (utils::isa<CNodePtr>(sub_output_nodes[0])) { - sub_output_nodes[0]->cast<CNodePtr>()->set_fullname_with_scope(sub_graph_name + "_output_0_cnode"); - } else if (utils::isa<ParameterPtr>(sub_output_nodes[0])) { - sub_output_nodes[0]->cast<ParameterPtr>()->set_name(sub_graph_name + "_output_0_parameter"); - } - } else { - for (size_t j = 1; j < sub_output_nodes.size(); j++) { - if (utils::isa<CNodePtr>(sub_output_nodes[j])) { - sub_output_nodes[j]->cast<CNodePtr>()->set_fullname_with_scope(sub_graph_name + "_output_" + - std::to_string(j - 1) + "_cnode"); - } else if (utils::isa<ParameterPtr>(sub_output_nodes[j])) { - sub_output_nodes[j]->cast<ParameterPtr>()->set_name(sub_graph_name + "_output_" + std::to_string(j - 1) + - "_parameter"); - } - } - } - - MS_LOG(INFO) << "parse subgraph end:" << sub_graph_name; } if (!success_flag) { MS_LOG(ERROR) << "Convert subgraph is failed."; return RET_ERROR; } - auto status = ControlFlowNodePostProcess(while_cond_map, while_body_map); - if (status != RET_OK) { + if (ControlFlowNodePostProcess(while_cond_map, while_body_map) != RET_OK) { MS_LOG(ERROR) << "while node post process failed"; - return status; + return RET_ERROR; } - - status = ControlFlowNodePostProcess(if_then_map, if_else_map); - if (status != RET_OK) { + if (ControlFlowNodePostProcess(if_then_map, if_else_map) != RET_OK) { 
MS_LOG(ERROR) << "if node post process failed"; - return status; + return RET_ERROR; } return RET_OK; } + STATUS TFModelParser::ControlFlowNodePostProcess(const std::map<CNodePtr, FuncGraphPtr> &first_func_map, const std::map<CNodePtr, FuncGraphPtr> &second_func_map) { if (first_func_map.size() != second_func_map.size()) { @@ -657,12 +704,6 @@ STATUS TFModelParser::ControlFlowNodePostProcess(const std::map<CNodePtr, FuncGr return RET_OK; } -schema::MetaGraphT *TFModelParser::ParseToFb(const std::string &modelFile, const std::string &weightFile, - const QuantType &quantType) { - MS_LOG(ERROR) << "TF Model Parser not return MetaGraph, use TFModelParser::Parse instead"; - return nullptr; -} - STATUS TFModelParser::ConvertInputNodes(const tensorflow::NodeDef &node_def, const std::vector<std::string> &input_names, const std::map<std::string, const tensorflow::NodeDef *> &tf_node_map, @@ -695,7 +736,7 @@ STATUS TFModelParser::ConvertOutputTensor(const tensorflow::NodeDef &op, const C MS_ASSERT(op != nullptr); MS_ASSERT(anf_node != nullptr); MS_ASSERT(anf_graph != nullptr); - if (IsContain(tensorListOutputOpList, opt::GetCNodeType(anf_node)) && output_size != 1) { + if (IsTensorListOp(anf_node) && output_size != 1) { MS_LOG(ERROR) << "tensorlist output op output_size !=1"; return RET_ERROR; } @@ -704,7 +745,7 @@ STATUS TFModelParser::ConvertOutputTensor(const tensorflow::NodeDef &op, const C } else if (output_size == 1) { auto type = kFloat32; std::vector<int64_t> shape_vector; - if (IsContain(tensorListOutputOpList, opt::GetCNodeType(anf_node))) { + if (IsTensorListOp(anf_node)) { type = TypeIdToType(kObjectTypeTensorType); } auto abstract = std::make_shared<abstract::AbstractTensor>(type, shape_vector); @@ -719,9 +760,9 @@ STATUS TFModelParser::ConvertOutputTensor(const tensorflow::NodeDef &op, const C for (int output_idx = 0; output_idx < output_size; output_idx++) { std::vector<int64_t> shape_vector; abstractList.emplace_back(std::make_shared<abstract::AbstractTensor>(kFloat32, shape_vector)); - auto tupleGetItemPrimPtr = GetTupleGetItemPrim(); + auto tupleGetItemPrimPtr = std::make_shared<ops::TupleGetItem>(); if (tupleGetItemPrimPtr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; + MS_LOG(ERROR) << "new TupleGetItem failed"; return RET_NULL_PTR; } auto tupleGetItemPrim = NewValueNode(tupleGetItemPrimPtr); @@ -776,6 +817,7 @@ STATUS TFModelParser::ConvertOps(const tensorflow::NodeDef &node_def, return RET_OK; } + MS_LOG(INFO) << "parse op : " << op_type; auto node_parser = TFNodeParserRegistry::GetInstance()->GetNodeParser(op_type); if (node_parser == nullptr) { NoSupportOp::GetInstance()->InsertOp(op_type); @@ -783,16 +825,15 @@ STATUS TFModelParser::ConvertOps(const tensorflow::NodeDef &node_def, << func_graph_ptr->get_attr("graph_name")->ToString(); return RET_NOT_FIND_OP; } - PrimitiveC *primitiveC = nullptr; + int output_size; std::vector<std::string> input_names; - status = node_parser->Parse(node_def, tf_node_map, &primitiveC, &input_names, &output_size); - if (status != RET_OK) { - MS_LOG(ERROR) << "node " << node_def.name() << " parser failed in " - << func_graph_ptr->get_attr("graph_name")->ToString(); + auto primitiveC = node_parser->Parse(node_def, tf_node_map, &input_names, &output_size); + if (primitiveC == nullptr) { + MS_LOG(ERROR) << "node " << op_type << " parser failed"; return RET_ERROR; } - auto value_node = NewValueNode(std::shared_ptr<PrimitiveC>(primitiveC)); + auto value_node = NewValueNode(std::shared_ptr<ops::PrimitiveC>(primitiveC)); if 
(value_node == nullptr) { MS_LOG(ERROR) << "value_node is nullptr"; return RET_ERROR; @@ -890,9 +931,9 @@ STATUS TFModelParser::MakeAnfGraphOutputs(std::vector<AnfNodePtr> *output_nodes, } if (output_nodes->size() > 1) { std::vector<AnfNodePtr> *make_tuple_inputs = output_nodes; - auto make_tuple_prim_ptr = GetMakeTuplePrim(); + auto make_tuple_prim_ptr = std::make_shared<ops::MakeTuple>(); if (make_tuple_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; + MS_LOG(ERROR) << "new MakeTuple failed"; return RET_NULL_PTR; } auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr); @@ -900,9 +941,9 @@ STATUS TFModelParser::MakeAnfGraphOutputs(std::vector<AnfNodePtr> *output_nodes, auto make_tuple_cnode = anf_graph->NewCNode(*make_tuple_inputs); make_tuple_cnode->set_fullname_with_scope("return tuple"); - auto return_prim_ptr = GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto value_node = NewValueNode(return_prim_ptr); @@ -911,9 +952,9 @@ STATUS TFModelParser::MakeAnfGraphOutputs(std::vector<AnfNodePtr> *output_nodes, cnode->set_fullname_with_scope("Return"); anf_graph->set_return(cnode); } else { - auto return_prim_ptr = GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto value_node = NewValueNode(return_prim_ptr); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_model_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_model_parser.h index cd16e76378..a7f6a54b55 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_model_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_model_parser.h @@ -29,7 +29,7 @@ #include "securec/include/securec.h" #include "tools/common/tensor_util.h" #include "tools/converter/model_parser.h" -#include "mindspore/lite/src/param_value_lite.h" +#include "src/param_value_lite.h" namespace mindspore { namespace lite { @@ -40,26 +40,25 @@ class TFModelParser : public ModelParser { FuncGraphPtr Parse(const std::string &modelFile, const std::string &weightFile, const QuantType &quantType); - protected: - schema::MetaGraphT *ParseToFb(const std::string &modelFile, const std::string &weightFile, - const QuantType &quantType = QuantType_QUANT_NONE) override; - private: - STATUS ConvertConstVariant(const tensorflow::TensorProto &tensor_proto, const ParamValueLitePtr &param_value); + static STATUS ConvertConstVariant(const tensorflow::TensorProto &tensor_proto, const ParamValueLitePtr &param_value); STATUS ConvertConstTensor(const tensorflow::NodeDef &node_def, const tensorflow::AttrValue &attr_value, const TypeId &type, const ParameterPtr &parameter, std::vector<int64_t> *shape_vector); + static STATUS GetValueFromType(const tensorflow::TensorProto &tensor_proto, + const tensorflow::TensorShapeProto &tensor_shape, ParamValueLitePtr param_value, + const TypeId &type, int shape_size); STATUS ConvertParameter(const tensorflow::NodeDef &node, const ParameterPtr &parameter, std::unordered_map<std::string, AnfNodePtr> *anf_node_map); STATUS ConvertGraphInputsAndConsts(const std::map<std::string, const tensorflow::NodeDef *> &tf_graph_nodes, const FuncGraphPtr &anf_graph, std::unordered_map<std::string, AnfNodePtr> *anf_node_map); - STATUS ConvertInputNodes(const tensorflow::NodeDef 
&node_def, const std::vector<std::string> &input_names, - const std::map<std::string, const tensorflow::NodeDef *> &tf_node_map, - const std::unordered_map<std::string, AnfNodePtr> &anf_node_map, - std::vector<AnfNodePtr> *inputs, std::vector<std::string> *input_name_not_found); - STATUS ConvertOutputTensor(const tensorflow::NodeDef &op, const CNodePtr &anf_node, - std::unordered_map<std::string, AnfNodePtr> *anf_node_map, const FuncGraphPtr &anf_graph, - int output_size); + static STATUS ConvertInputNodes(const tensorflow::NodeDef &node_def, const std::vector<std::string> &input_names, + const std::map<std::string, const tensorflow::NodeDef *> &tf_node_map, + const std::unordered_map<std::string, AnfNodePtr> &anf_node_map, + std::vector<AnfNodePtr> *inputs, std::vector<std::string> *input_name_not_found); + static STATUS ConvertOutputTensor(const tensorflow::NodeDef &op, const CNodePtr &anf_node, + std::unordered_map<std::string, AnfNodePtr> *anf_node_map, + const FuncGraphPtr &anf_graph, int output_size); STATUS ConvertOps(const tensorflow::NodeDef &node_def, const std::map<std::string, const tensorflow::NodeDef *> &tf_node_map, const FuncGraphPtr &func_graph_ptr, std::unordered_map<std::string, AnfNodePtr> *anf_node_map); @@ -67,10 +66,19 @@ class TFModelParser : public ModelParser { STATUS ConvertSubgraph(); + STATUS ConvertSubgraphInputs(std::map<std::string, const tensorflow::NodeDef *> *tf_sub_node_map, + std::unordered_map<std::string, AnfNodePtr> *anf_sub_node_map, + const tensorflow::FunctionDef &tf_sub_fuction, CNodePtr cnode, + FuncGraphPtr sub_func_graph); + + static STATUS ConvertSubgraphOutputs(std::map<std::string, const tensorflow::NodeDef *> *tf_sub_node_map, + const std::unordered_map<std::string, AnfNodePtr> &anf_sub_node_map, + const tensorflow::FunctionDef &tf_sub_fuction, FuncGraphPtr sub_func_graph); + STATUS ControlFlowNodePostProcess(const std::map<CNodePtr, FuncGraphPtr> &first_func_map, const std::map<CNodePtr, FuncGraphPtr> &second_func_map); - STATUS MakeAnfGraphOutputs(std::vector<AnfNodePtr> *output_nodes, const FuncGraphPtr &anf_graph); + static STATUS MakeAnfGraphOutputs(std::vector<AnfNodePtr> *output_nodes, const FuncGraphPtr &anf_graph); STATUS RecordNullInput(const CNodePtr &node, const std::vector<std::string> &input_name_not_found); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.cc index 9469266525..3253f4bad3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.cc @@ -22,27 +22,17 @@ namespace mindspore { namespace lite { -STATUS TFNextIterationParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF NextIterationParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - *primitiveC = new (std::nothrow) NextIteration(); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFNextIterationParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<NextIteration>(); *output_size = tf_op.input_size(); for (int 
i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfNextIterationParser("NextIteration", new TFNextIterationParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.h index 430a42fe13..2cfa9d601d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_next_iteration_parser.h @@ -29,8 +29,9 @@ class TFNextIterationParser : public TFNodeParser { TFNextIterationParser() = default; ~TFNextIterationParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_node_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_node_parser.h index 2b36a83eef..a9adf8cb90 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_node_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_node_parser.h @@ -23,7 +23,8 @@ #include <memory> #include "tools/converter/parser/tf/tf_util.h" #include "proto/graph.pb.h" -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" +#include "mindspore/core/utils/check_convert_utils.h" namespace mindspore { namespace lite { @@ -33,10 +34,10 @@ class TFNodeParser { virtual ~TFNodeParser() = default; - virtual STATUS Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - return RET_OK; + virtual ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + return nullptr; } STATUS AddOpInput(const tensorflow::NodeDef &tf_op, const int idx, std::vector<std::string> *inputs); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.cc index c261e3e32f..e231e430bf 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.cc @@ -19,64 +19,26 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/non_max_suppression.h" namespace mindspore { namespace lite { -STATUS TFNonMaxSuppressionParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF NonMaxSuppressionParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFNonMaxSuppressionParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::NonMaxSuppression>(); - 
auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::NonMaxSuppressionT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - attr->centerPointBox = 0; - primitive->value.type = schema::PrimitiveType_NonMaxSuppression; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_center_point_box(0); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; + for (int i = 0; i < 5; i++) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input " << i << " failed."; + return nullptr; + } } - status = AddOpInput(tf_op, 2, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 3, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 4, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - return status; + + return prim.release(); } TFNodeRegistrar g_tfNonMaxSuppressionV3Parser("NonMaxSuppressionV3", new TFNonMaxSuppressionParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.h index 3471e80e74..4915ffe6da 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_non_max_suppression_parser.h @@ -29,8 +29,9 @@ class TFNonMaxSuppressionParser : public TFNodeParser { TFNonMaxSuppressionParser() = default; ~TFNonMaxSuppressionParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.cc index 07144c651f..9cc8637f16 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.cc @@ -19,48 +19,31 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/one_hot.h" namespace mindspore { namespace lite { -STATUS TFOneHotParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF OneHotParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == 
nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::OneHotT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFOneHotParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::OneHot>(); + tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "axis", &attr_value)) { MS_LOG(ERROR) << "The axis attr should be specified"; - return RET_ERROR; - } - attr->axis = static_cast<int32_t>(attr_value.i()); - primitive->value.type = schema::PrimitiveType_OneHot; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_axis(attr_value.i()); + *output_size = 1; for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input " << i << " failed."; + return nullptr; } } - return RET_OK; + + return prim.release(); } TFNodeRegistrar g_tfOneHotParser("OneHot", new TFOneHotParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.h index ead8ca7cd3..0ceab33391 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_one_hot_parser.h @@ -28,8 +28,9 @@ class TFOneHotParser : public TFNodeParser { TFOneHotParser() = default; ~TFOneHotParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.cc index 5a0d2d872d..ab2b3c3de5 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.cc @@ -19,59 +19,33 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/stack.h" namespace mindspore { namespace lite { -STATUS TFPackParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF PackParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::StackT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFPackParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, 
const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Stack>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "axis", &attr_value)) { MS_LOG(ERROR) << "The axis attr should be specified"; - return RET_ERROR; - } - attr->axis = static_cast<int32_t>(attr_value.i()); - - if (!TensorFlowUtils::FindAttrValue(tf_op, "N", &attr_value)) { - MS_LOG(ERROR) << "The axis attr should be specified"; - return RET_ERROR; - } - attr->n = static_cast<int32_t>(attr_value.i()); - - primitive->value.type = schema::PrimitiveType_Stack; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_axis({attr_value.i()}); *output_size = 1; for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + + return prim.release(); } + TFNodeRegistrar g_tfPackParser("Pack", new TFPackParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.h index 9fa7eaf96b..d5630f6ef7 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_pack_parser.h @@ -28,8 +28,9 @@ class TFPackParser : public TFNodeParser { TFPackParser() = default; ~TFPackParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.cc index 87e14126a5..489a8c3603 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.cc @@ -19,69 +19,43 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/pad_fusion.h" namespace mindspore { namespace lite { -STATUS TFPadParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF PadParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFPadParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::PadFusion>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::PadT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - 
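// ---------------------------------------------------------------------------
// [Editorial aside -- illustrative, not part of the patch.] Parsers are wired
// up by file-scope TFNodeRegistrar objects such as g_tfPackParser above. The
// registrar's constructor runs during static initialization and maps a TF op
// name onto a parser instance, so several op names can share one parser class
// (the Pad/MirrorPad and Sum/Max/Min registrations later in this patch rely on
// exactly that). With a hypothetical TFFooParser:
//
//   TFNodeRegistrar g_tfFooParser("Foo", new TFFooParser());
//   TFNodeRegistrar g_tfFooV2Parser("FooV2", new TFFooParser());  // alias op name
// ---------------------------------------------------------------------------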
return RET_NULL_PTR; - } if (tf_op.op() == "Pad") { - attr->paddingMode = schema::PaddingMode_CONSTANT; - attr->constantValue = 0.0f; + prim->set_padding_mode(mindspore::PaddingMode::CONSTANT); + prim->set_constant_value(0.0f); } else if (tf_op.op() == "MirrorPad") { tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "mode", &attr_value)) { MS_LOG(ERROR) << "The mode attr should be specified"; - return RET_ERROR; + return nullptr; } if (attr_value.s() == "SYMMETRIC") { - attr->paddingMode = schema::PaddingMode_SYMMETRIC; + prim->set_padding_mode(mindspore::PaddingMode::SYMMETRIC); } else if (attr_value.s() == "REFLECT") { - attr->paddingMode = schema::PaddingMode_REFLECT; + prim->set_padding_mode(mindspore::PaddingMode::REFLECT); } else { MS_LOG(ERROR) << "padding mode:" << attr_value.s() << " is not supported"; - return RET_ERROR; + return nullptr; } } - primitive->value.type = schema::PrimitiveType_Pad; - primitive->value.value = attr.release(); - - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; - return status; + return nullptr; } - return status; + + return prim.release(); } TFNodeRegistrar g_tfPadParser("Pad", new TFPadParser()); TFNodeRegistrar g_tfMirrorPadParser("MirrorPad", new TFPadParser()); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.h index 633b376b23..e3aaa95567 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_pad_parser.h @@ -28,8 +28,9 @@ class TFPadParser : public TFNodeParser { TFPadParser() = default; ~TFPadParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.cc index 6886d5afc9..8b523aff18 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.cc @@ -20,83 +20,98 @@ #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" #include "tools/converter/parser/tf/tf_util.h" +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/max_pool_fusion.h" namespace mindspore { namespace lite { -STATUS TFPoolParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF PoolParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; +ops::PrimitiveC *TFMaxPoolParser::Parse(const 
tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::MaxPoolFusion>(); + + tensorflow::AttrValue attr_value; + if (TensorFlowUtils::FindAttrValue(tf_op, "padding", &attr_value)) { + if (attr_value.s() == "VALID") { + prim->set_pad_mode(mindspore::PadMode::VALID); + } else if (attr_value.s() == "SAME") { + prim->set_pad_mode(mindspore::PadMode::SAME); + } } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; + auto format = TensorFlowUtils::ParseNodeFormat(tf_op); + prim->set_format(format); + + if (TensorFlowUtils::FindAttrValue(tf_op, "strides", &attr_value)) { + const auto &stride_list = attr_value.list(); + if (format == mindspore::Format::NCHW) { + prim->set_strides({stride_list.i(2), stride_list.i(3)}); + } else { + prim->set_strides({stride_list.i(1), stride_list.i(2)}); + } } - auto attr = std::make_unique<schema::PoolingT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; + + if (TensorFlowUtils::FindAttrValue(tf_op, "ksize", &attr_value)) { + const auto &kernel_list = attr_value.list(); + if (format == mindspore::Format::NCHW) { + prim->set_kernel_size({kernel_list.i(2), kernel_list.i(3)}); + } else { + prim->set_kernel_size({kernel_list.i(1), kernel_list.i(2)}); + } } - if (tf_op.op() == "MaxPool") { - attr->poolingMode = schema::PoolMode_MAX_POOLING; - } else if (tf_op.op() == "AvgPool") { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; + *output_size = 1; + for (int i = 0; i < tf_op.input_size(); i++) { + inputs->emplace_back(tf_op.input(i)); } + return prim.release(); +} + +ops::PrimitiveC *TFAvgPoolParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::AvgPoolFusion>(); + tensorflow::AttrValue attr_value; if (TensorFlowUtils::FindAttrValue(tf_op, "padding", &attr_value)) { if (attr_value.s() == "VALID") { - attr->padMode = schema::PadMode_VALID; + prim->set_pad_mode(mindspore::PadMode::VALID); } else if (attr_value.s() == "SAME") { - attr->padMode = schema::PadMode_SAME_UPPER; + prim->set_pad_mode(mindspore::PadMode::SAME); } } - attr->format = TensorFlowUtils::ParseNodeFormat(tf_op); + auto format = TensorFlowUtils::ParseNodeFormat(tf_op); + prim->set_format(format); if (TensorFlowUtils::FindAttrValue(tf_op, "strides", &attr_value)) { const auto &stride_list = attr_value.list(); - if (attr->format == schema::Format_NCHW) { - attr->strideH = (int32_t)stride_list.i(2); - attr->strideW = (int32_t)stride_list.i(3); + if (format == mindspore::Format::NCHW) { + prim->set_strides({stride_list.i(2), stride_list.i(3)}); } else { - attr->strideH = (int32_t)stride_list.i(1); - attr->strideW = (int32_t)stride_list.i(2); + prim->set_strides({stride_list.i(1), stride_list.i(2)}); } } if (TensorFlowUtils::FindAttrValue(tf_op, "ksize", &attr_value)) { const auto &kernel_list = attr_value.list(); - if (attr->format == schema::Format_NCHW) { - attr->windowH = (int32_t)kernel_list.i(2); - attr->windowW = (int32_t)kernel_list.i(3); + if (format == mindspore::Format::NCHW) { + prim->set_kernel_size({kernel_list.i(2), kernel_list.i(3)}); } else { - attr->windowH = (int32_t)kernel_list.i(1); - attr->windowW = (int32_t)kernel_list.i(2); + 
prim->set_kernel_size({kernel_list.i(1), kernel_list.i(2)}); } } - primitive->value.type = schema::PrimitiveType_Pooling; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + + return prim.release(); } -TFNodeRegistrar g_tfMaxPoolParser("MaxPool", new TFPoolParser()); -TFNodeRegistrar g_tfAvgPoolParser("AvgPool", new TFPoolParser()); + +TFNodeRegistrar g_tfMaxPoolParser("MaxPool", new TFMaxPoolParser()); +TFNodeRegistrar g_tfAvgPoolParser("AvgPool", new TFAvgPoolParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.h index f64bed2845..646d7f1f76 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_pool_parser.h @@ -23,13 +23,24 @@ namespace mindspore { namespace lite { -class TFPoolParser : public TFNodeParser { +class TFMaxPoolParser : public TFNodeParser { public: - TFPoolParser() = default; - ~TFPoolParser() override = default; + TFMaxPoolParser() = default; + ~TFMaxPoolParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; +}; + +class TFAvgPoolParser : public TFNodeParser { + public: + TFAvgPoolParser() = default; + ~TFAvgPoolParser() override = default; + + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.cc index 0640645908..4c3572b63e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.cc @@ -20,80 +20,57 @@ #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" #include "tools/converter/parser/tf/tf_util.h" +#include "ops/range.h" namespace mindspore { namespace lite { -STATUS TFRaggedRangeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF RaggedRangeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RangeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFRaggedRangeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + 
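// ---------------------------------------------------------------------------
// [Editorial aside -- illustrative sketch, not part of the patch.] Both pool
// parsers above index TF's four-element "strides"/"ksize" attribute lists by
// data format: NHWC lays a 4-D tensor out as [batch, height, width, channel],
// so H/W live at indices 1 and 2, while NCHW uses [batch, channel, height,
// width], putting H/W at indices 2 and 3. A helper capturing that rule
// (hypothetical name, assuming the surrounding file's includes):

static std::vector<int64_t> SpatialPair(const tensorflow::AttrValue_ListValue &list, mindspore::Format format) {
  // one list entry per dimension of the 4-D input tensor
  return format == mindspore::Format::NCHW ? std::vector<int64_t>{list.i(2), list.i(3)}   // [N, C, H, W]
                                           : std::vector<int64_t>{list.i(1), list.i(2)};  // [N, H, W, C]
}
// ---------------------------------------------------------------------------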
std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Range>(); tensorflow::AttrValue attr_value; if (TensorFlowUtils::FindAttrValue(tf_op, "starts", &attr_value)) { - attr->start = static_cast<int32_t>(attr_value.i()); + prim->set_start(attr_value.i()); } else { auto start_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(0))); if (TensorFlowUtils::FindAttrValue(*start_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange start node value attr, means it has default value"; - attr->start = static_cast<int32_t>(attr_value.i()); + prim->set_start(attr_value.i()); } } if (TensorFlowUtils::FindAttrValue(tf_op, "limits", &attr_value)) { - attr->limit = static_cast<int32_t>(attr_value.i()); + prim->set_limit(attr_value.i()); } else { auto limit_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(1))); if (TensorFlowUtils::FindAttrValue(*limit_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange limit node value attr, means it has default value"; - attr->limit = static_cast<int32_t>(attr_value.i()); + prim->set_limit(attr_value.i()); } } if (TensorFlowUtils::FindAttrValue(tf_op, "deltas", &attr_value)) { - attr->delta = static_cast<int32_t>(attr_value.i()); + prim->set_delta(attr_value.i()); } else { auto delta_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(2))); if (TensorFlowUtils::FindAttrValue(*delta_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange delta node value attr, means it has default value"; } - attr->delta = static_cast<int32_t>(attr_value.i()); - } - - primitive->value.type = schema::PrimitiveType_Range; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + prim->set_delta(attr_value.i()); } *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { - return status; + for (int i = 0; i < 3; i++) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed!"; + return nullptr; + } } - status = AddOpInput(tf_op, 2, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfRaggedRangeParser("RaggedRange", new TFRaggedRangeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.h index be1bbf888e..ea86c3010a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_ragged_range_parser.h @@ -28,8 +28,9 @@ class TFRaggedRangeParser : public TFNodeParser { TFRaggedRangeParser() = default; ~TFRaggedRangeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.cc 
b/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.cc index 0192dd95b9..85c0132a9a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.cc @@ -19,50 +19,34 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/random_standard_normal.h" namespace mindspore { namespace lite { -STATUS TFRandomStandardNormalParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, - int *output_size) { - MS_LOG(WARNING) << "TF RandomStandardNormalParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFRandomStandardNormalParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::RandomStandardNormal>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RandomStandardNormalT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "seed", &attr_value)) { MS_LOG(ERROR) << "The seed attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->seed = attr_value.i(); + prim->set_seed(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "seed2", &attr_value)) { MS_LOG(ERROR) << "The seed2 attr should be specified"; - return RET_ERROR; - } - attr->seed2 = attr_value.i(); - primitive->value.type = schema::PrimitiveType_RandomStandardNormal; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_seed2(attr_value.i()); + *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); } TFNodeRegistrar g_tfRandomStandardNormalParser("RandomStandardNormal", new TFRandomStandardNormalParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.h index 0e0990e392..c1fd43499d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_random_standard_normal_parser.h @@ -28,8 +28,9 @@ class TFRandomStandardNormalParser : public TFNodeParser { TFRandomStandardNormalParser() = default; ~TFRandomStandardNormalParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // 
namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_range_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_range_parser.cc index ed6d1d65a2..6cd3f73d9d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_range_parser.cc @@ -19,80 +19,58 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/range.h" namespace mindspore { namespace lite { -STATUS TFRangeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF RangeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RangeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFRangeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Range>(); tensorflow::AttrValue attr_value; if (TensorFlowUtils::FindAttrValue(tf_op, "starts", &attr_value)) { - attr->start = static_cast<int32_t>(attr_value.i()); + prim->set_start(attr_value.i()); } else { auto start_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(0))); if (TensorFlowUtils::FindAttrValue(*start_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange start node value attr, means it has default value"; - attr->start = static_cast<int32_t>(attr_value.i()); + prim->set_start(attr_value.i()); } } if (TensorFlowUtils::FindAttrValue(tf_op, "limits", &attr_value)) { - attr->limit = static_cast<int32_t>(attr_value.i()); + prim->set_limit(attr_value.i()); } else { auto limit_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(1))); if (TensorFlowUtils::FindAttrValue(*limit_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange limit node value attr, means it has default value"; - attr->limit = static_cast<int32_t>(attr_value.i()); + prim->set_limit(attr_value.i()); } } if (TensorFlowUtils::FindAttrValue(tf_op, "deltas", &attr_value)) { - attr->delta = static_cast<int32_t>(attr_value.i()); + prim->set_delta(attr_value.i()); } else { auto delta_node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(2))); if (TensorFlowUtils::FindAttrValue(*delta_node, "value", &attr_value)) { MS_LOG(INFO) << "Found raggedrange delta node value attr, means it has default value"; } - attr->delta = static_cast<int32_t>(attr_value.i()); - } - - primitive->value.type = schema::PrimitiveType_Range; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + prim->set_delta(attr_value.i()); } *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { - return status; + for (int i = 0; i < 3; i++) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " 
failed!"; + return nullptr; + } } - status = AddOpInput(tf_op, 2, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfRangeParser("Range", new TFRangeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_range_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_range_parser.h index bf62cd0271..decd7cbbf6 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_range_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_range_parser.h @@ -28,8 +28,9 @@ class TFRangeParser : public TFNodeParser { TFRangeParser() = default; ~TFRangeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.cc index e1d0500562..3c98975a9d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.cc @@ -18,41 +18,30 @@ #include <memory> #include <map> #include <vector> +#include "ops/rank.h" #include "tools/converter/parser/tf/tf_node_parser_registry.h" namespace mindspore { namespace lite { -STATUS TFRankParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { +ops::PrimitiveC *TFRankParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { MS_LOG(DEBUG) << "TF RankParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; + if (output_size == nullptr) { + MS_LOG(ERROR) << "output_size is nullptr"; + return nullptr; } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RankT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - primitive->value.type = schema::PrimitiveType_Rank; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + auto prim = std::make_unique<ops::Rank>(); + if (prim == nullptr) { + MS_LOG(ERROR) << "New Primitive failed"; + return nullptr; } *output_size = 1; auto status = AddOpInput(tf_op, 0, inputs); if (status != RET_OK) { - return status; + return nullptr; } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfRankParser("Rank", new TFRankParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.h index b4297831a7..efea02bd8d 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_rank_parser.h @@ -28,8 +28,9 @@ class TFRankParser : public TFNodeParser { 
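// ---------------------------------------------------------------------------
// [Editorial aside -- illustrative sketch, not part of the patch.] The Range
// and RaggedRange parsers above share one fallback pattern: read a scalar from
// the node's own attribute when present, otherwise chase the const node that
// feeds the corresponding input through tf_node_map and read its "value"
// attribute, exactly as the patch does. Distilled into a hypothetical helper
// (which, like the parsers, assumes the input name is present in tf_node_map):

static int64_t ScalarAttrOrConstInput(const tensorflow::NodeDef &tf_op,
                                      const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                                      const std::string &attr_name, int input_idx) {
  tensorflow::AttrValue attr_value;
  if (TensorFlowUtils::FindAttrValue(tf_op, attr_name, &attr_value)) {
    return attr_value.i();  // scalar stored directly on the node
  }
  auto node = tf_node_map.at(TensorFlowUtils::GetFlattenNodeName(tf_op.input(input_idx)));
  if (TensorFlowUtils::FindAttrValue(*node, "value", &attr_value)) {
    return attr_value.i();  // default captured on the const input node
  }
  return 0;  // caller decides how to treat a missing default
}
// ---------------------------------------------------------------------------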
TFRankParser() = default; ~TFRankParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.cc index 1776868565..b0eb803f89 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.cc @@ -19,90 +19,53 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/reduce_fusion.h" namespace mindspore { namespace lite { -STATUS TFReduceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ReduceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ReduceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFReduceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ReduceFusion>(); if (tf_op.op() == "Sum") { - attr->mode = schema::ReduceMode_ReduceSum; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum); } else if (tf_op.op() == "Max") { - attr->mode = schema::ReduceMode_ReduceMax; + prim->set_mode(mindspore::ReduceMode::Reduce_Max); } else if (tf_op.op() == "Min") { - attr->mode = schema::ReduceMode_ReduceMin; + prim->set_mode(mindspore::ReduceMode::Reduce_Min); } else if (tf_op.op() == "Mean") { - attr->mode = schema::ReduceMode_ReduceMean; + prim->set_mode(mindspore::ReduceMode::Reduce_Mean); } else if (tf_op.op() == "Prod") { - attr->mode = schema::ReduceMode_ReduceProd; + prim->set_mode(mindspore::ReduceMode::Reduce_Prod); } else if (tf_op.op() == "All") { - attr->mode = schema::ReduceMode_ReduceAll; + prim->set_mode(mindspore::ReduceMode::Reduce_All); } else { MS_LOG(ERROR) << "unsupported reduce mode: " << tf_op.op(); - return RET_ERROR; + return nullptr; } + tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "keep_dims", &attr_value)) { MS_LOG(ERROR) << "The keep_dims attr should be specified"; - return RET_ERROR; + return nullptr; } + if (attr_value.value_case() != tensorflow::AttrValue::kB) { MS_LOG(ERROR) << "the keep_dims attr of reduce should be bool type"; - return RET_ERROR; - } - attr->keepDims = attr_value.b(); - - auto axis_node = GetConstInputNode(tf_node_map, tf_op.input(1)); - if (axis_node == nullptr) { - MS_LOG(ERROR) << "Find Reduce input axis failed"; - return RET_ERROR; - } - if (!TensorFlowUtils::FindAttrValue(*axis_node, "value", &attr_value)) { - MS_LOG(ERROR) << "The value attr should be specified"; - return 
RET_ERROR; - } - auto tensor_proto = attr_value.tensor(); - if (tensor_proto.int_val_size() > 0) { - for (int i = 0; i < tensor_proto.int_val_size(); ++i) { - attr->axes.push_back(tensor_proto.int_val(i)); - } - } else { - auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t); - auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()); - for (size_t i = 0; i < data_num; ++i) { - attr->axes.push_back(data[i]); - } + return nullptr; } + prim->set_keep_dims(attr_value.b()); - primitive->value.type = schema::PrimitiveType_Reduce; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfSumParser("Sum", new TFReduceParser()); TFNodeRegistrar g_tfMaxParser("Max", new TFReduceParser()); TFNodeRegistrar g_tfMinParser("Min", new TFReduceParser()); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.h index b1914f21f7..3c3411654b 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_reduce_parser.h @@ -28,8 +28,9 @@ class TFReduceParser : public TFNodeParser { TFReduceParser() = default; ~TFReduceParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.cc index a32ff06fff..748a19090c 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.cc @@ -19,48 +19,25 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/reshape.h" namespace mindspore { namespace lite { -STATUS TFReshapeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ReshapeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - attr->format = schema::Format_NHWC; - // attr->shape is omitted cause input[1] provide shape info - - primitive->value.type = schema::PrimitiveType_Reshape; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if 
(*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFReshapeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Reshape>(); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfReshapeParser("Reshape", new TFReshapeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.h index d873c54363..4d99a77c8e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_reshape_parser.h @@ -28,8 +28,9 @@ class TFReshapeParser : public TFNodeParser { TFReshapeParser() = default; ~TFReshapeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.cc index 1c8c365c88..ca26ea9024 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.cc @@ -19,76 +19,55 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/resize.h" namespace mindspore { namespace lite { -STATUS TFResizeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ResizeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFResizeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Resize>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } tensorflow::AttrValue attr_value; - attr->format = schema::Format_NHWC; + prim->set_format(mindspore::Format::NHWC); if (!TensorFlowUtils::FindAttrValue(tf_op, "align_corners", &attr_value)) { MS_LOG(ERROR) << "The align_corners attr should be specified"; - return RET_ERROR; + return nullptr; } if (attr_value.b()) { - attr->coordinateTransformMode = schema::CoordinateTransformMode_ALIGN_CORNERS; + 
prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ALIGN_CORNERS); } else { - attr->coordinateTransformMode = schema::CoordinateTransformMode_ASYMMETRIC; + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ASYMMETRIC); } if (tf_op.op() == "ResizeBilinear") { - attr->method = schema::ResizeMethod_LINEAR; + prim->set_method(mindspore::ResizeMethod::LINEAR); } else if (tf_op.op() == "ResizeNearestNeighbor") { - attr->method = schema::ResizeMethod_NEAREST; + prim->set_method(mindspore::ResizeMethod::NEAREST); } else if (tf_op.op() == "ResizeBicubic") { - attr->method = schema::ResizeMethod_CUBIC; + prim->set_method(mindspore::ResizeMethod::CUBIC); } else { - attr->method = schema::ResizeMethod_UNKNOWN; + prim->set_method(mindspore::ResizeMethod::UNKNOWN); } auto size_node = tf_node_map.at(tf_op.input(1)); if (size_node == nullptr) { MS_LOG(ERROR) << "Find size input failed."; - return RET_ERROR; + return nullptr; } if (!TensorFlowUtils::FindAttrValue(*size_node, "value", &attr_value)) { MS_LOG(WARNING) << "The value attr should be specified"; } auto tensor_proto = attr_value.tensor(); auto size_ptr = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()); - attr->newHeight = size_ptr[0]; - attr->newWidth = size_ptr[1]; - - primitive->value.type = schema::PrimitiveType_Resize; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_new_height(size_ptr[0]); + prim->set_new_width(size_ptr[1]); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } TFNodeRegistrar g_tfResizeBilinearParser("ResizeBilinear", new TFResizeParser()); TFNodeRegistrar g_tfResizeNearestNeighborParser("ResizeNearestNeighbor", new TFResizeParser()); diff --git a/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.h index f753620d7e..359e00c34e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_resize_parser.h @@ -28,8 +28,9 @@ class TFResizeParser : public TFNodeParser { TFResizeParser() = default; ~TFResizeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.cc index ba41439a09..9a84e4b32c 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.cc @@ -19,67 +19,48 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/reverse_v2.h" namespace mindspore { namespace lite { -STATUS TFReverseParser::Parse(const 
tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ReverseParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ReverseT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFReverseParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ReverseV2>(); tensorflow::AttrValue attr_value; - auto axis = GetConstInputNode(tf_node_map, tf_op.input(1)); - if (axis == nullptr) { + auto value = GetConstInputNode(tf_node_map, tf_op.input(1)); + if (value == nullptr) { MS_LOG(ERROR) << "Find axis failed"; - return RET_ERROR; + return nullptr; } - if (!TensorFlowUtils::FindAttrValue(*axis, "value", &attr_value)) { + if (!TensorFlowUtils::FindAttrValue(*value, "value", &attr_value)) { MS_LOG(ERROR) << "The value attr should be specified"; - return RET_ERROR; + return nullptr; } auto tensor_proto = attr_value.tensor(); + + std::vector<int64_t> axis; if (tensor_proto.int_val_size() > 0) { for (int i = 0; i < tensor_proto.int_val_size(); ++i) { - attr->axis.push_back(tensor_proto.int_val(i)); + axis.push_back(tensor_proto.int_val(i)); } } else { auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t); auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()); for (size_t i = 0; i < data_num; ++i) { - attr->axis.push_back(data[i]); + axis.push_back(data[i]); } } - - primitive->value.type = schema::PrimitiveType_Reverse; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axis(axis); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; - return status; + return nullptr; } - return status; + + return prim.release(); } TFNodeRegistrar g_tfReverseV2Parser("ReverseV2", new TFReverseParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.h index 9b99271b2e..43dafd9b67 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_reverse_parser.h @@ -29,8 +29,9 @@ class TFReverseParser : public TFNodeParser { TFReverseParser() = default; ~TFReverseParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.cc 
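// ---------------------------------------------------------------------------
// [Editorial aside -- illustrative sketch, not part of the patch.] The Reverse
// parser above shows the two encodings a const int32 tensor can arrive in from
// TensorFlow: small tensors populate the repeated int_val field, larger ones
// pack raw bytes into tensor_content. The same dual decode appears in the old
// Reduce parser removed earlier. As a reusable helper (hypothetical name):

static std::vector<int64_t> DecodeInt32Tensor(const tensorflow::TensorProto &tensor_proto) {
  std::vector<int64_t> values;
  if (tensor_proto.int_val_size() > 0) {
    for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
      values.push_back(tensor_proto.int_val(i));  // unpacked repeated-field encoding
    }
  } else {
    auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
    auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
    for (size_t i = 0; i < data_num; ++i) {
      values.push_back(data[i]);  // packed raw-byte encoding
    }
  }
  return values;
}
// ---------------------------------------------------------------------------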
b/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.cc index e4f1ee1ea9..d4f357ee99 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.cc @@ -19,56 +19,36 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/reverse_sequence.h" namespace mindspore { namespace lite { -STATUS TFReverseSequenceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ReverseSequenceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ReverseSequenceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFReverseSequenceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::ReverseSequence>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "batch_dim", &attr_value)) { MS_LOG(ERROR) << "The batch_dim attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->batchAxis = attr_value.i(); + prim->set_batch_dim(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "seq_dim", &attr_value)) { MS_LOG(ERROR) << "The seq_dim attr should be specified"; - return RET_ERROR; - } - attr->seqAxis = attr_value.i(); - - primitive->value.type = schema::PrimitiveType_ReverseSequence; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_seq_dim(attr_value.i()); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed!"; + return nullptr; } - return AddOpInput(tf_op, 1, inputs); + + return prim.release(); } + TFNodeRegistrar g_tfReverseSequenceParser("ReverseSequence", new TFReverseSequenceParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.h index e7b6e13742..229e83b551 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_reverse_sequence_parser.h @@ -28,8 +28,9 @@ class TFReverseSequenceParser : public TFNodeParser { TFReverseSequenceParser() = default; ~TFReverseSequenceParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int 
*output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_round_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_round_parser.cc deleted file mode 100644 index 86a5a8368f..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_round_parser.cc +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "tools/converter/parser/tf/tf_round_parser.h" -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser_registry.h" - -namespace mindspore { -namespace lite { -STATUS TFRoundParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF RoundParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RoundT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Round; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; -} -TFNodeRegistrar g_tfRoundParser("Round", new TFRoundParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_round_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_round_parser.h deleted file mode 100644 index 229181aa7e..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_round_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ROUND_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ROUND_PARSER_H_ -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser.h" - -namespace mindspore { -namespace lite { -class TFRoundParser : public TFNodeParser { - public: - TFRoundParser() = default; - ~TFRoundParser() override = default; - - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; -}; -} // namespace lite -} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_ROUND_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.cc deleted file mode 100644 index ef2251e2a4..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#include "tools/converter/parser/tf/tf_rsqrt_parser.h" -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser_registry.h" - -namespace mindspore { -namespace lite { -STATUS TFRsqrtParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF RsqrtParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::RsqrtT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Rsqrt; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - - *output_size = 1; - for (int i = 0; i < tf_op.input_size(); i++) { - inputs->emplace_back(tf_op.input(i)); - } - return RET_OK; -} -TFNodeRegistrar g_tfRsqrtParser("Rsqrt", new TFRsqrtParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.h deleted file mode 100644 index dd40da5ce0..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_rsqrt_parser.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_RSQRT_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_RSQRT_PARSER_H_ - -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser.h" - -namespace mindspore { -namespace lite { -class TFRsqrtParser : public TFNodeParser { - public: - TFRsqrtParser() = default; - ~TFRsqrtParser() override = default; - - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_RSQRT_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/tf/tf_select_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_select_parser.cc index e5e73cec96..27e490d2fc 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_select_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_select_parser.cc @@ -19,42 +19,21 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/select.h" namespace mindspore { namespace lite { -STATUS TFSelectParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SelectParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SelectT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Select; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFSelectParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Select>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + + return prim.release(); } TFNodeRegistrar g_tfSelectParser("Select", new TFSelectParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_select_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_select_parser.h index 79e1fa8da5..a771378eae 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_select_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_select_parser.h @@ -29,8 +29,9 @@ class TFSelectParser : public TFNodeParser { TFSelectParser() = default; ~TFSelectParser() override = default; - STATUS 
Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.cc index 9b53470872..d9ebc035a5 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.cc @@ -19,41 +19,24 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/shape.h" namespace mindspore { namespace lite { -STATUS TFShapeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF ShapeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::ShapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFShapeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Shape>(); - primitive->value.type = schema::PrimitiveType_Shape; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfShapeParser("Shape", new TFShapeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.h index f65a9c1467..d0b0799e7c 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_shape_parser.h @@ -28,8 +28,9 @@ class TFShapeParser : public TFNodeParser { TFShapeParser() = default; ~TFShapeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_size_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_size_parser.cc index 2bcade2fc0..81c9574589 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_size_parser.cc +++ 
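The header hunks above (TFSelectParser, TFShapeParser) spell out the contract change that repeats through the rest of this patch: Parse no longer fills a PrimitiveC ** out-parameter and returns a STATUS; it returns the primitive directly and signals failure with nullptr. A minimal sketch of a parser written against the new interface; MyOpParser and ops::MyOp are placeholder names, not classes from this patch:

// Placeholder parser showing the post-patch interface shape.
class MyOpParser : public TFNodeParser {
 public:
  MyOpParser() = default;
  ~MyOpParser() override = default;

  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                         std::vector<std::string> *inputs, int *output_size) override {
    auto prim = std::make_unique<ops::MyOp>();  // illustrative op class
    *output_size = 1;
    if (AddOpInput(tf_op, 0, inputs) != RET_OK) {
      MS_LOG(ERROR) << "add op input failed";
      return nullptr;  // nullptr replaces the old RET_ERROR/RET_NULL_PTR codes
    }
    return prim.release();  // ownership passes to the caller
  }
};
// Registration is unchanged: the registry is still keyed by the TF op name.
TFNodeRegistrar g_tfMyOpParser("MyOp", new MyOpParser());

The defensive null checks on primitive, attr, and the created primitiveC disappear along with the two-object construction; only domain failures (missing attributes, bad inputs) remain as error paths.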
b/mindspore/lite/tools/converter/parser/tf/tf_size_parser.cc @@ -19,44 +19,22 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/size.h" namespace mindspore { namespace lite { -STATUS TFSizeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SizeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Size; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFSizeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Size>(); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { MS_LOG(ERROR) << "Add Op input failed."; - return status; + return nullptr; } - return status; + + return prim.release(); } TFNodeRegistrar g_tfSizeParser("Size", new TFSizeParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_size_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_size_parser.h index e7c8879a67..c31c1025a0 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_size_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_size_parser.h @@ -29,8 +29,9 @@ class TFSizeParser : public TFNodeParser { TFSizeParser() = default; ~TFSizeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.cc index 3f16e4f24f..9caab85248 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.cc @@ -19,86 +19,58 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/fusion/slice_fusion.h" namespace mindspore { namespace lite { -STATUS TFSliceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SliceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if 
(primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFSliceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::SliceFusion>(); // begin tensorflow::AttrValue attr_value; auto begin_node = GetConstInputNode(tf_node_map, tf_op.input(1)); if (begin_node == nullptr) { MS_LOG(ERROR) << "Find StridedSlice input begin failed"; - return RET_ERROR; + return nullptr; } if (!TensorFlowUtils::FindAttrValue(*begin_node, "value", &attr_value)) { MS_LOG(ERROR) << "The value attr should be specified"; - return RET_ERROR; + return nullptr; } auto tensor_proto = attr_value.tensor(); + + std::vector<int32_t> begin; if (tensor_proto.int_val_size() > 0) { for (int i = 0; i < tensor_proto.int_val_size(); ++i) { - attr->begin.push_back(tensor_proto.int_val(i)); + begin.push_back(tensor_proto.int_val(i)); } } else { auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t); auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data()); for (size_t i = 0; i < data_num; ++i) { - attr->begin.push_back(data[i]); + begin.push_back(data[i]); } } // axes - std::vector<int> axes; + std::vector<int64_t> axes; axes.clear(); - for (size_t i = 0; i < attr->begin.size(); ++i) { + for (size_t i = 0; i < begin.size(); ++i) { axes.push_back(i); } - attr->axes = axes; - - primitive->value.type = schema::PrimitiveType_Slice; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axes(axes); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 1, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; - } - status = AddOpInput(tf_op, 2, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; + for (int i = 0; i < 3; i++) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input " << i << " failed."; + return nullptr; + } } - return status; + + return prim.release(); } TFNodeRegistrar g_tfSliceParser("Slice", new TFSliceParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.h index 88390825b0..e2d170dae3 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_slice_parser.h @@ -29,8 +29,9 @@ class TFSliceParser : public TFNodeParser { TFSliceParser() = default; ~TFSliceParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git 
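The Slice hunk above keeps the two-format decode of the begin const input: small tensors arrive through the repeated int_val field of the proto, larger ones as packed bytes in tensor_content. The same dual decode appears in several TF parsers, so it is worth seeing in isolation; ReadInt32Values is a hypothetical helper name for illustration, not something this patch introduces:

#include <vector>
// tensorflow::TensorProto comes from the TF proto headers these parsers already use.

// Decode an int32 const tensor that may use either proto encoding.
std::vector<int32_t> ReadInt32Values(const tensorflow::TensorProto &tensor_proto) {
  std::vector<int32_t> values;
  if (tensor_proto.int_val_size() > 0) {
    // Scalar/small tensors: values live in the repeated int_val field.
    for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
      values.push_back(tensor_proto.int_val(i));
    }
  } else {
    // Larger tensors: values are packed as raw bytes in tensor_content.
    auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
    auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
    for (size_t i = 0; i < data_num; ++i) {
      values.push_back(data[i]);
    }
  }
  return values;
}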
a/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.cc index ace962b1a0..67d9316a59 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.cc @@ -19,45 +19,29 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/softmax.h" namespace mindspore { namespace lite { -STATUS TFSoftmaxParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SoftmaxParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SoftMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFSoftmaxParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Softmax>(); tensorflow::AttrValue attr_value; int axis = -1; if (TensorFlowUtils::FindAttrValue(tf_op, "axis", &attr_value)) { axis = static_cast<int32_t>(attr_value.i()); } - attr->axis = axis; - primitive->value.type = schema::PrimitiveType_SoftMax; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } + prim->set_axis({axis}); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input failed."; + return nullptr; + } + + return prim.release(); } TFNodeRegistrar g_tfSoftmaxParser("Softmax", new TFSoftmaxParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.h index ec7d91aa25..8c46590eca 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_softmax_parser.h @@ -28,8 +28,9 @@ class TFSoftmaxParser : public TFNodeParser { TFSoftmaxParser() = default; ~TFSoftmaxParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.cc index 89eeb0a067..05fc36aa83 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.cc @@ -19,45 +19,23 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include 
"ops/space_to_batch_nd.h" namespace mindspore { namespace lite { -STATUS TFSpaceToBatchNDParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(WARNING) << "TF SpaceToBatchNDParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SpaceToBatchNDT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_SpaceToBatchND; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFSpaceToBatchNDParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::SpaceToBatchND>(); *output_size = 1; for (int i = 0; i < tf_op.input_size(); ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfSpaceToBatchNDParser("SpaceToBatchND", new TFSpaceToBatchNDParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.h index 339c76034a..d00bb00ea9 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_space_to_batch_nd_parser.h @@ -28,8 +28,9 @@ class TFSpaceToBatchNDParser : public TFNodeParser { TFSpaceToBatchNDParser() = default; ~TFSpaceToBatchNDParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_split_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_split_parser.cc index 5dd8733d96..95b6fc4235 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_split_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_split_parser.cc @@ -14,39 +14,29 @@ * limitations under the License. 
*/ #include "tools/converter/parser/tf/tf_split_parser.h" +#include <functional> #include <string> #include <memory> #include <map> #include <vector> +#include <algorithm> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/split.h" namespace mindspore { namespace lite { -STATUS TFSplitParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SplitParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SplitT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFSplitParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Split>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "num_split", &attr_value)) { MS_LOG(ERROR) << "The attribute num_split should be specified"; - return RET_PARAM_INVALID; + return nullptr; } - attr->numberSplit = (int32_t)(attr_value.i()); + auto number_split = attr_value.i(); + prim->set_output_num(number_split); int split_dim_index; int input_index; @@ -61,48 +51,51 @@ STATUS TFSplitParser::Parse(const tensorflow::NodeDef &tf_op, auto split_dim_node = GetConstInputNode(tf_node_map, tf_op.input(split_dim_index)); if (split_dim_node == nullptr) { MS_LOG(ERROR) << "Find Split input split_dim node failed"; - return RET_ERROR; + return nullptr; } if (!TensorFlowUtils::FindAttrValue(*split_dim_node, "value", &attr_value)) { MS_LOG(ERROR) << "The attribute splitDim should be specified"; - return RET_PARAM_INVALID; + return nullptr; } - auto split_dim_tensor = attr_value.tensor(); - attr->splitDim = split_dim_tensor.int_val(0); - *output_size = attr->numberSplit; + auto splitDim = attr_value.tensor().int_val(0); + prim->set_axis(splitDim); if (tf_op.op() == "SplitV") { auto size_splits_node = GetConstInputNode(tf_node_map, tf_op.input(1)); if (size_splits_node == nullptr) { MS_LOG(ERROR) << "Find Split input size_splits failed"; - return RET_ERROR; + return nullptr; } if (!TensorFlowUtils::FindAttrValue(*size_splits_node, "value", &attr_value)) { MS_LOG(ERROR) << "The attribute size splits should be specified"; - return RET_PARAM_INVALID; + return nullptr; } auto size_splits_tensor = attr_value.tensor(); auto size = size_splits_tensor.tensor_content().size() / sizeof(int32_t); - attr->sizeSplits.resize(size); - auto ret = memcpy_s(attr->sizeSplits.data(), size * sizeof(int32_t), size_splits_tensor.tensor_content().data(), + + std::vector<int32_t> size_splits_int32; + size_splits_int32.resize(size); + auto ret = memcpy_s(size_splits_int32.data(), size * sizeof(int32_t), size_splits_tensor.tensor_content().data(), size * sizeof(int32_t)); if (ret != EOK) { MS_LOG(ERROR) << "memcpy_s failed"; - return RET_ERROR; + return nullptr; } + std::vector<int64_t> size_splits; + std::transform(size_splits_int32.begin(), size_splits_int32.end(), std::back_inserter(size_splits), + [](int32_t val) { return static_cast<int64_t>(val); }); + prim->set_size_splits(size_splits); } - 
primitive->value.type = schema::PrimitiveType_Split; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = number_split; + if (AddOpInput(tf_op, input_index, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - auto status = AddOpInput(tf_op, input_index, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfSplitParser("Split", new TFSplitParser()); TFNodeRegistrar g_tfSplitVParser("SplitV", new TFSplitParser()); } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tf/tf_split_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_split_parser.h index 3ecefb9bd9..9f33008021 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_split_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_split_parser.h @@ -28,8 +28,9 @@ class TFSplitParser : public TFNodeParser { TFSplitParser() = default; ~TFSplitParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.cc deleted file mode 100644 index bbda1360a8..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.cc +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
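In the SplitV branch above, size_splits is copied out of the const tensor as int32 and then widened to the int64 vector the unified-IR setter expects; std::transform with std::back_inserter does the widening in one pass. A self-contained sketch of just that conversion, with illustrative values:

#include <algorithm>
#include <cstdint>
#include <iterator>
#include <vector>

int main() {
  std::vector<int32_t> size_splits_int32 = {4, 4, 8};  // e.g. decoded from tensor_content
  std::vector<int64_t> size_splits;
  std::transform(size_splits_int32.begin(), size_splits_int32.end(),
                 std::back_inserter(size_splits),
                 [](int32_t val) { return static_cast<int64_t>(val); });
  // size_splits now holds {4, 4, 8} as int64_t, ready for prim->set_size_splits(size_splits).
  return 0;
}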
- */ -#include "tools/converter/parser/tf/tf_squared_difference_parser.h" -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser_registry.h" - -namespace mindspore { -namespace lite { -STATUS TFSquaredDifferenceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SquaredDifferenceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SquaredDifferenceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_SquaredDifference; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return RET_ERROR; - } - status = AddOpInput(tf_op, 1, inputs); - return status; -} -TFNodeRegistrar g_tfSquaredDifferenceParser("SquaredDifference", new TFSquaredDifferenceParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.h deleted file mode 100644 index 2b557bf615..0000000000 --- a/mindspore/lite/tools/converter/parser/tf/tf_squared_difference_parser.h +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright 2021 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_SQUARED_DIFFERENCE_PARSER_H_ -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_SQUARED_DIFFERENCE_PARSER_H_ -#include <string> -#include <memory> -#include <map> -#include <vector> -#include "tools/converter/parser/tf/tf_node_parser.h" - -namespace mindspore { -namespace lite { -class TFSquaredDifferenceParser : public TFNodeParser { - public: - TFSquaredDifferenceParser() = default; - ~TFSquaredDifferenceParser() override = default; - - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_SQUARED_DIFFERENCE_PARSER_H_ diff --git a/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.cc index 026b8ec520..b812256e13 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.cc @@ -19,51 +19,37 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/squeeze.h" namespace mindspore { namespace lite { -STATUS TFSqueezeParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SqueezeParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SqueezeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFSqueezeParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Squeeze>(); + std::vector<int64_t> axis; tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "squeeze_dims", &attr_value)) { MS_LOG(ERROR) << "Find Squeeze input squeeze_dims attr failed"; - return RET_ERROR; + return nullptr; } auto dims = attr_value.list(); for (int i = 0; i < dims.i_size(); ++i) { - attr->axis.push_back(dims.i(i)); + axis.push_back(dims.i(i)); } + prim->set_axis(axis); - primitive->value.type = schema::PrimitiveType_Squeeze; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + *output_size = 1; + if (AddOpInput(tf_op, 0, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input failed"; + return nullptr; } - *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - return status; + return prim.release(); } + TFNodeRegistrar g_tfSqueezeParser("Squeeze", new TFSqueezeParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.h index 95a765df29..f5998dcfc5 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.h +++ 
b/mindspore/lite/tools/converter/parser/tf/tf_squeeze_parser.h @@ -28,8 +28,9 @@ class TFSqueezeParser : public TFNodeParser { TFSqueezeParser() = default; ~TFSqueezeParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.cc index f7959a98b7..7af0a27f8e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.cc @@ -19,78 +19,58 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/strided_slice.h" namespace mindspore { namespace lite { -STATUS TFStrideSliceParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF StrideSliceParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::StridedSliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFStrideSliceParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::StridedSlice>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "begin_mask", &attr_value)) { MS_LOG(ERROR) << "The begin_mask attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->beginMask = attr_value.i(); + prim->set_begin_mask(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "end_mask", &attr_value)) { MS_LOG(ERROR) << "The end_mask attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->endMask = attr_value.i(); + prim->set_end_mask(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "ellipsis_mask", &attr_value)) { MS_LOG(ERROR) << "The ellipsis_mask attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->ellipsisMask = attr_value.i(); + prim->set_ellipsis_mask(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "new_axis_mask", &attr_value)) { MS_LOG(ERROR) << "The new_axis_mask attr should be specified"; - return RET_ERROR; + return nullptr; } - attr->newAxisMask = attr_value.i(); + prim->set_new_axis_mask(attr_value.i()); if (!TensorFlowUtils::FindAttrValue(tf_op, "shrink_axis_mask", &attr_value)) { MS_LOG(ERROR) << "The shrink_axis_mask attr should be specified"; - return RET_ERROR; - } - attr->shrinkAxisMask = attr_value.i(); - primitive->value.type = schema::PrimitiveType_StridedSlice; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) 
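The StridedSlice hunk around this point transfers five TF mask attributes onto the new setters. Each mask is a per-dimension bitfield: bit i of begin_mask or end_mask means "ignore the supplied begin/end index for dimension i and take the widest range", ellipsis_mask marks a "..." placeholder, new_axis_mask inserts a dimension, and shrink_axis_mask treats the begin value as a plain index and drops that dimension. An illustrative setting; the mask values are examples, not from the patch:

auto prim = std::make_unique<ops::StridedSlice>();
prim->set_begin_mask(0b101);        // dims 0 and 2: start from the beginning
prim->set_end_mask(0b001);          // dim 0: run through to the end
prim->set_ellipsis_mask(0);         // no "..." in this slice spec
prim->set_new_axis_mask(0);         // no inserted axes
prim->set_shrink_axis_mask(0b010);  // dim 1: select a single index, drop the axis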
{ - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_shrink_axis_mask(attr_value.i()); *output_size = 1; - STATUS status = RET_OK; for (int i = 0; i < tf_op.input_size(); i++) { - status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - MS_LOG(ERROR) << "Add Op input failed."; - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "Add Op input " << i << " failed."; + return nullptr; } } - return status; + + return prim.release(); } + TFNodeRegistrar g_tfStrideSliceParser("StridedSlice", new TFStrideSliceParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.h index 2cbc75ba7d..03fdaad661 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_stride_slice_parser.h @@ -28,8 +28,9 @@ class TFStrideSliceParser : public TFNodeParser { TFStrideSliceParser() = default; ~TFStrideSliceParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.cc index 0c87cf5fdf..6e3d5e8027 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.cc @@ -19,43 +19,21 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/switch.h" namespace mindspore { namespace lite { -STATUS TFSwitchParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC, - std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF SwitchParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is nullptr"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::SwitchT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - - primitive->value.type = schema::PrimitiveType_Switch; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; - } +ops::PrimitiveC *TFSwitchParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::Switch>(); *output_size = tf_op.input_size(); for (int i = 0; i < tf_op.input_size(); i++) { inputs->emplace_back(tf_op.input(i)); } - return RET_OK; + return prim.release(); } TFNodeRegistrar g_tfSwitchParser("Switch", new TFSwitchParser()); } // namespace lite diff --git 
a/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.h index 7874a0f7be..9f15e157cb 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_switch_parser.h @@ -29,8 +29,9 @@ class TFSwitchParser : public TFNodeParser { TFSwitchParser() = default; ~TFSwitchParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.cc index 07b5376073..c5c70c5a0a 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.cc @@ -19,69 +19,49 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/tensor_list_from_tensor.h" namespace mindspore { namespace lite { -STATUS TFTensorListFromTensorParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, - int *output_size) { - MS_LOG(INFO) << "TF TensorListFromTensorParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::TensorListFromTensorT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFTensorListFromTensorParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::TensorListFromTensor>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "element_dtype", &attr_value)) { MS_LOG(ERROR) << "The element_dtype attr should be specified"; - return RET_ERROR; + return nullptr; } auto type = TensorFlowUtils::GetTFDataType(attr_value.type()); if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_from_tensor element dtype must be known type"; - return RET_ERROR; + return nullptr; } - attr->elementDType = type; + prim->set_element_dtype((int64_t)(type)); if (!TensorFlowUtils::FindAttrValue(tf_op, "shape_type", &attr_value)) { MS_LOG(ERROR) << "The shape_type attr should be specified"; - return RET_ERROR; + return nullptr; } type = TensorFlowUtils::GetTFDataType(attr_value.type()); if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_from_tensor shape type must be known type"; - return RET_ERROR; - } - attr->shapeType = type; - - primitive->value.type = schema::PrimitiveType_TensorListFromTensor; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is 
nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_shape_type((int64_t)(type)); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + for (int i = 0; i < 2; ++i) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; + } } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfTensorListFromTensorParser("TensorListFromTensor", new TFTensorListFromTensorParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.h index 5cb732867a..49f950367f 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_from_tensor_parser.h @@ -28,8 +28,9 @@ class TFTensorListFromTensorParser : public TFNodeParser { TFTensorListFromTensorParser() = default; ~TFTensorListFromTensorParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.cc index 6071939e85..5f9c5f277e 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.cc @@ -19,58 +19,38 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/tensor_list_get_item.h" namespace mindspore { namespace lite { -STATUS TFTensorListGetItemParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF TensorListGetItemParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::TensorListGetItemT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFTensorListGetItemParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::TensorListGetItem>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "element_dtype", &attr_value)) { MS_LOG(ERROR) << "The element_dtype attr should be specified"; - return RET_ERROR; + return nullptr; } auto type = TensorFlowUtils::GetTFDataType(attr_value.type()); if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_get_item element_dtype must be known type"; - return RET_ERROR; - } - attr->elementDType = type; - 
- primitive->value.type = schema::PrimitiveType_TensorListGetItem; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_element_dtype((int64_t)(type)); *output_size = 1; for (int i = 0; i < 3; ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + + return prim.release(); } + TFNodeRegistrar g_tfTensorListGetItemParser("TensorListGetItem", new TFTensorListGetItemParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.h index 37e5076947..f3b8224b93 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_get_item_parser.h @@ -29,8 +29,9 @@ class TFTensorListGetItemParser : public TFNodeParser { TFTensorListGetItemParser() = default; ~TFTensorListGetItemParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.cc index 6b6139c54f..72cfeaefe4 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.cc @@ -19,68 +19,49 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/tensor_list_reserve.h" namespace mindspore { namespace lite { -STATUS TFTensorListReserveParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF TensorListReserveParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = std::make_unique<schema::TensorListReserveT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFTensorListReserveParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::TensorListReserve>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "element_dtype", &attr_value)) { MS_LOG(ERROR) << "The element_dtype attr should be specified"; - return RET_ERROR; + return nullptr; } auto type = TensorFlowUtils::GetTFDataType(attr_value.type()); 
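The TensorListReserve hunk here repeats the attribute dance already seen in TensorListFromTensor and TensorListGetItem: find the dtype attr, map it through GetTFDataType, reject kTypeUnknown, and store the result as int64. Since four parsers in a row share it, here is the common step factored into a helper as a sketch; ReadTypeAttr is an illustrative name, and the patch itself keeps the logic inline:

// Illustrative helper for the repeated dtype-attr pattern in the TensorList hunks.
bool ReadTypeAttr(const tensorflow::NodeDef &tf_op, const std::string &attr_name, int64_t *out) {
  tensorflow::AttrValue attr_value;
  if (!TensorFlowUtils::FindAttrValue(tf_op, attr_name, &attr_value)) {
    MS_LOG(ERROR) << "The " << attr_name << " attr should be specified";
    return false;
  }
  auto type = TensorFlowUtils::GetTFDataType(attr_value.type());
  if (type == kTypeUnknown) {
    MS_LOG(ERROR) << attr_name << " must be a known type";
    return false;
  }
  *out = static_cast<int64_t>(type);
  return true;
}

// Usage inside a parser body:
//   int64_t element_dtype = 0;
//   if (!ReadTypeAttr(tf_op, "element_dtype", &element_dtype)) return nullptr;
//   prim->set_element_dtype(element_dtype);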
if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_reserve element dtype must be known type"; - return RET_ERROR; + return nullptr; } - attr->elementDType = type; + prim->set_element_dtype((int64_t)(type)); if (!TensorFlowUtils::FindAttrValue(tf_op, "shape_type", &attr_value)) { MS_LOG(ERROR) << "The shape_type attr should be specified"; - return RET_ERROR; + return nullptr; } type = TensorFlowUtils::GetTFDataType(attr_value.type()); if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_reserve shape_type must be known type"; - return RET_ERROR; - } - attr->shapeType = type; - - primitive->value.type = schema::PrimitiveType_TensorListReserve; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_shape_type((int64_t)(type)); *output_size = 1; - auto status = AddOpInput(tf_op, 0, inputs); - if (status != RET_OK) { - return status; + for (int i = 0; i < 2; ++i) { + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; + } } - status = AddOpInput(tf_op, 1, inputs); - return status; + + return prim.release(); } + TFNodeRegistrar g_tfTensorListReserveParser("TensorListReserve", new TFTensorListReserveParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.h index a9c81ba830..4b2ce85433 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_reserve_parser.h @@ -28,8 +28,9 @@ class TFTensorListReserveParser : public TFNodeParser { TFTensorListReserveParser() = default; ~TFTensorListReserveParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.cc index ac86daebf4..83f04a9f60 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.cc @@ -19,58 +19,37 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/tensor_list_set_item.h" namespace mindspore { namespace lite { -STATUS TFTensorListSetItemParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF TensorListSetItemParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "New PrimitiveT failed"; - return RET_NULL_PTR; - } - auto attr = 
std::make_unique<schema::TensorListSetItemT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new attr failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TFTensorListSetItemParser::Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) { + auto prim = std::make_unique<ops::TensorListSetItem>(); tensorflow::AttrValue attr_value; if (!TensorFlowUtils::FindAttrValue(tf_op, "element_dtype", &attr_value)) { MS_LOG(ERROR) << "The element_dtype attr should be specified"; - return RET_ERROR; + return nullptr; } auto type = TensorFlowUtils::GetTFDataType(attr_value.type()); if (type == kTypeUnknown) { MS_LOG(ERROR) << "tensor_list_set_item element dtype must be known type"; - return RET_ERROR; - } - attr->elementDType = type; - - primitive->value.type = schema::PrimitiveType_TensorListSetItem; - primitive->value.value = attr.release(); - *primitiveC = PrimitiveC::Create(primitive.release()); - if (*primitiveC == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_ERROR; + return nullptr; } + prim->set_element_dtype((int64_t)(type)); *output_size = 1; for (int i = 0; i < 3; ++i) { - auto status = AddOpInput(tf_op, i, inputs); - if (status != RET_OK) { - return status; + if (AddOpInput(tf_op, i, inputs) != RET_OK) { + MS_LOG(ERROR) << "add op input " << i << " failed"; + return nullptr; } } - return RET_OK; + return prim.release(); } + TFNodeRegistrar g_tfTensorListSetItemParser("TensorListSetItem", new TFTensorListSetItemParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.h index b7c3f19049..5e7dde35c6 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.h +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_set_item_parser.h @@ -28,8 +28,9 @@ class TFTensorListSetItemParser : public TFNodeParser { TFTensorListSetItemParser() = default; ~TFTensorListSetItemParser() override = default; - STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override; + ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op, + const std::map<string, const tensorflow::NodeDef *> &tf_node_map, + std::vector<std::string> *inputs, int *output_size) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc index 18af91461c..9cbf2af1e7 100644 --- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc @@ -19,63 +19,44 @@ #include <map> #include <vector> #include "tools/converter/parser/tf/tf_node_parser_registry.h" +#include "ops/tensor_list_stack.h" namespace mindspore { namespace lite { -STATUS TFTensorListStackParser::Parse(const tensorflow::NodeDef &tf_op, - const std::map<string, const tensorflow::NodeDef *> &tf_node_map, - PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) { - MS_LOG(INFO) << "TF TensorListStackParser"; - if (primitiveC == nullptr || output_size == nullptr) { - MS_LOG(ERROR) << "primitiveC is nullptr"; - return RET_NULL_PTR; - } - - auto primitive = 
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc
index 18af91461c..9cbf2af1e7 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.cc
@@ -19,63 +19,44 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/tensor_list_stack.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFTensorListStackParser::Parse(const tensorflow::NodeDef &tf_op,
-                                      const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-                                      PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF TensorListStackParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "New PrimitiveT failed";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::TensorListStackT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new attr failed";
-    return RET_NULL_PTR;
-  }
+ops::PrimitiveC *TFTensorListStackParser::Parse(const tensorflow::NodeDef &tf_op,
+                                                const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                                std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::TensorListStack>();
 
   tensorflow::AttrValue attr_value;
   if (!TensorFlowUtils::FindAttrValue(tf_op, "element_dtype", &attr_value)) {
     MS_LOG(ERROR) << "The element_dtype attr should be specified";
-    return RET_ERROR;
+    return nullptr;
   }
   auto type = TensorFlowUtils::GetTFDataType(attr_value.type());
   if (type == kTypeUnknown) {
     MS_LOG(ERROR) << "tensor_list_stack element_dtype must be known type";
-    return RET_ERROR;
+    return nullptr;
   }
-  attr->elementDType = type;
+  prim->set_element_dtype((int64_t)(type));
 
   if (!TensorFlowUtils::FindAttrValue(tf_op, "num_elements", &attr_value)) {
-    MS_LOG(ERROR) << "The element_dtype attr should be specified";
-    return RET_ERROR;
-  }
-  attr->numElements = attr_value.i();
-
-  primitive->value.type = schema::PrimitiveType_TensorListStack;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
+    MS_LOG(ERROR) << "The num_elements attr should be specified";
+    return nullptr;
   }
+  prim->set_num_elements(attr_value.i());
 
   *output_size = 1;
-  auto status = AddOpInput(tf_op, 0, inputs);
-  if (status != RET_OK) {
-    return status;
+  for (int i = 0; i < 2; ++i) {
+    if (AddOpInput(tf_op, i, inputs) != RET_OK) {
+      MS_LOG(ERROR) << "add op input failed";
+      return nullptr;
+    }
   }
-  status = AddOpInput(tf_op, 1, inputs);
-  return status;
+
+  return prim.release();
 }
+
 TFNodeRegistrar g_tfTensorListStackParser("TensorListStack", new TFTensorListStackParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.h
index f39777b447..c47a4bc1c2 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_tensor_list_stack_parser.h
@@ -28,8 +28,9 @@ class TFTensorListStackParser : public TFNodeParser {
   TFTensorListStackParser() = default;
   ~TFTensorListStackParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.cc
index 1b2ebb5183..bef8ce0bed 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.cc
@@ -19,63 +19,38 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/fusion/tile_fusion.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFTileParser::Parse(const tensorflow::NodeDef &tf_op,
-                           const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC,
-                           std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF TileParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "New PrimitiveT failed";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::TileT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new attr failed";
-    return RET_NULL_PTR;
-  }
-
-  *output_size = 1;
-  auto status = AddOpInput(tf_op, 0, inputs);
-
-  auto multiplies_node = GetConstInputNode(tf_node_map, tf_op.input(1));
-  tensorflow::AttrValue attr_value;
-  if (multiplies_node != nullptr && TensorFlowUtils::FindAttrValue(*multiplies_node, "value", &attr_value)) {
-    auto tensor_proto = attr_value.tensor();
-    if (tensor_proto.int_val_size() > 0) {
-      for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
-        attr->dims.push_back(i);
-        attr->multiples.push_back(tensor_proto.int_val(i));
-      }
-    } else {
-      auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
-      auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
-      for (size_t i = 0; i < data_num; ++i) {
-        attr->dims.push_back(i);
-        attr->multiples.push_back(data[i]);
-      }
-    }
-  } else {
-    AddOpInput(tf_op, 1, inputs);
-  }
-
-  primitive->value.type = schema::PrimitiveType_Tile;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
-  }
-
-  return status;
+ops::PrimitiveC *TFTileParser::Parse(const tensorflow::NodeDef &tf_op,
+                                     const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                     std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::TileFusion>();
+
+  // Read the multiples constant from the second input, as the old parser did; without
+  // this lookup attr_value would stay default-initialized and dims would always be empty.
+  auto multiples_node = GetConstInputNode(tf_node_map, tf_op.input(1));
+  tensorflow::AttrValue attr_value;
+  std::vector<int64_t> dims;
+  if (multiples_node != nullptr && TensorFlowUtils::FindAttrValue(*multiples_node, "value", &attr_value)) {
+    const auto &tensor_proto = attr_value.tensor();
+    if (tensor_proto.int_val_size() > 0) {
+      for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
+        dims.push_back(i);
+      }
+    } else {
+      auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
+      for (size_t i = 0; i < data_num; ++i) {
+        dims.push_back(i);
+      }
+    }
+  }
+  prim->set_dims(dims);
+
+  *output_size = 1;
+  if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) {
+    MS_LOG(ERROR) << "add op input failed";
+    return nullptr;
+  }
+  return prim.release();
 }
+
 TFNodeRegistrar g_tfTileParser("Tile", new TFTileParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.h
index fee9e31639..587face512 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_tile_parser.h
@@ -28,8 +28,9 @@ class TFTileParser : public TFNodeParser {
   TFTileParser() = default;
   ~TFTileParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
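Both the Tile parser above and the Transpose parser further down read a small integer vector out of a const input node. TensorFlow stores such a constant either as repeated int_val entries or as a packed tensor_content byte string, so both branches are needed. A minimal sketch of the shared pattern, extracted from the code this patch removes (the helper name ReadInt32Values is illustrative, not part of the patch):

  std::vector<int32_t> ReadInt32Values(const tensorflow::TensorProto &tensor_proto) {
    std::vector<int32_t> values;
    if (tensor_proto.int_val_size() > 0) {
      // Small constants: one proto field per element.
      for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
        values.push_back(tensor_proto.int_val(i));
      }
    } else {
      // Larger constants: raw little-endian bytes in tensor_content.
      auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
      auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
      for (size_t i = 0; i < data_num; ++i) {
        values.push_back(data[i]);
      }
    }
    return values;
  }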
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.cc
index 5fd007583c..22f7e415e6 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.cc
@@ -19,57 +19,29 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/fusion/topk_fusion.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFTopKParser::Parse(const tensorflow::NodeDef &tf_op,
-                           const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC,
-                           std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF TopKParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "New PrimitiveT failed";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::TopKT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new attr failed";
-    return RET_NULL_PTR;
-  }
-
-  // sorted
+ops::PrimitiveC *TFTopKParser::Parse(const tensorflow::NodeDef &tf_op,
+                                     const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                     std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::TopKFusion>();
+
   tensorflow::AttrValue attr_value;
   if (!TensorFlowUtils::FindAttrValue(tf_op, "sorted", &attr_value)) {
-    MS_LOG(ERROR) << "The begin_mask attr should be specified";
-    return RET_ERROR;
-  }
-  attr->sorted = attr_value.i();
-
-  primitive->value.type = schema::PrimitiveType_TopK;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
+    MS_LOG(ERROR) << "The sorted attr should be specified";
+    return nullptr;
   }
+  prim->set_sorted(attr_value.b());
 
   *output_size = 2;
-  auto status = AddOpInput(tf_op, 0, inputs);
-  if (status != RET_OK) {
-    MS_LOG(ERROR) << "Add Op input failed.";
-    return status;
-  }
-  status = AddOpInput(tf_op, 1, inputs);
-  if (status != RET_OK) {
+  if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) {
     MS_LOG(ERROR) << "Add Op input failed.";
-    return status;
+    return nullptr;
   }
-  return status;
+
+  return prim.release();
 }
 
 TFNodeRegistrar g_tfTopKV2Parser("TopKV2", new TFTopKParser());
 }  // namespace lite
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.h
index addb43536d..3fafb4aebd 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_topk_parser.h
@@ -29,8 +29,9 @@ class TFTopKParser : public TFNodeParser {
   TFTopKParser() = default;
   ~TFTopKParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
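The switch from attr_value.i() to attr_value.b() above matters because TopKV2 declares sorted as a bool attr, and tensorflow::AttrValue is a protobuf oneof: reading the wrong member silently yields that member's default instead of failing. A short illustration of the pitfall:

  tensorflow::AttrValue attr_value;
  if (TensorFlowUtils::FindAttrValue(tf_op, "sorted", &attr_value)) {
    bool sorted = attr_value.b();   // bool attr, so b() is the matching accessor
    int64_t wrong = attr_value.i(); // wrong oneof member: returns 0, no error raised
  }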
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.cc
index 9aba20133d..7aee38e567 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.cc
@@ -19,72 +19,24 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/transpose.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFTransposeParser::Parse(const tensorflow::NodeDef &tf_op,
-                                const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-                                PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF TransposeParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "New PrimitiveT failed";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::TransposeT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new attr failed";
-    return RET_NULL_PTR;
-  }
-  attr->conjugate = false;
-
-  auto status = AddOpInput(tf_op, 0, inputs);
-  if (status != RET_OK) {
-    return status;
-  }
-
-  auto perm_node = GetConstInputNode(tf_node_map, tf_op.input(1));
-  if (perm_node == nullptr) {
-    status = AddOpInput(tf_op, 1, inputs);
-    if (status != RET_OK) {
-      return status;
-    }
-  } else {
-    tensorflow::AttrValue attr_value;
-    if (!TensorFlowUtils::FindAttrValue(*perm_node, "value", &attr_value)) {
-      MS_LOG(ERROR) << "The value attr should be specified";
-      return RET_ERROR;
-    }
-    auto tensor_proto = attr_value.tensor();
-    if (tensor_proto.int_val_size() > 0) {
-      for (int i = 0; i < tensor_proto.int_val_size(); ++i) {
-        attr->perm.push_back(tensor_proto.int_val(i));
-      }
-    } else {
-      auto data_num = tensor_proto.tensor_content().size() / sizeof(int32_t);
-      auto data = reinterpret_cast<const int32_t *>(tensor_proto.tensor_content().data());
-      for (size_t i = 0; i < data_num; ++i) {
-        attr->perm.push_back(data[i]);
-      }
-    }
-  }
-
-  primitive->value.type = schema::PrimitiveType_Transpose;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
+ops::PrimitiveC *TFTransposeParser::Parse(const tensorflow::NodeDef &tf_op,
+                                          const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                          std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::Transpose>();
+
+  *output_size = 1;
+  if (AddOpInput(tf_op, 0, inputs) != RET_OK || AddOpInput(tf_op, 1, inputs) != RET_OK) {
+    MS_LOG(ERROR) << "add op input failed";
+    return nullptr;
   }
-  *output_size = 1;
-  return status;
+  return prim.release();
 }
+
 TFNodeRegistrar g_tfTransposeParser("Transpose", new TFTransposeParser());
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.h
index 1dd30d0532..cfa6d78fb2 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_transpose_parser.h
@@ -28,8 +28,9 @@ class TFTransposeParser : public TFNodeParser {
   TFTransposeParser() = default;
   ~TFTransposeParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.cc
index 0bffaade6b..c5093b6347 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.cc
@@ -19,49 +19,37 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/uniform_real.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFUniformRealParser::Parse(const tensorflow::NodeDef &tf_op,
-                                  const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-                                  PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) {
+ops::PrimitiveC *TFUniformRealParser::Parse(const tensorflow::NodeDef &tf_op,
+                                            const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                            std::vector<std::string> *inputs, int *output_size) {
   MS_LOG(DEBUG) << "TF UniformRealParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
+  if (output_size == nullptr) {
+    MS_LOG(ERROR) << "output_size is nullptr";
+    return nullptr;
   }
 
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "New PrimitiveT failed";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::UniformRealT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new attr failed";
-    return RET_NULL_PTR;
-  }
+  auto prim = std::make_unique<ops::UniformReal>();
 
   tensorflow::AttrValue attr_value;
   if (!TensorFlowUtils::FindAttrValue(tf_op, "seed", &attr_value)) {
     MS_LOG(ERROR) << "The seed attr should be specified";
-    return RET_ERROR;
+    return nullptr;
   }
-  attr->seed = attr_value.i();
+  prim->set_seed(attr_value.i());
   if (!TensorFlowUtils::FindAttrValue(tf_op, "seed2", &attr_value)) {
     MS_LOG(ERROR) << "The seed2 attr should be specified";
-    return RET_ERROR;
-  }
-  attr->seed2 = attr_value.i();
-  primitive->value.type = schema::PrimitiveType_UniformReal;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
+    return nullptr;
   }
+  prim->set_seed2(attr_value.i());
 
   *output_size = 1;
   auto status = AddOpInput(tf_op, 0, inputs);
-  return status;
+  if (status != RET_OK) {
+    return nullptr;
+  }
+  return prim.release();
 }
 
 TFNodeRegistrar g_tfRandomUniformParser("RandomUniform", new TFUniformRealParser());
 }  // namespace lite
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.h
index 7cbee4b80c..77177a5389 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_uniform_real_parser.h
@@ -28,8 +28,9 @@ class TFUniformRealParser : public TFNodeParser {
   TFUniformRealParser() = default;
   ~TFUniformRealParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_util.cc b/mindspore/lite/tools/converter/parser/tf/tf_util.cc
index ebedec45c0..b99a79d3da 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_util.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_util.cc
@@ -69,22 +69,9 @@ TypeId TensorFlowUtils::ParseAttrDataType(const tensorflow::NodeDef &node_def, c
   return GetTFDataType(attr_value.type());
 }
 
-schema::Format TensorFlowUtils::ParseNodeFormat(const tensorflow::NodeDef &node_def) {
-  tensorflow::AttrValue attr_value;
-  if (!FindAttrValue(node_def, "data_format", &attr_value)) {
-    MS_LOG(ERROR) << "Find attr data_format failed";
-    return schema::Format_NUM_OF_FORMAT;
-  }
-  if (attr_value.s() == "NHWC") {
-    return schema::Format_NHWC;
-  } else if (attr_value.s() == "NCHW") {
-    return schema::Format_NCHW;
-  }
-  return schema::Format_NUM_OF_FORMAT;
-}
-
 bool TensorFlowUtils::DecodeInt64(std::string_view *str_view, uint64_t *value) {
   if (str_view == nullptr || value == nullptr) {
+    if (value != nullptr) {
+      *value = 0;
+    }
     MS_LOG(ERROR) << "str_view or value is nullptr";
     return false;
   }
@@ -121,7 +108,7 @@ std::string TensorFlowUtils::GetFlattenNodeName(const std::string &input_name)
                                        std::sregex_token_iterator());
   std::string ret = input_name;
   if (input_splits.size() == 3) {
-    if (input_splits[2].compare("0") == 0) {
+    if (input_splits[2] == "0") {
       ret = input_splits[0];
     } else {
       ret = input_splits[0] + ":" + input_splits[2];  // multi output node
@@ -140,5 +127,17 @@ std::string TensorFlowUtils::GetNodeName(const std::string &input_name) {
   }
   return input_name;
 }
+
+mindspore::Format TensorFlowUtils::ParseNodeFormat(const tensorflow::NodeDef &node_def) {
+  tensorflow::AttrValue attr_value;
+  if (!FindAttrValue(node_def, "data_format", &attr_value)) {
+    MS_LOG(ERROR) << "Find attr data_format failed";
+    return mindspore::Format::NCHW;
+  }
+  if (attr_value.s() == "NHWC") {
+    return mindspore::Format::NHWC;
+  }
+  return mindspore::Format::NCHW;
+}
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_util.h b/mindspore/lite/tools/converter/parser/tf/tf_util.h
index d93cdebacb..1a30ee78b5 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_util.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_util.h
@@ -23,6 +23,7 @@
 #include "ir/dtype/type_id.h"
 #include "include/errorcode.h"
 #include "schema/inner/model_generated.h"
+#include "mindspore/core/utils/check_convert_utils.h"
 
 namespace mindspore {
 namespace lite {
@@ -32,10 +33,10 @@ class TensorFlowUtils {
   static bool FindAttrValue(const tensorflow::NodeDef &node_def, const std::string &attr_name,
                             tensorflow::AttrValue *attr_value);
   static TypeId ParseAttrDataType(const tensorflow::NodeDef &node_def, const std::string &attr_name);
-  static schema::Format ParseNodeFormat(const tensorflow::NodeDef &node_def);
   static bool DecodeInt64(std::string_view *str_view, uint64_t *value);
   static std::string GetFlattenNodeName(const std::string &input_name);
   static std::string GetNodeName(const std::string &input_name);
+  static mindspore::Format ParseNodeFormat(const tensorflow::NodeDef &node_def);
 };
 }  // namespace lite
 }  // namespace mindspore
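ParseNodeFormat now returns the unified mindspore::Format instead of the schema enum, and it deliberately falls back to NCHW both when data_format is missing and for any value other than "NHWC" (the old version returned Format_NUM_OF_FORMAT so callers could detect the failure). Call sites therefore no longer need an error branch; a typical use, sketched under the assumption that the op being built exposes a set_format setter:

  // Either NHWC or NCHW is returned; there is no error value to check anymore.
  auto format = TensorFlowUtils::ParseNodeFormat(node_def);
  prim->set_format(format);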
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_where_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_where_parser.cc
index 78471d48b9..948499228a 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_where_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_where_parser.cc
@@ -19,42 +19,21 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/where.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFWhereParser::Parse(const tensorflow::NodeDef &tf_op,
-                            const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC,
-                            std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF WhereParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "primitive is nullptr";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::WhereT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return RET_NULL_PTR;
-  }
-
-  primitive->value.type = schema::PrimitiveType_Where;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
-  }
+ops::PrimitiveC *TFWhereParser::Parse(const tensorflow::NodeDef &tf_op,
+                                      const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                      std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::Where>();
 
   *output_size = tf_op.input_size();
   for (int i = 0; i < tf_op.input_size(); i++) {
     inputs->emplace_back(tf_op.input(i));
   }
-  return RET_OK;
+
+  return prim.release();
 }
 
 TFNodeRegistrar g_tfWhereParser("Where", new TFWhereParser());
 }  // namespace lite
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_where_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_where_parser.h
index 6212e875eb..e669af5066 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_where_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_where_parser.h
@@ -29,8 +29,9 @@ class TFWhereParser : public TFNodeParser {
   TFWhereParser() = default;
   ~TFWhereParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_while_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_while_parser.cc
index f4c8869bb5..32b92f6d82 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_while_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_while_parser.cc
@@ -19,43 +19,23 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/while.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFWhileParser::Parse(const tensorflow::NodeDef &tf_op,
-                            const std::map<string, const tensorflow::NodeDef *> &tf_node_map, PrimitiveC **primitiveC,
-                            std::vector<std::string> *inputs, int *output_size) {
-  MS_LOG(INFO) << "TF WhileParser";
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "primitive is nullptr";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::WhileT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return RET_NULL_PTR;
-  }
-
-  primitive->value.type = schema::PrimitiveType_While;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
-  }
+ops::PrimitiveC *TFWhileParser::Parse(const tensorflow::NodeDef &tf_op,
+                                      const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                      std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::While>();
 
   *output_size = tf_op.input_size();
   for (int i = 0; i < tf_op.input_size(); i++) {
     inputs->emplace_back(tf_op.input(i));
   }
-  return RET_OK;
+
+  return prim.release();
 }
+
 TFNodeRegistrar g_tfStatelessWhileParser("StatelessWhile", new TFWhileParser());
 TFNodeRegistrar g_tfWhileParser("While", new TFWhileParser());
 }  // namespace lite
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_while_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_while_parser.h
index 287d5cb43b..7de0c1880d 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_while_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_while_parser.h
@@ -29,8 +29,9 @@ class TFWhileParser : public TFNodeParser {
   TFWhileParser() = default;
   ~TFWhileParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.cc b/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.cc
index b72a6ecc92..9a1a2f5b8d 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.cc
@@ -19,41 +19,21 @@
 #include <map>
 #include <vector>
 #include "tools/converter/parser/tf/tf_node_parser_registry.h"
+#include "ops/zeros_like.h"
 
 namespace mindspore {
 namespace lite {
-STATUS TFZerosLikeParser::Parse(const tensorflow::NodeDef &tf_op,
-                                const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-                                PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) {
-  if (primitiveC == nullptr || output_size == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_NULL_PTR;
-  }
-
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (primitive == nullptr) {
-    MS_LOG(ERROR) << "primitive is nullptr";
-    return RET_NULL_PTR;
-  }
-  auto attr = std::make_unique<schema::ZerosLikeT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return RET_NULL_PTR;
-  }
-
-  primitive->value.type = schema::PrimitiveType_ZerosLike;
-  primitive->value.value = attr.release();
-  *primitiveC = PrimitiveC::Create(primitive.release());
-  if (*primitiveC == nullptr) {
-    MS_LOG(ERROR) << "primitiveC is nullptr";
-    return RET_ERROR;
-  }
+ops::PrimitiveC *TFZerosLikeParser::Parse(const tensorflow::NodeDef &tf_op,
+                                          const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                                          std::vector<std::string> *inputs, int *output_size) {
+  auto prim = std::make_unique<ops::ZerosLike>();
 
   *output_size = tf_op.input_size();
   for (int i = 0; i < tf_op.input_size(); i++) {
     inputs->emplace_back(tf_op.input(i));
   }
-  return RET_OK;
+
+  return prim.release();
 }
 
 TFNodeRegistrar g_tfZerosLikeParser("ZerosLike", new TFZerosLikeParser());
 }  // namespace lite
diff --git a/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.h b/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.h
index 5be3bfd272..b09c85e2b7 100644
--- a/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.h
+++ b/mindspore/lite/tools/converter/parser/tf/tf_zeros_like_parser.h
@@ -28,8 +28,9 @@ class TFZerosLikeParser : public TFNodeParser {
   TFZerosLikeParser() = default;
   ~TFZerosLikeParser() override = default;
 
-  STATUS Parse(const tensorflow::NodeDef &tf_op, const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
-               PrimitiveC **primitiveC, std::vector<std::string> *inputs, int *output_size) override;
+  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
+                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
+                         std::vector<std::string> *inputs, int *output_size) override;
 };
 }  // namespace lite
 }  // namespace mindspore
LeakyRelu attr failed"; return nullptr; } - attr->type = kActivationTypeMap.find(GetMSOpType(tflite_op_type))->second; - if (attr->type == schema::ActivationType_LEAKY_RELU) { - const auto &tflite_attr = tflite_op->builtin_options.AsLeakyReluOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op: " << GetMSOpType(tflite_op_type) << " attr failed"; - return nullptr; - } - attr->alpha = tflite_attr->alpha; - } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Activation; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_alpha(tflite_attr->alpha); + + return prim.release(); +} + +ops::PrimitiveC *TflitePReLUParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::PReLUFusion>(); + + prim->set_channel_shared(true); + + return prim.release(); +} + +ops::PrimitiveC *TfliteTanhParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::TANH); + + return prim.release(); +} + +ops::PrimitiveC *TfliteHardSwishParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::HSWISH); + + return prim.release(); +} + +ops::PrimitiveC *TfliteLogisticParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Activation>(); + + prim->set_activation_type(mindspore::ActivationType::SIGMOID); + + return prim.release(); } -TfliteNodeRegister g_TfliteReluParser(tflite::BuiltinOperator_RELU, new TfliteActivationParser()); -TfliteNodeRegister g_TfliteRelu6Parser(tflite::BuiltinOperator_RELU6, new TfliteActivationParser()); -TfliteNodeRegister g_TfliteTanhParser(tflite::BuiltinOperator_TANH, new TfliteActivationParser()); -TfliteNodeRegister g_TfliteSwishParser(tflite::BuiltinOperator_HARD_SWISH, new TfliteActivationParser()); -TfliteNodeRegister g_tfliteLogisticParser(tflite::BuiltinOperator_LOGISTIC, new TfliteActivationParser()); -TfliteNodeRegister g_TfliteLeakyReluParser(tflite::BuiltinOperator_LEAKY_RELU, new TfliteActivationParser()); -} // namespace mindspore::lite +TfliteNodeRegister g_TfliteReluParser(tflite::BuiltinOperator_RELU, new TfliteReluParser()); +TfliteNodeRegister g_TfliteRelu6Parser(tflite::BuiltinOperator_RELU6, new TfliteRelu6Parser()); +TfliteNodeRegister g_TflitePReLUParser(tflite::BuiltinOperator_PRELU, new TflitePReLUParser()); +TfliteNodeRegister g_TfliteLeakyReluParser(tflite::BuiltinOperator_LEAKY_RELU, new TfliteLeakyReluParser()); +TfliteNodeRegister g_TfliteTanhParser(tflite::BuiltinOperator_TANH, new TfliteTanhParser()); +TfliteNodeRegister g_TfliteSwishParser(tflite::BuiltinOperator_HARD_SWISH, new TfliteHardSwishParser()); +TfliteNodeRegister g_tfliteLogisticParser(tflite::BuiltinOperator_LOGISTIC, new TfliteLogisticParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h index 15977ab45b..11cb90c364 100644 --- 
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
index 15977ab45b..11cb90c364 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_activation_parser.h
@@ -23,14 +23,64 @@
 #include "tools/converter/parser/tflite/tflite_node_parser.h"
 #include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
 
-namespace mindspore::lite {
-class TfliteActivationParser : public TfliteNodeParser {
+namespace mindspore {
+namespace lite {
+class TfliteReluParser : public TfliteNodeParser {
  public:
-  TfliteActivationParser() : TfliteNodeParser("node_name") {}
+  TfliteReluParser() : TfliteNodeParser("Relu") {}
 
-  lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                       const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
 };
-}  // namespace mindspore::lite
+
+class TfliteRelu6Parser : public TfliteNodeParser {
+ public:
+  TfliteRelu6Parser() : TfliteNodeParser("Relu6") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+
+class TfliteLeakyReluParser : public TfliteNodeParser {
+ public:
+  TfliteLeakyReluParser() : TfliteNodeParser("LeakyRelu") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+
+class TflitePReLUParser : public TfliteNodeParser {
+ public:
+  TflitePReLUParser() : TfliteNodeParser("PReLU") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+
+class TfliteTanhParser : public TfliteNodeParser {
+ public:
+  TfliteTanhParser() : TfliteNodeParser("Tanh") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+
+class TfliteHardSwishParser : public TfliteNodeParser {
+ public:
+  TfliteHardSwishParser() : TfliteNodeParser("HardSwish") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+
+class TfliteLogisticParser : public TfliteNodeParser {
+ public:
+  TfliteLogisticParser() : TfliteNodeParser("Logistic") {}
+
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+};
+}  // namespace lite
+}  // namespace mindspore
 #endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ACTIVATION_PARSER_H
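Splitting TfliteActivationParser into one class per builtin operator moves the type dispatch out of ParseLitePrimitive's if/else chain and into the registry: each TfliteNodeRegister above binds one tflite::BuiltinOperator to one parser instance. A minimal sketch of the lookup side (the registry method names are assumptions for illustration, not shown in this patch):

  auto op_type = tflite_model->operator_codes[tflite_op->opcode_index]->builtin_code;
  auto *parser = TfliteNodeParserRegistry::GetInstance()->GetNodeParser(op_type);
  if (parser == nullptr) {
    MS_LOG(ERROR) << "unsupported tflite op " << op_type;
    return nullptr;
  }
  // The parser already knows which operator it handles, so no switch is needed here.
  ops::PrimitiveC *prim = parser->Parse(tflite_op, tflite_model);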
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
index 6a7c6c1d0c..72a5a1d4dc 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.cc
@@ -18,22 +18,16 @@
 #include "tools/converter/parser/tflite/tflite_addn_parser.h"
 #include <vector>
 #include <memory>
-#include <map>
-#include "src/ops/addn.h"
+#include "ops/addn.h"
 
-namespace mindspore::lite {
-lite::PrimitiveC *TfliteAddNParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                       const std::unique_ptr<tflite::ModelT> &tflite_model) {
-  auto attr = std::make_unique<schema::AddNT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
-    return nullptr;
-  }
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  primitive->value.type = schema::PrimitiveType_AddN;
-  primitive->value.value = attr.release();
-  return PrimitiveC::Create(primitive.release());
+namespace mindspore {
+namespace lite {
+ops::PrimitiveC *TfliteAddNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                         const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::AddN>();
+  return prim.release();
 }
 
 TfliteNodeRegister g_tfliteAddNParser(tflite::BuiltinOperator_ADD_N, new TfliteAddNParser());
-}  // namespace mindspore::lite
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h
index 12a613247f..9babfad541 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_addn_parser.h
@@ -29,8 +29,8 @@ class TfliteAddNParser : public TfliteNodeParser {
  public:
   TfliteAddNParser() : TfliteNodeParser("AddN") {}
 
-  lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                       const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
 };
 }  // namespace lite
 }  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
index bd304270a9..f8d240c4b8 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.cc
@@ -18,26 +18,31 @@
 #include <memory>
 #include <vector>
 #include <map>
+#include "ops/fusion/arg_max_fusion.h"
 
-namespace mindspore::lite {
-PrimitiveC *TfliteArgmaxParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                   const std::unique_ptr<tflite::ModelT> &tflite_model) {
+namespace mindspore {
+namespace lite {
+ops::PrimitiveC *TfliteArgmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                           const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::ArgMaxFusion>();
+
+  prim->set_keep_dims(false);
+  prim->set_out_max_value(false);
+  prim->set_top_k(1);
+
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
   const auto &tflite_subgraph = tflite_model->subgraphs.front();
-  std::unique_ptr<schema::ArgMaxT> attr = std::make_unique<schema::ArgMaxT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
+  if (tflite_subgraph == nullptr) {
+    MS_LOG(ERROR) << "tflite_subgraph is nullptr";
     return nullptr;
   }
-
-  attr->outMaxValue = false;
-  attr->topK = 1;
-  attr->keepDims = false;
-  attr->axisType = 1;
-
-  // get axis attr
-  auto axis_idx = tflite_op->inputs[1];
-  auto buffer_idx = tflite_subgraph->tensors[axis_idx]->buffer;
-  auto &buf_data = tflite_model->buffers[buffer_idx];
+  const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[1]);
+  if (axis_tensor == nullptr) {
+    MS_LOG(ERROR) << "axis_tensor is nullptr";
+    return nullptr;
+  }
+  const auto &buf_data = tflite_model->buffers.at(axis_tensor->buffer);
   if (buf_data == nullptr) {
     MS_LOG(ERROR) << "the buf data is null";
     return nullptr;
@@ -47,12 +52,11 @@ PrimitiveC *TfliteArgmaxParser::ParseLitePrimitive(const std::unique_ptr<tflite:
     MS_LOG(ERROR) << "the data is null";
     return nullptr;
   }
-  attr->axis = *(static_cast<int32_t *>(static_cast<void *>(data_ptr)));
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  primitive->value.type = schema::PrimitiveType_ArgMax;
-  primitive->value.value = attr.release();
-  return PrimitiveC::Create(primitive.release());
+  // The axis buffer holds an int32 scalar; read it at its own width and widen to int64.
+  prim->set_axis(static_cast<int64_t>(*(static_cast<int32_t *>(static_cast<void *>(data_ptr)))));
+
+  return prim.release();
 }
 
 TfliteNodeRegister g_tfliteArgmaxParser(tflite::BuiltinOperator_ARG_MAX, new TfliteArgmaxParser());
-}  // namespace mindspore::lite
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h
index 2b0cf6ded3..61c663bf4a 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmax_parser.h
@@ -29,8 +29,8 @@ class TfliteArgmaxParser : public TfliteNodeParser {
  public:
   TfliteArgmaxParser() : TfliteNodeParser("Argmax") {}
 
-  PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                 const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
 };
 }  // namespace lite
 }  // namespace mindspore
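The axis constant lives in the flatbuffer as raw bytes of the tensor's own element type, int32 in the case this parser previously handled, which is why the read above stays at int32 width and only widens afterwards (dereferencing the buffer as int64 would read past a 4-byte payload). When the element width is not known for certain, a bounds-checked memcpy is a safer way to pull the scalar out; a defensive sketch, not part of this patch:

  #include <cstring>

  int32_t axis = 0;
  if (buf_data->data.size() >= sizeof(axis)) {
    // BufferT::data is a std::vector<uint8_t>; copy exactly the bytes we own.
    memcpy(&axis, buf_data->data.data(), sizeof(axis));
  }
  prim->set_axis(static_cast<int64_t>(axis));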
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc
index 1e4deebdb5..55a7dcc066 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.cc
@@ -18,26 +18,31 @@
 #include <memory>
 #include <vector>
 #include <map>
+#include "ops/fusion/arg_min_fusion.h"
 
-namespace mindspore::lite {
-PrimitiveC *TfliteArgminParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                   const std::unique_ptr<tflite::ModelT> &tflite_model) {
+namespace mindspore {
+namespace lite {
+ops::PrimitiveC *TfliteArgminParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                           const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::ArgMinFusion>();
+
+  prim->set_keep_dims(false);
+  prim->set_out_max_value(false);
+  prim->set_top_k(1);
+
+  MS_ASSERT(tflite_op != nullptr);
+  MS_ASSERT(tflite_model != nullptr);
   const auto &tflite_subgraph = tflite_model->subgraphs.front();
-  std::unique_ptr<schema::ArgMinT> attr = std::make_unique<schema::ArgMinT>();
-  if (attr == nullptr) {
-    MS_LOG(ERROR) << "new op failed";
+  if (tflite_subgraph == nullptr) {
+    MS_LOG(ERROR) << "tflite_subgraph is nullptr";
     return nullptr;
   }
-
-  attr->outMaxValue = false;
-  attr->topK = 1;
-  attr->keepDims = false;
-  attr->axisType = 1;
-
-  // get axis attr
-  auto axis_idx = tflite_op->inputs[1];
-  auto buffer_idx = tflite_subgraph->tensors[axis_idx]->buffer;
-  auto &buf_data = tflite_model->buffers[buffer_idx];
+  const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[1]);
+  if (axis_tensor == nullptr) {
+    MS_LOG(ERROR) << "axis_tensor is nullptr";
+    return nullptr;
+  }
+  const auto &buf_data = tflite_model->buffers.at(axis_tensor->buffer);
   if (buf_data == nullptr) {
     MS_LOG(ERROR) << "the buf data is null";
     return nullptr;
@@ -47,12 +52,11 @@ PrimitiveC *TfliteArgminParser::ParseLitePrimitive(const std::unique_ptr<tflite:
     MS_LOG(ERROR) << "the data is null";
     return nullptr;
   }
-  attr->axis = *(static_cast<int32_t *>(static_cast<void *>(data_ptr)));
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  primitive->value.type = schema::PrimitiveType_ArgMin;
-  primitive->value.value = attr.release();
-  return PrimitiveC::Create(primitive.release());
+  // Same int32 axis buffer as Argmax: read at int32 width, then widen to int64.
+  prim->set_axis(static_cast<int64_t>(*(static_cast<int32_t *>(static_cast<void *>(data_ptr)))));
+
+  return prim.release();
 }
 
 TfliteNodeRegister g_tfliteArgminParser(tflite::BuiltinOperator_ARG_MIN, new TfliteArgminParser());
-}  // namespace mindspore::lite
+}  // namespace lite
+}  // namespace mindspore
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h
index 7d18c75123..87a69b2029 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_argmin_parser.h
@@ -23,14 +23,16 @@
 #include "tools/converter/parser/tflite/tflite_node_parser.h"
 #include "tools/converter/parser/tflite/tflite_node_parser_registry.h"
 
-namespace mindspore::lite {
+namespace mindspore {
+namespace lite {
 class TfliteArgminParser : public TfliteNodeParser {
  public:
   TfliteArgminParser() : TfliteNodeParser("Argmin") {}
 
-  PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                 const std::unique_ptr<tflite::ModelT> &tflite_model) override;
+  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                         const std::unique_ptr<tflite::ModelT> &tflite_model) override;
 };
-}  // namespace mindspore::lite
+}  // namespace lite
+}  // namespace mindspore
 #endif  // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_ARGMIN_PARSER_H
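The arithmetic parser rewrite below splits three catch-all parsers into one class per operator, and every binary parser (ADD, SUB, MUL, DIV) forwards tflite's fused_activation_function through GetActivationFunctionType, so fused ReLU/ReLU6 information stays on the new AddFusion/SubFusion/MulFusion/DivFusion primitives instead of becoming a separate activation node. The mapping that helper has to perform is essentially the following sketch (the real implementation lives in tflite_util and may cover more cases):

  mindspore::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType type) {
    switch (type) {
      case tflite::ActivationFunctionType_RELU:
        return mindspore::ActivationType::RELU;
      case tflite::ActivationFunctionType_RELU6:
        return mindspore::ActivationType::RELU6;
      case tflite::ActivationFunctionType_TANH:
        return mindspore::ActivationType::TANH;
      default:
        // NONE and anything unrecognized: no fused activation.
        return mindspore::ActivationType::NO_ACTIVATION;
    }
  }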
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc
index db6309b00b..1ff24bb3b5 100644
--- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc
+++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.cc
@@ -17,348 +17,278 @@
 #include "tools/converter/parser/tflite/tflite_arithmetic_parser.h"
 #include <vector>
 #include <memory>
-#include <string>
-
-namespace mindspore::lite {
-PrimitiveC *TfliteDoubleInputOpParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                          const std::unique_ptr<tflite::ModelT> &tflite_model) {
-  auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code;
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (tflite_op_type == tflite::BuiltinOperator_ADD) {
-    MS_LOG(DEBUG) << "parse TfliteAddParser";
-    auto attr = std::make_unique<schema::AddT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    const auto &tfliteAttr = tflite_op->builtin_options.AsAddOptions();
-    if (nullptr == tfliteAttr) {
-      MS_LOG(ERROR) << "get op: " << tflite_op_type << " attr failed";
-      return nullptr;
-    }
-    attr->activationType = GetActivationFunctionType(tfliteAttr->fused_activation_function);
-    primitive->value.type = schema::PrimitiveType_Add;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_SUB) {
-    MS_LOG(DEBUG) << "parse TfliteSubParser";
-    auto attr = std::make_unique<schema::SubT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    const auto &tfliteAttr = tflite_op->builtin_options.AsSubOptions();
-    if (nullptr == tfliteAttr) {
-      MS_LOG(ERROR) << "get op: " << tflite_op_type << " attr failed";
-      return nullptr;
-    }
-    attr->activationType = GetActivationFunctionType(tfliteAttr->fused_activation_function);
-    primitive->value.type = schema::PrimitiveType_Sub;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_MUL) {
-    MS_LOG(DEBUG) << "parse TfliteMulParser";
-    auto attr = std::make_unique<schema::MulT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    const auto &tfliteAttr = tflite_op->builtin_options.AsMulOptions();
-    if (nullptr == tfliteAttr) {
-      MS_LOG(ERROR) << "get op: " << tflite_op_type << " attr failed";
-      return nullptr;
-    }
-    attr->activationType = GetActivationFunctionType(tfliteAttr->fused_activation_function);
-    primitive->value.type = schema::PrimitiveType_Mul;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_DIV) {
-    MS_LOG(DEBUG) << "parse TfliteDivParser";
-    auto attr = std::make_unique<schema::DivT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    const auto &tfliteAttr = tflite_op->builtin_options.AsDivOptions();
-    if (nullptr == tfliteAttr) {
-      MS_LOG(ERROR) << "get op: " << tflite_op_type << " attr failed";
-      return nullptr;
-    }
-    attr->activationType = GetActivationFunctionType(tfliteAttr->fused_activation_function);
-    primitive->value.type = schema::PrimitiveType_Div;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_FLOOR_DIV) {
-    MS_LOG(DEBUG) << "parse TfliteFloorDivParser";
-    std::unique_ptr<schema::FloorDivT> attr = std::make_unique<schema::FloorDivT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_FloorDiv;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_FLOOR_MOD) {
-    MS_LOG(DEBUG) << "parse TfliteFloorModParser";
-    auto attr = std::make_unique<schema::FloorModT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_FloorMod;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_SQUARED_DIFFERENCE) {
-    MS_LOG(DEBUG) << "parse TfliteSquaredDifferenceParser";
-    auto attr = std::make_unique<schema::SquaredDifferenceT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_SquaredDifference;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_POW) {
-    MS_LOG(DEBUG) << "parse TflitePowParser";
-    auto attr = std::make_unique<schema::PowerT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    attr->power = 1.0f;
-    attr->scale = 1.0f;
-    attr->shift = 0.0f;
-    primitive->value.type = schema::PrimitiveType_Power;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_MAXIMUM) {
-    MS_LOG(DEBUG) << "parse TfliteMaximumParser";
-    auto attr = std::make_unique<schema::MaximumT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Maximum;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_MINIMUM) {
-    MS_LOG(DEBUG) << "parse TfliteMinimumParser";
-    auto attr = std::make_unique<schema::MinimumT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Minimum;
-    primitive->value.value = attr.release();
-  } else {
-    MS_LOG(ERROR) << "op hasn't been supported";
-    return nullptr;
-  }
-  return PrimitiveC::Create(primitive.release());
-}
+#include "ops/abs.h"
+#include "ops/cos.h"
+#include "ops/fusion/add_fusion.h"
+#include "ops/fusion/mul_fusion.h"
+#include "ops/fusion/div_fusion.h"
+#include "ops/fusion/sub_fusion.h"
+#include "ops/fusion/exp_fusion.h"
+#include "ops/fusion/pow_fusion.h"
+#include "ops/squared_difference.h"
+#include "ops/square.h"
+#include "ops/sqrt.h"
+#include "ops/rsqrt.h"
+#include "ops/sin.h"
+#include "ops/log.h"
+#include "ops/round.h"
+#include "ops/neg.h"
+#include "ops/maximum.h"
+#include "ops/minimum.h"
+#include "ops/floor.h"
+#include "ops/floor_div.h"
+#include "ops/floor_mod.h"
+#include "ops/ceil.h"
+#include "ops/equal.h"
+#include "ops/greater.h"
+#include "ops/greater_equal.h"
+#include "ops/less.h"
+#include "ops/less_equal.h"
+#include "ops/not_equal.h"
+
+namespace mindspore {
+namespace lite {
+ops::PrimitiveC *TfliteAddParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                        const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::AddFusion>();
+
+  MS_ASSERT(tflite_op != nullptr);
+  const auto &tflite_attr = tflite_op->builtin_options.AsAddOptions();
+  if (tflite_attr == nullptr) {
+    MS_LOG(ERROR) << "get AddFusion attr failed";
+    return nullptr;
+  }
+  prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function));
+
+  return prim.release();
+}
 
-PrimitiveC *TfliteSingleInputOpParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                          const std::unique_ptr<tflite::ModelT> &tflite_model) {
-  auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code;
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-  if (tflite_op_type == tflite::BuiltinOperator_ABS) {
-    MS_LOG(DEBUG) << "parse TfliteAbsParser";
-    auto attr = std::make_unique<schema::AbsT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Abs;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_EXP) {
-    MS_LOG(DEBUG) << "parse TfliteExpParser";
-    auto attr = std::make_unique<schema::ExpT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    attr->base = -1;  // -1 represent base = e
-    attr->scale = 1;
-    attr->shift = 0;
-    primitive->value.type = schema::PrimitiveType_Exp;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_SQRT) {
-    MS_LOG(DEBUG) << "parse TfliteSqrtParser";
-    auto attr = std::make_unique<schema::SqrtT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Sqrt;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_RSQRT) {
-    MS_LOG(DEBUG) << "parse TfliteRsqrtParser";
-    auto attr = std::make_unique<schema::RsqrtT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Rsqrt;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_SQUARE) {
-    MS_LOG(DEBUG) << "parse TfliteSquareParser";
-    auto attr = std::make_unique<schema::SquareT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Square;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_SIN) {
-    MS_LOG(DEBUG) << "parse TfliteSinParser";
-    auto attr = std::make_unique<schema::SinT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Sin;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_COS) {
-    MS_LOG(DEBUG) << "parse TfliteCosParser";
-    std::unique_ptr<schema::CosT> attr = std::make_unique<schema::CosT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Cos;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_LOG) {
-    MS_LOG(DEBUG) << "parse TfliteLogParser";
-    auto attr = std::make_unique<schema::LogT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Log;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_ROUND) {
-    MS_LOG(DEBUG) << "parse TfliteRoundParser";
-    auto attr = std::make_unique<schema::RoundT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Round;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_CEIL) {
-    MS_LOG(DEBUG) << "parse TfliteCeilParser";
-    auto attr = std::make_unique<schema::CeilT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Ceil;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_FLOOR) {
-    MS_LOG(DEBUG) << "parse TfliteFloorParser";
-    auto attr = std::make_unique<schema::FloorT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Floor;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_NEG) {
-    MS_LOG(DEBUG) << "parse TfliteNegParser";
-    auto attr = std::make_unique<schema::NegT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Neg;
-    primitive->value.value = attr.release();
-  }
-  return PrimitiveC::Create(primitive.release());
-}
+ops::PrimitiveC *TfliteMulParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                        const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::MulFusion>();
+
+  MS_ASSERT(tflite_op != nullptr);
+  const auto &tflite_attr = tflite_op->builtin_options.AsMulOptions();
+  if (tflite_attr == nullptr) {
+    MS_LOG(ERROR) << "get MulFusion attr failed";
+    return nullptr;
+  }
+  prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function));
+
+  return prim.release();
+}
 
-PrimitiveC *TfliteCompareOpParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op,
-                                                      const std::unique_ptr<tflite::ModelT> &tflite_model) {
-  auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code;
-  auto primitive = std::make_unique<schema::PrimitiveT>();
-
-  if (tflite_op_type == tflite::BuiltinOperator_EQUAL) {
-    MS_LOG(DEBUG) << "parse TfliteEqualParser";
-    std::unique_ptr<schema::EqualT> attr = std::make_unique<schema::EqualT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Equal;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_NOT_EQUAL) {
-    MS_LOG(DEBUG) << "parse TfliteNotEqualParser";
-    std::unique_ptr<schema::NotEqualT> attr = std::make_unique<schema::NotEqualT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_NotEqual;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_GREATER) {
-    MS_LOG(DEBUG) << "parse TfliteGreaterParser";
-    std::unique_ptr<schema::GreaterT> attr = std::make_unique<schema::GreaterT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Greater;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_GREATER_EQUAL) {
-    MS_LOG(DEBUG) << "parse TfliteGreaterEqualParser";
-    std::unique_ptr<schema::GreaterEqualT> attr = std::make_unique<schema::GreaterEqualT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_GreaterEqual;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_LESS) {
-    MS_LOG(DEBUG) << "parse TfliteLessParser";
-    std::unique_ptr<schema::LessT> attr = std::make_unique<schema::LessT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_Less;
-    primitive->value.value = attr.release();
-  } else if (tflite_op_type == tflite::BuiltinOperator_LESS_EQUAL) {
-    MS_LOG(DEBUG) << "parse TfliteLessEqualParser";
-    std::unique_ptr<schema::LessEqualT> attr = std::make_unique<schema::LessEqualT>();
-    if (attr == nullptr) {
-      MS_LOG(ERROR) << "new op failed";
-      return nullptr;
-    }
-    primitive->value.type = schema::PrimitiveType_LessEqual;
-    primitive->value.value = attr.release();
-  }
-  return PrimitiveC::Create(primitive.release());
-}
+ops::PrimitiveC *TfliteDivParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                        const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::DivFusion>();
+
+  MS_ASSERT(tflite_op != nullptr);
+  const auto &tflite_attr = tflite_op->builtin_options.AsDivOptions();
+  if (tflite_attr == nullptr) {
+    MS_LOG(ERROR) << "get DivFusion attr failed";
+    return nullptr;
+  }
+  prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function));
+
+  return prim.release();
+}
+
+ops::PrimitiveC *TfliteSubParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                        const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::SubFusion>();
+
+  MS_ASSERT(tflite_op != nullptr);
+  const auto &tflite_attr = tflite_op->builtin_options.AsSubOptions();
+  if (tflite_attr == nullptr) {
+    MS_LOG(ERROR) << "get SubFusion attr failed";
+    return nullptr;
+  }
+  prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function));
+
+  return prim.release();
+}
+
+ops::PrimitiveC *TfliteFloorDivParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
+                                             const std::unique_ptr<tflite::ModelT> &tflite_model) {
+  auto prim = std::make_unique<ops::FloorDiv>();
+  return prim.release();
+}
std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::FloorMod>(); + return prim.release(); +} + +ops::PrimitiveC *TflitePowParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::PowFusion>(); + + prim->set_scale(1.0); + prim->set_shift(0.0); + + return prim.release(); +} + +ops::PrimitiveC *TfliteSquaredDifferenceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SquaredDifference>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteMaximumParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Maximum>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteMinimumParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Minimum>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteAbsParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Abs>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteCosParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Cos>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteFloorParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Floor>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteExpParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ExpFusion>(); + + prim->set_base(-1.0); + prim->set_scale(1.0); + prim->set_shift(0.0); + + return prim.release(); +} + +ops::PrimitiveC *TfliteCeilParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Ceil>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteLogParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Log>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteRoundParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Round>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteSqrtParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Sqrt>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteRsqrtParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Rsqrt>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteSquareParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Square>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteSinParser::Parse(const 
std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Sin>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteNegParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Neg>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteEqualParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Equal>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteNotEqualParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::NotEqual>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteGreaterParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Greater>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteGreaterEqualParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::GreaterEqual>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteLessParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Less>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteLessEqualParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LessEqual>(); + return prim.release(); } -TfliteNodeRegister g_tfliteAddParser(tflite::BuiltinOperator_ADD, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_tfliteSubParser(tflite::BuiltinOperator_SUB, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TfliteMulParser(tflite::BuiltinOperator_MUL, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TfliteDivParser(tflite::BuiltinOperator_DIV, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_tfliteFloorDivParser(tflite::BuiltinOperator_FLOOR_DIV, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_tfliteFloorModParser(tflite::BuiltinOperator_FLOOR_MOD, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TflitePowParser(tflite::BuiltinOperator_POW, new TfliteDoubleInputOpParser()); +TfliteNodeRegister g_tfliteAddParser(tflite::BuiltinOperator_ADD, new TfliteAddParser()); +TfliteNodeRegister g_tfliteSubParser(tflite::BuiltinOperator_SUB, new TfliteSubParser()); +TfliteNodeRegister g_TfliteMulParser(tflite::BuiltinOperator_MUL, new TfliteMulParser()); +TfliteNodeRegister g_TfliteDivParser(tflite::BuiltinOperator_DIV, new TfliteDivParser()); +TfliteNodeRegister g_tfliteFloorDivParser(tflite::BuiltinOperator_FLOOR_DIV, new TfliteFloorDivParser()); +TfliteNodeRegister g_tfliteFloorModParser(tflite::BuiltinOperator_FLOOR_MOD, new TfliteFloorModParser()); +TfliteNodeRegister g_TflitePowParser(tflite::BuiltinOperator_POW, new TflitePowParser()); TfliteNodeRegister g_tfliteSquaredDifferenceParser(tflite::BuiltinOperator_SQUARED_DIFFERENCE, - new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TfliteMaximumParser(tflite::BuiltinOperator_MAXIMUM, new TfliteDoubleInputOpParser()); -TfliteNodeRegister g_TfliteMinimumParser(tflite::BuiltinOperator_MINIMUM, new TfliteDoubleInputOpParser()); - 
-TfliteNodeRegister g_TfliteAbsParser(tflite::BuiltinOperator_ABS, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteExpParser(tflite::BuiltinOperator_EXP, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteSqrtParser(tflite::BuiltinOperator_SQRT, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_tfliteRsqrtParser(tflite::BuiltinOperator_RSQRT, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteSquareParser(tflite::BuiltinOperator_SQUARE, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteSinParser(tflite::BuiltinOperator_SIN, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteCosParser(tflite::BuiltinOperator_COS, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteLogParser(tflite::BuiltinOperator_LOG, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_tfliteRoundParser(tflite::BuiltinOperator_ROUND, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_TfliteCeilParser(tflite::BuiltinOperator_CEIL, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_tfliteFloorParser(tflite::BuiltinOperator_FLOOR, new TfliteSingleInputOpParser()); -TfliteNodeRegister g_tfliteNegParser(tflite::BuiltinOperator_NEG, new TfliteSingleInputOpParser()); - -TfliteNodeRegister g_tfliteEqualParser(tflite::BuiltinOperator_EQUAL, new TfliteCompareOpParser()); -TfliteNodeRegister g_tfliteNotEqualParser(tflite::BuiltinOperator_NOT_EQUAL, new TfliteCompareOpParser()); -TfliteNodeRegister g_tfliteGreaterEParser(tflite::BuiltinOperator_GREATER, new TfliteCompareOpParser()); -TfliteNodeRegister g_tfliteGreaterEqualParser(tflite::BuiltinOperator_GREATER_EQUAL, new TfliteCompareOpParser()); -TfliteNodeRegister g_tfliteLessParser(tflite::BuiltinOperator_LESS, new TfliteCompareOpParser()); -TfliteNodeRegister g_tfliteLessEqualParser(tflite::BuiltinOperator_LESS_EQUAL, new TfliteCompareOpParser()); -} // namespace mindspore::lite + new TfliteSquaredDifferenceParser()); +TfliteNodeRegister g_TfliteMaximumParser(tflite::BuiltinOperator_MAXIMUM, new TfliteMaximumParser()); +TfliteNodeRegister g_TfliteMinimumParser(tflite::BuiltinOperator_MINIMUM, new TfliteMinimumParser()); +TfliteNodeRegister g_TfliteAbsParser(tflite::BuiltinOperator_ABS, new TfliteAbsParser()); +TfliteNodeRegister g_TfliteExpParser(tflite::BuiltinOperator_EXP, new TfliteExpParser()); +TfliteNodeRegister g_TfliteSqrtParser(tflite::BuiltinOperator_SQRT, new TfliteSqrtParser()); +TfliteNodeRegister g_tfliteRsqrtParser(tflite::BuiltinOperator_RSQRT, new TfliteRsqrtParser()); +TfliteNodeRegister g_TfliteSquareParser(tflite::BuiltinOperator_SQUARE, new TfliteSquareParser()); +TfliteNodeRegister g_TfliteSinParser(tflite::BuiltinOperator_SIN, new TfliteSinParser()); +TfliteNodeRegister g_TfliteCosParser(tflite::BuiltinOperator_COS, new TfliteCosParser()); +TfliteNodeRegister g_TfliteLogParser(tflite::BuiltinOperator_LOG, new TfliteLogParser()); +TfliteNodeRegister g_tfliteRoundParser(tflite::BuiltinOperator_ROUND, new TfliteRoundParser()); +TfliteNodeRegister g_TfliteCeilParser(tflite::BuiltinOperator_CEIL, new TfliteCeilParser()); +TfliteNodeRegister g_tfliteFloorParser(tflite::BuiltinOperator_FLOOR, new TfliteFloorParser()); +TfliteNodeRegister g_tfliteNegParser(tflite::BuiltinOperator_NEG, new TfliteNegParser()); +TfliteNodeRegister g_tfliteEqualParser(tflite::BuiltinOperator_EQUAL, new TfliteEqualParser()); +TfliteNodeRegister g_tfliteNotEqualParser(tflite::BuiltinOperator_NOT_EQUAL, new TfliteNotEqualParser()); +TfliteNodeRegister 
g_tfliteGreaterEParser(tflite::BuiltinOperator_GREATER, new TfliteGreaterParser()); +TfliteNodeRegister g_tfliteGreaterEqualParser(tflite::BuiltinOperator_GREATER_EQUAL, new TfliteGreaterEqualParser()); +TfliteNodeRegister g_tfliteLessParser(tflite::BuiltinOperator_LESS, new TfliteLessParser()); +TfliteNodeRegister g_tfliteLessEqualParser(tflite::BuiltinOperator_LESS_EQUAL, new TfliteLessEqualParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h index 4ca09e71c0..284adea031 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_arithmetic_parser.h @@ -25,28 +25,228 @@ namespace mindspore { namespace lite { -class TfliteDoubleInputOpParser : public TfliteNodeParser { +class TfliteAddParser : public TfliteNodeParser { public: - TfliteDoubleInputOpParser() : TfliteNodeParser("node_name") {} + TfliteAddParser() : TfliteNodeParser("Add") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -class TfliteSingleInputOpParser : public TfliteNodeParser { +class TfliteSubParser : public TfliteNodeParser { public: - TfliteSingleInputOpParser() : TfliteNodeParser("node_name") {} + TfliteSubParser() : TfliteNodeParser("Sub") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -class TfliteCompareOpParser : public TfliteNodeParser { +class TfliteMulParser : public TfliteNodeParser { public: - TfliteCompareOpParser() : TfliteNodeParser("node_name") {} + TfliteMulParser() : TfliteNodeParser("Mul") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteDivParser : public TfliteNodeParser { + public: + TfliteDivParser() : TfliteNodeParser("Div") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteFloorDivParser : public TfliteNodeParser { + public: + TfliteFloorDivParser() : TfliteNodeParser("FloorDiv") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteFloorModParser : public TfliteNodeParser { + public: + TfliteFloorModParser() : TfliteNodeParser("FloorMod") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TflitePowParser : public TfliteNodeParser { + public: + TflitePowParser() : TfliteNodeParser("PowFusion") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class 
TfliteSquaredDifferenceParser : public TfliteNodeParser { + public: + TfliteSquaredDifferenceParser() : TfliteNodeParser("SquaredDifference") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteMaximumParser : public TfliteNodeParser { + public: + TfliteMaximumParser() : TfliteNodeParser("Maximum") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteMinimumParser : public TfliteNodeParser { + public: + TfliteMinimumParser() : TfliteNodeParser("Minimum") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteAbsParser : public TfliteNodeParser { + public: + TfliteAbsParser() : TfliteNodeParser("Abs") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteExpParser : public TfliteNodeParser { + public: + TfliteExpParser() : TfliteNodeParser("Exp") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteSqrtParser : public TfliteNodeParser { + public: + TfliteSqrtParser() : TfliteNodeParser("Sqrt") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteRsqrtParser : public TfliteNodeParser { + public: + TfliteRsqrtParser() : TfliteNodeParser("Rsqrt") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteSquareParser : public TfliteNodeParser { + public: + TfliteSquareParser() : TfliteNodeParser("Square") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteSinParser : public TfliteNodeParser { + public: + TfliteSinParser() : TfliteNodeParser("Sin") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteCosParser : public TfliteNodeParser { + public: + TfliteCosParser() : TfliteNodeParser("Cos") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteLogParser : public TfliteNodeParser { + public: + TfliteLogParser() : TfliteNodeParser("Log") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteRoundParser : public TfliteNodeParser { + public: + TfliteRoundParser() : TfliteNodeParser("Round") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteCeilParser : public TfliteNodeParser { + public: + TfliteCeilParser() : TfliteNodeParser("Ceil") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteFloorParser : public TfliteNodeParser { + public: + 
TfliteFloorParser() : TfliteNodeParser("Floor") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteNegParser : public TfliteNodeParser { + public: + TfliteNegParser() : TfliteNodeParser("Neg") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteEqualParser : public TfliteNodeParser { + public: + TfliteEqualParser() : TfliteNodeParser("Equal") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteNotEqualParser : public TfliteNodeParser { + public: + TfliteNotEqualParser() : TfliteNodeParser("NotEqual") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteGreaterParser : public TfliteNodeParser { + public: + TfliteGreaterParser() : TfliteNodeParser("Greater") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteGreaterEqualParser : public TfliteNodeParser { + public: + TfliteGreaterEqualParser() : TfliteNodeParser("GreaterEqual") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteLessParser : public TfliteNodeParser { + public: + TfliteLessParser() : TfliteNodeParser("Less") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteLessEqualParser : public TfliteNodeParser { + public: + TfliteLessEqualParser() : TfliteNodeParser("LessEqual") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc index 80526727e3..a447316570 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.cc @@ -19,33 +19,39 @@ #include <vector> #include <memory> #include <string> -#include <map> +#include "ops/batch_to_space.h" -namespace mindspore::lite { -PrimitiveC *TfliteBatchToSpaceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteBatchToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::BatchToSpace>(); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); const auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::BatchToSpaceT> attr = std::make_unique<schema::BatchToSpaceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - if 
(GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->blockShape)) { + std::vector<int64_t> block_shape; + if (GetTfliteData(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, block_shape)) { MS_LOG(ERROR) << "get batchToSpace -> blockShape failed"; return nullptr; } - if (GetTfliteData(tflite_op->inputs[2], tflite_subgraph->tensors, tflite_model->buffers, attr->crops)) { + prim->set_block_size(block_shape); + + std::vector<std::vector<int64_t>> crops; + if (TransTfliteDataToVec2D(tflite_op->inputs.at(2), tflite_subgraph->tensors, tflite_model->buffers, crops)) { MS_LOG(ERROR) << "get batchToSpace -> crops failed"; return nullptr; } + prim->set_crops(crops); - primitive->value.type = schema::PrimitiveType_BatchToSpace; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteBatchToSpaceNDParser(tflite::BuiltinOperator_BATCH_TO_SPACE_ND, new TfliteBatchToSpaceParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h index 49b5d93e75..e38b048c8d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_batch_to_space_parser.h @@ -23,15 +23,16 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteBatchToSpaceParser : public TfliteNodeParser { public: TfliteBatchToSpaceParser() : TfliteNodeParser("BatchToSpace") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; - -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BATCH_TO_SPACE_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc index c7a1769941..41bd5dd4fb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.cc @@ -18,29 +18,30 @@ #include "tools/converter/parser/tflite/tflite_broadcast_to_parser.h" #include <vector> #include <memory> +#include "ops/broadcast_to.h" -namespace mindspore::lite { -PrimitiveC *TfliteBroadcastToParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - std::unique_ptr<schema::BroadcastToT> attr = std::make_unique<schema::BroadcastToT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteBroadcastToParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT>
&tflite_model) { + auto prim = std::make_unique<ops::BroadcastTo>(); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->dst_shape)) { + std::vector<int64_t> dst_shape; + if (GetTfliteData(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, dst_shape)) { MS_LOG(ERROR) << "get broadCastTo -> dst_shape failed"; return nullptr; } - primitive->value.type = schema::PrimitiveType_BroadcastTo; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_shape(dst_shape); + + return prim.release(); } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h index e4df6b211e..e48aa6bf2b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_broadcast_to_parser.h @@ -23,14 +23,15 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteBroadcastToParser : public TfliteNodeParser { public: TfliteBroadcastToParser() : TfliteNodeParser("BroadcastTo") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite - +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_BROADCAST_TO_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc index ee3dd1804a..4d9debf9fc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.cc @@ -17,34 +17,32 @@ #include "tools/converter/parser/tflite/tflite_cast_parser.h" #include <vector> #include <memory> +#include "ops/cast.h" -namespace mindspore::lite { -PrimitiveC *TfliteCastParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteCastParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Cast>(); - const auto &in_tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; - if (in_tensor == nullptr) { - MS_LOG(ERROR) << "tensor is null"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + 
if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - attr->srcT = GetTfliteDataType(in_tensor->type); const auto &out_tensor = tflite_subgraph->tensors[tflite_op->outputs[0]]; if (out_tensor == nullptr) { MS_LOG(ERROR) << "tensor is null"; return nullptr; } - attr->dstT = GetTfliteDataType(out_tensor->type); - primitive->value.type = schema::PrimitiveType_Cast; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + auto dstT = GetTfliteDataType(out_tensor->type); + prim->AddAttr("to", MakeValue(static_cast<int32_t>(dstT))); + + return prim.release(); } TfliteNodeRegister g_tfliteCastParser(tflite::BuiltinOperator_CAST, new TfliteCastParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h index 10bea6c98b..ef01dfb204 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_cast_parser.h @@ -29,8 +29,8 @@ class TfliteCastParser : public TfliteNodeParser { public: TfliteCastParser() : TfliteNodeParser("Cast") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc index 076c562179..090d862b77 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.cc @@ -17,27 +17,25 @@ #include "tools/converter/parser/tflite/tflite_concat_parser.h" #include <vector> #include <memory> +#include "ops/concat.h" -namespace mindspore::lite { -PrimitiveC *TfliteConcatParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::ConcatT> attr = std::make_unique<schema::ConcatT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteConcatParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Concat>(); - const auto &tfliteAttr = tflite_op->builtin_options.AsConcatenationOptions(); - if (tfliteAttr == nullptr) { + MS_ASSERT(tflite_op != nullptr); + const auto &tflite_attr = tflite_op->builtin_options.AsConcatenationOptions(); + if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op concat attr failed"; return nullptr; } - attr->axis = tfliteAttr->axis; - primitive->value.type = schema::PrimitiveType_Concat; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_axis(tflite_attr->axis); + + return prim.release(); } TfliteNodeRegister g_tfliteConcatParser(tflite::BuiltinOperator_CONCATENATION, new TfliteConcatParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h index 46246ecd39..3b2c4d2876 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_concat_parser.h @@ -29,8 +29,8 @@ class TfliteConcatParser : public TfliteNodeParser { public: TfliteConcatParser() : TfliteNodeParser("Concat") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc index b9a777863a..3e552883af 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.cc @@ -17,64 +17,131 @@ #include "tools/converter/parser/tflite/tflite_conv_parser.h" #include <vector> #include <memory> +#include "ops/fusion/conv2d_fusion.h" -namespace mindspore::lite { -lite::PrimitiveC *TfliteConvParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Conv2DFusion>(); + + prim->set_pad({0, 0, 0, 0}); + prim->set_group(1); + prim->set_format(mindspore::Format::NHWC); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); const auto &tflite_subgraph = tflite_model->subgraphs.front(); - std::unique_ptr<schema::Conv2DT> attr = std::make_unique<schema::Conv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - const auto &tflite_attr = tflite_op->builtin_options.AsConv2DOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get conv attr failed"; return nullptr; } - attr->group = 1; - attr->strideW = tflite_attr->stride_w; - attr->strideH = tflite_attr->stride_h; - attr->dilateH = tflite_attr->dilation_h_factor; - attr->dilateW = tflite_attr->dilation_w_factor; - attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format::Format_NHWC; - attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); - - // get the conv op weight tensor - auto weight_index = tflite_op->inputs[1]; - const auto &weight_tensor = tflite_subgraph->tensors[weight_index]; + prim->set_stride({tflite_attr->stride_h, tflite_attr->stride_w}); + prim->set_dilation({tflite_attr->dilation_h_factor, tflite_attr->dilation_w_factor}); + auto padMode = GetPadMode(tflite_attr->padding); + prim->set_pad_mode(padMode); + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); + + // get weight tensor + const auto &weight_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[1]); if (weight_tensor == nullptr) { MS_LOG(ERROR) << "the weight tensor is null"; return nullptr; } auto weight_shape = weight_tensor->shape; - attr->channelIn = weight_shape[3]; - attr->channelOut = 
weight_shape[0]; - attr->kernelH = weight_shape[1]; - attr->kernelW = weight_shape[2]; + prim->set_in_channel(weight_shape[3]); + prim->set_out_channel(weight_shape[0]); + prim->set_kernel_size({weight_shape[1], weight_shape[2]}); // calculate pad params - auto data_index = tflite_op->inputs[0]; - const auto &data_tensor = tflite_subgraph->tensors[data_index]; + const auto &data_tensor = tflite_subgraph->tensors.at(tflite_op->inputs[0]); std::vector<int64_t> params; - int status = - getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, &params); + int status = getPaddingParam(data_tensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w, weight_shape[1], + weight_shape[2], &params); if (status != RET_OK && status != RET_NO_CHANGE) { MS_LOG(ERROR) << "get padding params failed"; return nullptr; } else if (status == RET_OK) { - attr->padUp = params.at(0); - attr->padDown = params.at(1); - attr->padLeft = params.at(2); - attr->padRight = params.at(3); + prim->set_pad_list(params); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Conv2D; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); +} + +ops::PrimitiveC *TfliteDepthwiseConv2DParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Conv2DFusion>(); + if (prim == nullptr) { + MS_LOG(ERROR) << "new Conv2DFusion failed"; + return nullptr; + } + + prim->set_pad({0, 0, 0, 0}); + prim->set_format(mindspore::Format::NHWC); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; + } + const auto &tflite_attr = tflite_op->builtin_options.AsDepthwiseConv2DOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get depthwise conv attr failed"; + return nullptr; + } + prim->set_stride({tflite_attr->stride_h, tflite_attr->stride_w}); + prim->set_dilation({tflite_attr->dilation_h_factor, tflite_attr->dilation_w_factor}); + auto padMode = GetPadMode(tflite_attr->padding); + prim->set_pad_mode(padMode); + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); + + // get weight tensor + const auto &weight_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(1)); + if (weight_tensor == nullptr) { + MS_LOG(ERROR) << "the weight tensor is null"; + return nullptr; + } + auto weight_shape = weight_tensor->shape; + prim->set_kernel_size({weight_shape[1], weight_shape[2]}); + prim->set_in_channel(weight_shape[3]); + prim->set_group(weight_shape[3] / tflite_attr->depth_multiplier); + + // get data tensor + const auto &data_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(0)); + if (data_tensor == nullptr) { + MS_LOG(ERROR) << "data_tensor is nullptr"; + return nullptr; + } + auto data_shape = data_tensor->shape; + if (!data_shape.empty()) { + prim->set_out_channel(data_shape[3] * tflite_attr->depth_multiplier); + } + + // calculate pad params + std::vector<int64_t> params; + int status = getPaddingParam(data_tensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w, weight_shape[1], + weight_shape[2], &params); + if (status != RET_OK && status != RET_NO_CHANGE) { + MS_LOG(ERROR) << "get padding params failed"; + return nullptr; + } else if
(status == RET_OK) { + prim->set_pad_list(params); + } + prim->AddAttr(ops::kIsDepthWise, MakeValue<bool>(true)); + + return prim.release(); } TfliteNodeRegister g_tfliteConv2DParser(tflite::BuiltinOperator_CONV_2D, new TfliteConvParser()); -} // namespace mindspore::lite +TfliteNodeRegister g_tfliteDepthwiseConv2DParser(tflite::BuiltinOperator_DEPTHWISE_CONV_2D, + new TfliteDepthwiseConv2DParser()); + +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h index a13c0b7aa2..b1f62d4e75 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_parser.h @@ -23,14 +23,24 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteConvParser : public TfliteNodeParser { public: TfliteConvParser() : TfliteNodeParser("Conv2D") {} - lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite + +class TfliteDepthwiseConv2DParser : public TfliteNodeParser { + public: + TfliteDepthwiseConv2DParser() : TfliteNodeParser("DepthwiseConv2D") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc new file mode 100644 index 0000000000..71525dc79e --- /dev/null +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.cc @@ -0,0 +1,78 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "tools/converter/parser/tflite/tflite_conv_transpose_parser.h" +#include <vector> +#include <memory> +#include "ops/fusion/conv2d_transpose_fusion.h" + +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteDeConvParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Conv2dTransposeFusion>(); + + prim->set_pad({0, 0, 0, 0}); + prim->set_group(1); + prim->set_format(mindspore::Format::NHWC); + prim->set_activation_type(mindspore::ActivationType::NO_ACTIVATION); + prim->set_dilation({1, 1}); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; + } + const auto &tflite_attr = tflite_op->builtin_options.AsTransposeConvOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get deconv attr failed"; + return nullptr; + } + prim->set_stride({tflite_attr->stride_h, tflite_attr->stride_w}); + auto padMode = GetPadMode(tflite_attr->padding); + prim->set_pad_mode(padMode); + + // get weight tensor + const auto &weight_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(1)); + if (weight_tensor == nullptr) { + MS_LOG(ERROR) << "the weight tensor is null"; + return nullptr; + } + auto weight_shape = weight_tensor->shape; + prim->set_in_channel(weight_shape[3]); + prim->set_out_channel(weight_shape[0]); + prim->set_kernel_size({weight_shape[1], weight_shape[2]}); + + // calculate pad params + const auto &data_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(2)); + std::vector<int64_t> params; + int status = getPaddingParam(data_tensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w, weight_shape[1], + weight_shape[2], &params); + if (status != RET_OK && status != RET_NO_CHANGE) { + MS_LOG(ERROR) << "get padding params failed"; + return nullptr; + } else if (status == RET_OK) { + prim->set_pad_list(params); + } + + return prim.release(); +} + +TfliteNodeRegister g_tfliteDeConv2DParser(tflite::BuiltinOperator_TRANSPOSE_CONV, new TfliteDeConvParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.h new file mode 100644 index 0000000000..8782280e55 --- /dev/null +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_conv_transpose_parser.h @@ -0,0 +1,38 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H + +#include <memory> +#include <vector> +#include <map> +#include "tools/converter/parser/tflite/tflite_node_parser.h" +#include "tools/converter/parser/tflite/tflite_node_parser_registry.h" + +namespace mindspore { +namespace lite { +class TfliteDeConvParser : public TfliteNodeParser { + public: + TfliteDeConvParser() : TfliteNodeParser("DeConv2D") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc deleted file mode 100644 index 34f28d7306..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.cc +++ /dev/null @@ -1,22 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/tflite/tflite_converter.h" -#include "tools/converter/parser/tflite/tflite_model_parser.h" - -namespace mindspore::lite { -TfliteConverter::TfliteConverter() { modelParser = new TfliteModelParser(); } -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h index eba2150e7c..cb87ea9cc4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_converter.h @@ -22,13 +22,20 @@ #include <map> #include "tools/converter/converter.h" #include "tools/converter/graphdef_transform.h" +#include "tools/converter/parser/tflite/tflite_model_parser.h" namespace mindspore::lite { class TfliteConverter : public Converter { public: - TfliteConverter(); + TfliteConverter() = default; ~TfliteConverter() override = default; + + FuncGraphPtr BuildFuncGraph(const std::string &model_file, const std::string &weight_file, + schema::QuantType quant_type) override { + TfliteModelParser parser; + return parser.Parse(model_file, weight_file, quant_type); + } }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc index c373dad616..67f03a21a3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.cc @@ -20,233 +20,165 @@ #include <vector> #include "flatbuffers/flexbuffers.h" +#include "ops/audio_spectrogram.h" +#include "ops/custom_extract_features.h" +#include "ops/custom_normalize.h" +#include "ops/custom_predict.h" +#include "ops/detection_post_process.h" +#include "ops/identity.h" +#include 
"ops/fft_real.h" +#include "ops/fft_imag.h" +#include "ops/mfcc.h" +#include "ops/rfft.h" + namespace mindspore { namespace lite { -STATUS TfliteCustomParser::DetectPostProcess(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::DetectionPostProcessT> attr = std::make_unique<schema::DetectionPostProcessT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; +ops::PrimitiveC *TfliteCustomParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &custom_attr = tflite_op->custom_options; + const auto &opnode = tflite_model->operator_codes.at(tflite_op->opcode_index); + if (opnode == nullptr) { + MS_LOG(ERROR) << "opnode is null"; + return nullptr; + } + const auto &custom_type = opnode->custom_code; + if (custom_type == "TFLite_Detection_PostProcess") { + return DetectPostProcess(custom_attr, tflite_op); + } else if (custom_type == "Predict") { + return Predict(custom_attr); + } else if (custom_type == "Normalize") { + return Normalize(); + } else if (custom_type == "ExtractFeatures") { + return ExtractFeatures(); + } else if (custom_type == "AudioSpectrogram") { + return AudioSpectrogram(custom_attr); + } else if (custom_type == "Mfcc") { + return Mfcc(custom_attr); + } else if (custom_type == "FlexRFFT") { + return Rfft(custom_attr, tflite_op, tflite_model); + } else if (custom_type == "FlexReal") { + return FftReal(); + } else if (custom_type == "FlexImag") { + return FftImag(); + } else { + MS_LOG(ERROR) << "custom type : " << custom_type << " is not supported"; + return nullptr; } +} + +ops::PrimitiveC *TfliteCustomParser::DetectPostProcess(const std::vector<uint8_t> &custom_attr, + const std::unique_ptr<tflite::OperatorT> &tflite_op) { + auto prim = std::make_unique<ops::DetectionPostProcess>(); + + prim->set_format(mindspore::Format::NHWC); + prim->set_input_size(tflite_op->inputs.size()); auto attr_map = flexbuffers::GetRoot(custom_attr).AsMap(); - attr->format = schema::Format::Format_NHWC; - attr->inputSize = tflite_op->inputs.size(); - attr->hScale = attr_map["h_scale"].AsFloat(); - attr->wScale = attr_map["w_scale"].AsFloat(); - attr->xScale = attr_map["x_scale"].AsFloat(); - attr->yScale = attr_map["y_scale"].AsFloat(); - attr->NmsIouThreshold = attr_map["nms_iou_threshold"].AsFloat(); - attr->NmsScoreThreshold = attr_map["nms_score_threshold"].AsFloat(); - attr->MaxDetections = attr_map["max_detections"].AsInt32(); + prim->set_scale({attr_map["h_scale"].AsFloat(), attr_map["w_scale"].AsFloat(), attr_map["x_scale"].AsFloat(), + attr_map["y_scale"].AsFloat()}); + prim->set_nms_iou_threshold(attr_map["nms_iou_threshold"].AsFloat()); + prim->set_nms_score_threshold(attr_map["nms_score_threshold"].AsFloat()); + prim->set_max_detections(attr_map["max_detections"].AsInt64()); if (attr_map["detections_per_class"].IsNull()) { - attr->DetectionsPerClass = 100; + prim->set_detections_per_class(100); } else { - attr->DetectionsPerClass = attr_map["detections_per_class"].AsInt32(); + prim->set_detections_per_class(attr_map["detections_per_class"].AsInt64()); } - attr->MaxClassesPerDetection = attr_map["max_classes_per_detection"].AsInt32(); - attr->NumClasses = attr_map["num_classes"].AsInt32(); + prim->set_max_classes_per_detection(attr_map["max_classes_per_detection"].AsInt64()); + 
prim->set_num_classes(attr_map["num_classes"].AsInt64()); if (attr_map["use_regular_nms"].IsNull()) { - attr->UseRegularNms = false; + prim->set_use_regular_nms(false); } else { - attr->UseRegularNms = attr_map["use_regular_nms"].AsBool(); + prim->set_use_regular_nms(attr_map["use_regular_nms"].AsBool()); } if (attr_map["_output_quantized"].IsNull()) { - attr->OutQuantized = false; + prim->set_out_quantized(false); } else { - attr->OutQuantized = attr_map["_output_quantized"].AsBool(); + prim->set_out_quantized(attr_map["_output_quantized"].AsBool()); } - op->primitive->value.type = schema::PrimitiveType_DetectionPostProcess; - op->primitive->value.value = attr.release(); - return RET_OK; + return prim.release(); } -STATUS TfliteCustomParser::AudioSpectrogram(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::AudioSpectrogramT> attr = std::make_unique<schema::AudioSpectrogramT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TfliteCustomParser::AudioSpectrogram(const std::vector<uint8_t> &custom_attr) { + auto prim = std::make_unique<ops::AudioSpectrogram>(); + auto attr_map = flexbuffers::GetRoot(custom_attr).AsMap(); - attr->windowSize = attr_map["window_size"].AsInt64(); - attr->stride = attr_map["stride"].AsInt64(); - attr->magSquare = attr_map["magnitude_squared"].AsBool(); + prim->set_window_size(attr_map["window_size"].AsInt64()); + prim->set_stride(attr_map["stride"].AsInt64()); + prim->set_mag_square(attr_map["magnitude_squared"].AsBool()); - op->primitive->value.type = schema::PrimitiveType_AudioSpectrogram; - op->primitive->value.value = attr.release(); - return RET_OK; + return prim.release(); } -STATUS TfliteCustomParser::Mfcc(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::MfccT> attr = std::make_unique<schema::MfccT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } +ops::PrimitiveC *TfliteCustomParser::Mfcc(const std::vector<uint8_t> &custom_attr) { + auto prim = std::make_unique<ops::Mfcc>(); + auto attr_map = flexbuffers::GetRoot(custom_attr).AsMap(); - attr->freqUpperLimit = attr_map["upper_frequency_limit"].AsInt64(); - attr->freqLowerLimit = attr_map["lower_frequency_limit"].AsInt64(); - attr->filterBankChannelNum = attr_map["filterbank_channel_count"].AsInt64(); - attr->dctCoeffNum = attr_map["dct_coefficient_count"].AsInt64(); - - op->primitive->value.type = schema::PrimitiveType_Mfcc; - op->primitive->value.value = attr.release(); - return RET_OK; + prim->set_freq_upper_limit(attr_map["upper_frequency_limit"].AsFloat()); + prim->set_freq_lower_limit(attr_map["lower_frequency_limit"].AsFloat()); + prim->set_filter_bank_channel_num(attr_map["filterbank_channel_count"].AsInt64()); + prim->set_dct_coeff_num(attr_map["dct_coefficient_count"].AsInt64()); + + return prim.release(); } -STATUS TfliteCustomParser::Predict(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::CustomPredictT> attr = std::make_unique<schema::CustomPredictT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - attr->outputNum = reinterpret_cast<const int *>(custom_attr.data())[0]; - attr->weightThreshold = reinterpret_cast<const float *>(custom_attr.data())[1]; - 
op->primitive->value.type = schema::PrimitiveType_CustomPredict; - op->primitive->value.value = attr.release(); - return RET_OK; +ops::PrimitiveC *TfliteCustomParser::Predict(const std::vector<uint8_t> &custom_attr) { + auto prim = std::make_unique<ops::CustomPredict>(); + + prim->set_output_num(static_cast<int64_t>(reinterpret_cast<const int32_t *>(custom_attr.data())[0])); + prim->set_weight_threshold(reinterpret_cast<const float *>(custom_attr.data())[1]); + + return prim.release(); } -STATUS TfliteCustomParser::Normalize(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::CustomNormalizeT> attr = std::make_unique<schema::CustomNormalizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - op->primitive->value.type = schema::PrimitiveType_CustomNormalize; - op->primitive->value.value = attr.release(); - return RET_OK; +ops::PrimitiveC *TfliteCustomParser::Normalize() { + auto prim = std::make_unique<ops::CustomNormalize>(); + return prim.release(); } -STATUS TfliteCustomParser::ExtractFeatures(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::CustomExtractFeaturesT> attr = std::make_unique<schema::CustomExtractFeaturesT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - op->primitive->value.type = schema::PrimitiveType_CustomExtractFeatures; - op->primitive->value.value = attr.release(); - return RET_OK; +ops::PrimitiveC *TfliteCustomParser::ExtractFeatures() { + auto prim = std::make_unique<ops::CustomExtractFeatures>(); + return prim.release(); } -STATUS TfliteCustomParser::Rfft(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model, - const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph) { - std::unique_ptr<schema::RfftT> attr = std::make_unique<schema::RfftT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; +ops::PrimitiveC *TfliteCustomParser::Rfft(const std::vector<uint8_t> &custom_attr, + const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Rfft>(); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; } - std::vector<int> fft_length; + std::vector<int64_t> fft_length; if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, fft_length)) { MS_LOG(ERROR) << "rfft -> fftLength get failed"; - return RET_ERROR; - } - attr->fftLength = fft_length[0]; - op->primitive->value.type = schema::PrimitiveType_Rfft; - op->primitive->value.value = attr.release(); - return RET_OK; -} - -STATUS TfliteCustomParser::FftReal(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::FftRealT> attr = std::make_unique<schema::FftRealT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; + return nullptr; } - op->primitive->value.type = schema::PrimitiveType_FftReal; - op->primitive->value.value = attr.release(); - return RET_OK; -} + prim->set_fft_length(fft_length[0]); -STATUS
TfliteCustomParser::FftImag(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::FftImagT> attr = std::make_unique<schema::FftImagT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - op->primitive->value.type = schema::PrimitiveType_FftImag; - op->primitive->value.value = attr.release(); - return RET_OK; + return prim.release(); } -STATUS TfliteCustomParser::Identity(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::IdentityT> attr = std::make_unique<schema::IdentityT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - op->primitive->value.type = schema::PrimitiveType_Identity; - op->primitive->value.value = attr.release(); - return RET_OK; +ops::PrimitiveC *TfliteCustomParser::FftReal() { + auto prim = std::make_unique<ops::FftReal>(); + return prim.release(); } -STATUS TfliteCustomParser::BatchMatMul(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op) { - std::unique_ptr<schema::MatMulT> attr = std::make_unique<schema::MatMulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return RET_NULL_PTR; - } - attr->transposeA = false; - attr->transposeB = false; - op->primitive->value.type = schema::PrimitiveType_MatMul; - op->primitive->value.value = attr.release(); - return RET_OK; +ops::PrimitiveC *TfliteCustomParser::FftImag() { + auto prim = std::make_unique<ops::FftImag>(); + return prim.release(); } -PrimitiveC *TfliteCustomParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto op = new schema::CNodeT; - op->primitive = std::make_unique<schema::PrimitiveT>(); - if (op->primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - const auto &custom_attr = tflite_op->custom_options; - const auto &opcode_index = tflite_op->opcode_index; - const auto &custom_type = tflite_model->operator_codes[opcode_index]->custom_code; - int status = RET_OK; - if (custom_type == "TFLite_Detection_PostProcess") { - status = DetectPostProcess(custom_attr, op, tflite_op); - } else if (custom_type == "Predict") { - status = Predict(custom_attr, op, tflite_op); - } else if (custom_type == "Normalize") { - status = Normalize(custom_attr, op, tflite_op); - } else if (custom_type == "ExtractFeatures") { - status = ExtractFeatures(custom_attr, op, tflite_op); - } else if (custom_type == "AudioSpectrogram") { - status = AudioSpectrogram(custom_attr, op, tflite_op); - } else if (custom_type == "Mfcc") { - status = Mfcc(custom_attr, op, tflite_op); - } else if (custom_type == "FlexRFFT") { - status = Rfft(custom_attr, op, tflite_op, tflite_model, tflite_subgraph); - } else if (custom_type == "FlexReal") { - status = FftReal(custom_attr, op, tflite_op); - } else if (custom_type == "FlexImag") { - status = FftImag(custom_attr, op, tflite_op); - } else { - MS_LOG(ERROR) << "the custom op hasn't been supported now"; - status = RET_NOT_FIND_OP; - } - if (status != RET_OK) { - return nullptr; - } - auto primitive = op->primitive.release(); - delete op; - return PrimitiveC::Create(primitive); +ops::PrimitiveC *TfliteCustomParser::Identity() { + auto prim = std::make_unique<ops::Identity>(); + return 
prim.release(); } TfliteNodeRegister g_tfliteCustomParser(tflite::BuiltinOperator_CUSTOM, new TfliteCustomParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h index 6ddc296f44..c712da80ab 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_custom_parser.h @@ -23,48 +23,38 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteCustomParser : public TfliteNodeParser { public: TfliteCustomParser() : TfliteNodeParser("Custom") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; - static STATUS DetectPostProcess(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *DetectPostProcess(const std::vector<uint8_t> &custom_attr, + const std::unique_ptr<tflite::OperatorT> &tflite_op); - static STATUS AudioSpectrogram(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *AudioSpectrogram(const std::vector<uint8_t> &custom_attr); - static STATUS Mfcc(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *Mfcc(const std::vector<uint8_t> &custom_attr); - static STATUS Predict(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *Predict(const std::vector<uint8_t> &custom_attr); - static STATUS Normalize(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *Normalize(); - static STATUS ExtractFeatures(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *ExtractFeatures(); - STATUS Rfft(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op, const std::unique_ptr<tflite::ModelT> &tflite_model, - const std::unique_ptr<tflite::SubGraphT> &tflite_subgraph); + ops::PrimitiveC *Rfft(const std::vector<uint8_t> &custom_attr, const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model); - static STATUS FftReal(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *FftReal(); - static STATUS FftImag(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *FftImag(); - static STATUS Identity(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); - - static STATUS BatchMatMul(const std::vector<uint8_t> &custom_attr, schema::CNodeT *op, - const std::unique_ptr<tflite::OperatorT> &tflite_op); + static ops::PrimitiveC *Identity(); }; -} // namespace mindspore::lite +} 
// namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CUSTOM_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc deleted file mode 100644 index b612c13599..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.cc +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/tflite/tflite_deconv_parser.h" -#include <vector> -#include <memory> - -namespace mindspore::lite { -PrimitiveC *TfliteDeConvParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - auto &tflite_subgraph = tflite_model->subgraphs.front(); - std::unique_ptr<schema::DeConv2DT> attr = std::make_unique<schema::DeConv2DT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - const auto &tflite_attr = tflite_op->builtin_options.AsTransposeConvOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op deconv attr failed"; - return nullptr; - } - - attr->group = 1; - attr->strideW = tflite_attr->stride_w; - attr->strideH = tflite_attr->stride_h; - attr->dilateH = 1; - attr->dilateW = 1; - attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format::Format_NHWC; - attr->activationType = schema::ActivationType_NO_ACTIVATION; - - // get the conv op weight tensor - auto weight_index = tflite_op->inputs[1]; - const auto &weight_tensor = tflite_subgraph->tensors[weight_index]; - if (weight_tensor == nullptr) { - MS_LOG(ERROR) << "the weight tensor is null"; - return nullptr; - } - auto weight_shape = weight_tensor->shape; - attr->channelIn = weight_shape[3]; - attr->channelOut = weight_shape[0]; - attr->kernelH = weight_shape[1]; - attr->kernelW = weight_shape[2]; - - // calculate pad params - auto data_index = tflite_op->inputs[2]; - const auto &data_tensor = tflite_subgraph->tensors[data_index]; - std::vector<int64_t> params; - int status = - getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, &params); - if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "get padding params failed"; - return nullptr; - } else if (status == RET_OK) { - attr->padUp = params.at(0); - attr->padDown = params.at(1); - attr->padLeft = params.at(2); - attr->padRight = params.at(3); - } - primitive->value.type = schema::PrimitiveType_DeConv2D; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -TfliteNodeRegister g_tfliteDeConv2DParser(tflite::BuiltinOperator_TRANSPOSE_CONV, new TfliteDeConvParser()); -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h 
b/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h deleted file mode 100644 index ceb3ce95a4..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_deconv_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H - -#include <memory> -#include <vector> -#include <map> -#include "tools/converter/parser/tflite/tflite_node_parser.h" -#include "tools/converter/parser/tflite/tflite_node_parser_registry.h" - -namespace mindspore::lite { -class TfliteDeConvParser : public TfliteNodeParser { - public: - TfliteDeConvParser() : TfliteNodeParser("DeConv2D") {} - - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DECONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc index 9292151208..0534c33e1d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.cc @@ -18,28 +18,25 @@ #include "tools/converter/parser/tflite/tflite_depth_to_space_parser.h" #include <vector> #include <memory> +#include "ops/depth_to_space.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteDepthToSpaceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - std::unique_ptr<schema::DepthToSpaceT> attr = std::make_unique<schema::DepthToSpaceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteDepthToSpaceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::DepthToSpace>(); + prim->set_format(mindspore::Format::NHWC); + + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsDepthToSpaceOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op depthtospace attr failed"; return nullptr; } - attr->blockSize = tflite_attr->block_size; - attr->format = schema::Format::Format_NHWC; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_DepthToSpace; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_block_size(tflite_attr->block_size); + + return prim.release(); } TfliteNodeRegister g_tfliteDepthToSpaceParser(tflite::BuiltinOperator_DEPTH_TO_SPACE, new TfliteDepthToSpaceParser());
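Every parser migrated in this patch follows the same contract: build the ops:: primitive with std::make_unique, populate its attributes from the TFLite flatbuffer options, and hand ownership back to the caller via release(), returning nullptr on any failure instead of a STATUS code. A minimal sketch of a parser written against that contract (TfliteExampleParser and ops::Example are illustrative names, not part of this patch):

// Hypothetical parser illustrating the unified-IR contract used throughout
// this patch; ops::Example stands in for a real primitive header.
#include <memory>
#include "tools/converter/parser/tflite/tflite_node_parser.h"
#include "tools/converter/parser/tflite/tflite_node_parser_registry.h"

namespace mindspore {
namespace lite {
class TfliteExampleParser : public TfliteNodeParser {
 public:
  TfliteExampleParser() : TfliteNodeParser("Example") {}

  ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op,
                         const std::unique_ptr<tflite::ModelT> &tflite_model) override {
    MS_ASSERT(tflite_op != nullptr);
    auto prim = std::make_unique<ops::Example>();
    // Read tflite_op->builtin_options here and call the prim->set_xxx setters;
    // on any malformed attribute, log and return nullptr.
    return prim.release();  // the caller takes ownership of the raw pointer
  }
};
}  // namespace lite
}  // namespace mindspore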
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h index 39082e5f34..a6a7126383 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_depth_to_space_parser.h @@ -29,8 +29,8 @@ class TfliteDepthToSpaceParser : public TfliteNodeParser { public: TfliteDepthToSpaceParser() : TfliteNodeParser("DepthToSpace") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc deleted file mode 100644 index 976c469220..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.cc +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- */ - -#include "tools/converter/parser/tflite/tflite_depthwise_conv_parser.h" -#include <vector> -#include <memory> - -namespace mindspore::lite { -lite::PrimitiveC *TfliteDepthwiseConv2DParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - MS_LOG(DEBUG) << "parse TfliteDepthwiseConv2DParser"; - std::unique_ptr<schema::DepthwiseConv2DT> attr = std::make_unique<schema::DepthwiseConv2DT>(); - const auto &tflite_subgraph = tflite_model->subgraphs.front(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - const auto &tflite_attr = tflite_op->builtin_options.AsDepthwiseConv2DOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op de attr failed"; - return nullptr; - } - attr->strideW = tflite_attr->stride_w; - attr->strideH = tflite_attr->stride_h; - attr->dilateH = tflite_attr->dilation_h_factor; - attr->dilateW = tflite_attr->dilation_w_factor; - attr->padMode = GetPadMode(tflite_attr->padding); - attr->format = schema::Format::Format_NHWC; - attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); - attr->channelMultiplier = tflite_attr->depth_multiplier; - - // get the data tensor - auto data_index = tflite_op->inputs[1]; - const auto &data_tensor = tflite_subgraph->tensors[data_index]; - if (data_tensor == nullptr) { - MS_LOG(ERROR) << "the data tensor is null"; - return nullptr; - } - auto data_shape = data_tensor->shape; - attr->channelIn = data_shape[3]; - - // get the weight tensor - auto weight_index = tflite_op->inputs[1]; - const auto &weight_tensor = tflite_subgraph->tensors[weight_index]; - if (weight_tensor == nullptr) { - MS_LOG(ERROR) << "the weight tensor is null"; - return nullptr; - } - auto weight_shape = weight_tensor->shape; - attr->kernelH = weight_shape[1]; - attr->kernelW = weight_shape[2]; - - // calculate pad params - std::vector<int64_t> params; - int status = - getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->kernelH, attr->kernelW, &params); - if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "get padding params failed"; - return nullptr; - } else if (status == RET_OK) { - attr->padUp = params.at(0); - attr->padDown = params.at(1); - attr->padLeft = params.at(2); - attr->padRight = params.at(3); - } - - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_DepthwiseConv2D; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -TfliteNodeRegister g_tfliteDepthwiseConv2DParser(tflite::BuiltinOperator_DEPTHWISE_CONV_2D, - new TfliteDepthwiseConv2DParser()); -} // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h deleted file mode 100644 index fda28855d3..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_depthwise_conv_parser.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DEPTHWISE_CONV_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_DEPTHWISE_CONV_PARSER_H - -#include <vector> -#include <memory> -#include <map> -#include "tools/converter/parser/tflite/tflite_node_parser.h" -#include "tools/converter/parser/tflite/tflite_node_parser_registry.h" - -namespace mindspore::lite { -class TfliteDepthwiseConv2DParser : public TfliteNodeParser { - public: - TfliteDepthwiseConv2DParser() : TfliteNodeParser("DepthwiseConv2D") {} - - lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; -}; -} // namespace mindspore::lite - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_CONV_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc index 5f8f277575..e9876cf40d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.cc @@ -16,46 +16,40 @@ #include "tools/converter/parser/tflite/tflite_dequantize_parser.h" #include <vector> #include <memory> +#include "ops/quant_dtype_cast.h" +#include "ops/cast.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteDequantizeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { +ops::PrimitiveC *TfliteDequantizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - const auto &in_tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; + } + const auto &in_tensor = tflite_subgraph->tensors[tflite_op->inputs.at(0)]; if (in_tensor == nullptr) { MS_LOG(ERROR) << "input tensor is null"; return nullptr; } - const auto &out_tensor = tflite_subgraph->tensors[tflite_op->outputs[0]]; + const auto &out_tensor = tflite_subgraph->tensors[tflite_op->outputs.at(0)]; if (out_tensor == nullptr) { MS_LOG(ERROR) << "output tensor is null"; return nullptr; } if ((GetTfliteDataType(in_tensor->type) == kNumberTypeInt8 || GetTfliteDataType(in_tensor->type) == kNumberTypeUInt8)) { - std::unique_ptr<schema::QuantDTypeCastT> attr = std::make_unique<schema::QuantDTypeCastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->srcT = GetTfliteDataType(in_tensor->type); - attr->dstT = GetTfliteDataType(out_tensor->type); - primitive->value.value = attr.release(); - primitive->value.type = schema::PrimitiveType_QuantDTypeCast; + auto prim = std::make_unique<ops::QuantDTypeCast>(); + prim->set_src_t(GetTfliteDataType(in_tensor->type)); + prim->set_dst_t(GetTfliteDataType(out_tensor->type)); + return prim.release(); } else { - 
std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->srcT = GetTfliteDataType(in_tensor->type); - attr->dstT = GetTfliteDataType(out_tensor->type); - primitive->value.value = attr.release(); - primitive->value.type = schema::PrimitiveType_Cast; + auto prim = std::make_unique<ops::Cast>(); + auto dstT = GetTfliteDataType(out_tensor->type); + prim->AddAttr("to", MakeValue(static_cast<int32_t>(dstT))); + return prim.release(); } - return PrimitiveC::Create(primitive.release()); } TfliteNodeRegister g_tfliteDequantizeParser(tflite::BuiltinOperator_DEQUANTIZE, new TfliteDequantizeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h index 0f10bc922d..58eb481446 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_dequantize_parser.h @@ -28,8 +28,8 @@ class TfliteDequantizeParser : public TfliteNodeParser { public: TfliteDequantizeParser() : TfliteNodeParser("Dequantize") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc index bbf5139744..701f21cf7a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.cc @@ -17,33 +17,16 @@ #include "tools/converter/parser/tflite/tflite_expand_dims_parser.h" #include <vector> #include <memory> +#include "ops/expand_dims.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteExpandDimsParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ExpandDimsT> attr = std::make_unique<schema::ExpandDimsT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - std::vector<int> dims; - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, dims)) { - MS_LOG(ERROR) << "get expand_dims -> dim failed"; - return nullptr; - } - attr->dim = dims[0]; - primitive->value.type = schema::PrimitiveType_ExpandDims; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteExpandDimsParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ExpandDims>(); + return prim.release(); } + TfliteNodeRegister g_tfliteExpandDimsParser(tflite::BuiltinOperator_EXPAND_DIMS, new TfliteExpandDimsParser()); } // namespace lite } // namespace mindspore
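Two conventions in the hunks above are worth calling out. Dequantize now dispatches on the input tensor's dtype: an int8/uint8 source becomes a QuantDTypeCast primitive carrying explicit src/dst types, while any other source becomes a plain Cast whose "to" attribute holds the destination type. ExpandDims, by contrast, stops folding its dim operand into an attribute at parse time; under the unified IR the axis stays a graph input for later passes to consume. A standalone sketch of the dtype dispatch, assuming this codebase's TypeId constants and GetTfliteDataType helper (the free-function name is illustrative):

// Illustrative helper mirroring the dispatch in TfliteDequantizeParser::Parse.
#include <memory>
#include "ops/quant_dtype_cast.h"
#include "ops/cast.h"

namespace mindspore {
namespace lite {
ops::PrimitiveC *MakeDequantizePrimitive(TypeId src_type, TypeId dst_type) {
  if (src_type == kNumberTypeInt8 || src_type == kNumberTypeUInt8) {
    // Quantized source: emit QuantDTypeCast with explicit src/dst types.
    auto prim = std::make_unique<ops::QuantDTypeCast>();
    prim->set_src_t(src_type);
    prim->set_dst_t(dst_type);
    return prim.release();
  }
  // Non-quantized source (e.g. fp16): emit a plain Cast to the output type.
  auto prim = std::make_unique<ops::Cast>();
  prim->AddAttr("to", MakeValue(static_cast<int32_t>(dst_type)));
  return prim.release();
}
}  // namespace lite
}  // namespace mindspore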
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h index 4c4be4891c..ea3bafe827 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_expand_dims_parser.h @@ -29,8 +29,8 @@ class TfliteExpandDimsParser : public TfliteNodeParser { public: TfliteExpandDimsParser() : TfliteNodeParser("ExpandDims") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc index 6c366ef787..4631db572c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.cc @@ -17,37 +17,14 @@ #include "tools/converter/parser/tflite/tflite_fill_parser.h" #include <vector> #include <memory> +#include "ops/fill.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteFillParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::FillT> attr = std::make_unique<schema::FillT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - if (tflite_op->inputs.size() > 1) { - const auto &tflite_model_buffers = tflite_model->buffers; - const auto &data = tflite_model_buffers.at(tflite_op->inputs[1])->data; - if (!data.empty() && - GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->dims)) { - MS_LOG(ERROR) << "get fill -> dims failed"; - return nullptr; - } - } - - primitive->value.type = schema::PrimitiveType_Fill; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteFillParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Fill>(); + return prim.release(); } TfliteNodeRegister g_tfliteFillParser(tflite::BuiltinOperator_FILL, new TfliteFillParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h index bb0adcbcdf..264b72c16c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fill_parser.h @@ -29,8 +29,8 @@ class TfliteFillParser : public TfliteNodeParser { public: TfliteFillParser() : TfliteNodeParser("Fill") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc
b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc index cb1099a33a..79ff5cfbc9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.cc @@ -17,39 +17,27 @@ #include "tools/converter/parser/tflite/tflite_fullyconnected_parser.h" #include <vector> #include <memory> +#include "ops/fusion/full_connection.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteFullyConnectedParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteFullyConnectedParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::FullConnection>(); - std::unique_ptr<schema::FullConnectionT> attr = std::make_unique<schema::FullConnectionT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + MS_ASSERT(tflite_op != nullptr); + prim->set_axis(1); + prim->set_use_axis(false); + prim->set_has_bias(tflite_op->inputs.size() > 2 && tflite_op->inputs.at(2) != -1); const auto &tflite_attr = tflite_op->builtin_options.AsFullyConnectedOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op fully connect attr failed"; + MS_LOG(ERROR) << "get FullConnection attr failed"; return nullptr; } + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); - bool hasBias = tflite_op->inputs.size() > 2 && tflite_op->inputs[2] != -1; - - attr->hasBias = hasBias; - attr->axis = 1; - attr->useAxis = false; - attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); - - primitive->value.type = schema::PrimitiveType_FullConnection; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteFullyConnectedParser(tflite::BuiltinOperator_FULLY_CONNECTED, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h index 1150a29a4e..fd50c3b578 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_fullyconnected_parser.h @@ -29,8 +29,8 @@ class TfliteFullyConnectedParser : public TfliteNodeParser { public: TfliteFullyConnectedParser() : TfliteNodeParser("FullyConnected") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite
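The FullConnection mapping above fixes axis at 1 with use_axis disabled and derives has_bias from the operator's input list: TFLite marks an omitted optional tensor with index -1, so a bias exists only when a third input is present and not -1. A self-contained sketch of that derivation (the helper name is illustrative):

// Illustrative: mirrors the has_bias derivation in
// TfliteFullyConnectedParser::Parse above. TFLite encodes an absent
// optional input as index -1 rather than shrinking the input list.
#include <cstdint>
#include <vector>

bool FullConnectionHasBias(const std::vector<int32_t> &op_inputs) {
  return op_inputs.size() > 2 && op_inputs.at(2) != -1;
}

// Example: inputs {0, 1, 2} -> true (bias tensor present); {0, 1, -1} -> false.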
"ops/gather_nd.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteGatherNdParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - std::unique_ptr<schema::GatherNdT> attr = std::make_unique<schema::GatherNdT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_GatherNd; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteGatherNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::GatherNd>(); + return prim.release(); } TfliteNodeRegister g_tfliteGatherNdParser(tflite::BuiltinOperator_GATHER_ND, new TfliteGatherNdParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h index b07fa9f058..008a7c5801 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_nd_parser.h @@ -29,8 +29,8 @@ class TfliteGatherNdParser : public TfliteNodeParser { public: TfliteGatherNdParser() : TfliteNodeParser("GatherND") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc index 9aaf91d533..7533c4dce0 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.cc @@ -17,34 +17,23 @@ #include "tools/converter/parser/tflite/tflite_gather_parser.h" #include <vector> #include <memory> +#include "ops/gather.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteGatherParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::GatherT> attr = std::make_unique<schema::GatherT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteGatherParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Gather>(); + MS_ASSERT(tfliteOp != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsGatherOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op gather attr failed"; return nullptr; } - attr->axis = tflite_attr->axis; - attr->batchDims = 0; + prim->AddAttr("axis", MakeValue(static_cast<int32_t>(tflite_attr->axis))); - primitive->value.type = schema::PrimitiveType_Gather; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + 
return prim.release(); } TfliteNodeRegister g_tfliteGatherParser(tflite::BuiltinOperator_GATHER, new TfliteGatherParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h index 6485058427..a8eb06a8e7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_gather_parser.h @@ -29,8 +29,8 @@ class TfliteGatherParser : public TfliteNodeParser { public: TfliteGatherParser() : TfliteNodeParser("Gather") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc index 0b96b0a276..a9320d1f99 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.cc @@ -17,26 +17,14 @@ #include "tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h" #include <vector> #include <memory> +#include "ops/hashtable_lookup.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteHashtableLookupParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::HashtableLookupT> attr = std::make_unique<schema::HashtableLookupT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_HashtableLookup; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteHashtableLookupParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::HashtableLookup>(); + return prim.release(); } TfliteNodeRegister g_tfliteHashtableLookupParser(tflite::BuiltinOperator_HASHTABLE_LOOKUP, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h index fc24430806..0e245dd427 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_hashtable_lookup_parser.h @@ -29,8 +29,8 @@ class TfliteHashtableLookupParser : public TfliteNodeParser { public: TfliteHashtableLookupParser() : TfliteNodeParser("HashtableLookup") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc index 
b7e3aaf8bd..15d8bc6674 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.cc @@ -18,29 +18,26 @@ #include "tools/converter/parser/tflite/tflite_l2norm_parser.h" #include <vector> #include <memory> +#include "ops/fusion/l2_normalize_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteL2NormParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - std::unique_ptr<schema::L2NormT> attr = std::make_unique<schema::L2NormT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - const auto &tflite_attr = tflite_op->builtin_options.AsL2NormOptions(); - attr->axis = {-1}; - attr->epsilon = 1e-6f; - attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); +ops::PrimitiveC *TfliteL2NormParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::L2NormalizeFusion>(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; + prim->set_axis({-1}); + prim->set_epsilon(1e-6f); + + MS_ASSERT(tflite_op != nullptr); + const auto &tflite_attr = tflite_op->builtin_options.AsL2NormOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get L2NormalizeFusion attr failed"; return nullptr; } - primitive->value.type = schema::PrimitiveType_L2Norm; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); + + return prim.release(); } TfliteNodeRegister g_tfliteL2NormParser(tflite::BuiltinOperator_L2_NORMALIZATION, new TfliteL2NormParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h index 7539d52f7d..7b5604f8e6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_l2norm_parser.h @@ -29,8 +29,8 @@ class TfliteL2NormParser : public TfliteNodeParser { public: TfliteL2NormParser() : TfliteNodeParser("L2_NORMALIZATION") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc index f65cdc093b..9591fc42d4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.cc @@ -17,51 +17,33 @@ #include "tools/converter/parser/tflite/tflite_logical_parser.h" #include <vector> #include <memory> -#include <string> +#include "ops/logical_and.h" +#include "ops/logical_not.h" +#include "ops/logical_or.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteLogicalParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = 
std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; - if (tflite_op_type == tflite::BuiltinOperator_LOGICAL_AND) { - MS_LOG(DEBUG) << "parse TfliteLogicalAndParser"; - std::unique_ptr<schema::LogicalAndT> attr = std::make_unique<schema::LogicalAndT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalAnd; - primitive->value.value = attr.release(); - } else if (tflite_op_type == tflite::BuiltinOperator_LOGICAL_NOT) { - MS_LOG(DEBUG) << "parse TfliteLogicalNotParser"; - std::unique_ptr<schema::LogicalNotT> attr = std::make_unique<schema::LogicalNotT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalNot; - primitive->value.value = attr.release(); - } else if (tflite_op_type == tflite::BuiltinOperator_LOGICAL_OR) { - MS_LOG(DEBUG) << "parse TfliteLogicalOrParser"; - std::unique_ptr<schema::LogicalOrT> attr = std::make_unique<schema::LogicalOrT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_LogicalOr; - primitive->value.value = attr.release(); - } - return PrimitiveC::Create(primitive.release()); + +ops::PrimitiveC *TfliteLogicalAndParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LogicalAnd>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteLogicalNotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LogicalNot>(); + return prim.release(); +} + +ops::PrimitiveC *TfliteLogicalOrParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LogicalOr>(); + return prim.release(); } -TfliteNodeRegister g_tfliteLogicalAndParser(tflite::BuiltinOperator_LOGICAL_AND, new TfliteLogicalParser()); -TfliteNodeRegister g_tfliteLogicalNotParser(tflite::BuiltinOperator_LOGICAL_NOT, new TfliteLogicalParser()); -TfliteNodeRegister g_tfliteLogicalOrParser(tflite::BuiltinOperator_LOGICAL_OR, new TfliteLogicalParser()); +TfliteNodeRegister g_tfliteLogicalAndParser(tflite::BuiltinOperator_LOGICAL_AND, new TfliteLogicalAndParser()); +TfliteNodeRegister g_tfliteLogicalNotParser(tflite::BuiltinOperator_LOGICAL_NOT, new TfliteLogicalNotParser()); +TfliteNodeRegister g_tfliteLogicalOrParser(tflite::BuiltinOperator_LOGICAL_OR, new TfliteLogicalOrParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h index 9573f0d4cf..278ac53699 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_logical_parser.h @@ -23,14 +23,32 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { -class TfliteLogicalParser : public TfliteNodeParser { +namespace mindspore { +namespace lite { +class TfliteLogicalAndParser : public TfliteNodeParser { 
public: - TfliteLogicalParser() : TfliteNodeParser("node_name") {} + TfliteLogicalAndParser() : TfliteNodeParser("LogicalAnd") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite + +class TfliteLogicalNotParser : public TfliteNodeParser { + public: + TfliteLogicalNotParser() : TfliteNodeParser("LogicalNot") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; + +class TfliteLogicalOrParser : public TfliteNodeParser { + public: + TfliteLogicalOrParser() : TfliteNodeParser("LogicalOr") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; +}; +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_LOGICAL_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc index af6f2c2558..d98ba7e991 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.cc @@ -17,36 +17,26 @@ #include "tools/converter/parser/tflite/tflite_lrn_parser.h" #include <vector> #include <memory> +#include "ops/lrn.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteLRNParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::LocalResponseNormalizationT> attr = std::make_unique<schema::LocalResponseNormalizationT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteLRNParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LRN>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsLocalResponseNormalizationOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op LRN attr failed"; return nullptr; } - attr->depth_radius = tflite_attr->radius; - attr->alpha = tflite_attr->alpha; - attr->beta = tflite_attr->beta; - attr->bias = tflite_attr->bias; + prim->set_depth_radius(tflite_attr->radius); + prim->set_alpha(tflite_attr->alpha); + prim->set_beta(tflite_attr->beta); + prim->set_bias(tflite_attr->bias); - primitive->value.type = schema::PrimitiveType_LocalResponseNormalization; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteLRNParser(tflite::BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION, new TfliteLRNParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h index 840b2bf67a..ed76ed6a14 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lrn_parser.h @@ -29,8 +29,8 @@ class TfliteLRNParser : 
public TfliteNodeParser { public: TfliteLRNParser() : TfliteNodeParser("LocalResponseNorm") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc index e66fa261f7..9b5f64085a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.cc @@ -17,37 +17,32 @@ #include "tools/converter/parser/tflite/tflite_lsh_projection_parser.h" #include <vector> #include <memory> +#include "ops/lsh_projection.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteLshProjectionParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteLshProjectionParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::LshProjection>(); - std::unique_ptr<schema::LshProjectionT> attr = std::make_unique<schema::LshProjectionT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + const auto &tflite_attr = tflite_op->builtin_options.AsLSHProjectionOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get op LshProjection attr failed"; return nullptr; } - - const auto &tflite_attr = tflite_op->builtin_options.AsLSHProjectionOptions(); switch (tflite_attr->type) { case tflite::LSHProjectionType_SPARSE: - attr->type = schema::LshProjectionType_SPARSE; + prim->set_type(mindspore::LshProjectionType::SPARSE); break; case tflite::LSHProjectionType_DENSE: - attr->type = schema::LshProjectionType_DENSE; + prim->set_type(mindspore::LshProjectionType::DENSE); break; default: - attr->type = schema::LshProjectionType_UNKNOWN; + prim->set_type(mindspore::LshProjectionType::UNKNOWN); } - primitive->value.type = schema::PrimitiveType_LshProjection; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } TfliteNodeRegister g_tfliteLshProjectionParser(tflite::BuiltinOperator_LSH_PROJECTION, new TfliteLshProjectionParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.h index f3759d6ef6..7e4dc5bc5b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_lsh_projection_parser.h @@ -29,8 +29,8 @@ class TfliteLshProjectionParser : public TfliteNodeParser { public: TfliteLshProjectionParser() : TfliteNodeParser("LshProjection") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> 
&tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.cc index 42e352594b..5bffe4b7b4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.cc @@ -18,28 +18,25 @@ #include <vector> #include <memory> #include <map> +#include "ops/mat_mul.h" -namespace mindspore::lite { -PrimitiveC *TfliteMatMulParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteMatMulParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::MatMul>(); - std::unique_ptr<schema::MatMulT> attr = std::make_unique<schema::MatMulT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + const auto &tflite_attr = tflite_op->builtin_options.AsBatchMatMulOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get op MatMul attr failed"; return nullptr; } - const auto &tflite_attr = tflite_op->builtin_options.AsBatchMatMulOptions(); - attr->transposeA = tflite_attr->adj_x; - attr->transposeB = tflite_attr->adj_y; - primitive->value.type = schema::PrimitiveType_MatMul; - primitive->value.value = attr.release(); + prim->set_transpose_a(tflite_attr->adj_x); + prim->set_transpose_b(tflite_attr->adj_y); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.h index d23c8a429d..d7ac6bb725 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_matmul_parser.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2019 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ - #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MATMUL_PARSER_H #define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MATMUL_PARSER_H @@ -23,14 +22,16 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteMatMulParser : public TfliteNodeParser { public: TfliteMatMulParser() : TfliteNodeParser("MatMul") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SLICE_PARSER_H +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MATMUL_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc index 4629ca3d84..e43fe9aa92 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.cc @@ -19,11 +19,16 @@ #include <memory> #include <algorithm> #include <utility> +#include "tools/converter/converter_flags.h" #include "src/param_value_lite.h" #include "src/common/file_utils.h" +#include "ops/return.h" +#include "ops/make_tuple.h" +#include "ops/tuple_get_item.h" +#include "ops/primitive_c.h" +#include "ir/func_graph.h" namespace mindspore::lite { - std::unique_ptr<tflite::ModelT> TfliteModelParser::ReadTfliteModel(const char *model_path) { size_t size = 0; tflite_model_buf_ = ReadFile(model_path, &size); @@ -106,21 +111,24 @@ STATUS TfliteModelParser::ConvertOps() { auto op_name = op_type + "-" + std::to_string(op_idx); op_idx++; // parse primitive + MS_LOG(INFO) << "parse node: " << op_name; auto node_parser = TfliteNodeParserRegistry::GetInstance()->GetNodeParser(tflite_op_type); if (node_parser == nullptr) { NoSupportOp::GetInstance()->InsertOp(op_type); status = (status == RET_OK ?
RET_NOT_FIND_OP : status); continue; } - if (status != RET_OK) { continue; } - auto primitiveC = node_parser->ParseLitePrimitive(op, tflite_model_); - if (primitiveC == nullptr) { - MS_LOG(ERROR) << "parse node " << op_name << " parser failed"; - continue; + std::vector<AnfNodePtr> op_inputs; + auto primitiveC = node_parser->Parse(op, tflite_model_); + if (primitiveC != nullptr) { + op_inputs = {NewValueNode(std::shared_ptr<ops::PrimitiveC>(primitiveC))}; + } else { + MS_LOG(ERROR) << "parse failed for node: " << op_name; + return RET_ERROR; } status = ConvertOpQuantParams(op.get(), primitiveC); @@ -129,7 +137,6 @@ STATUS TfliteModelParser::ConvertOps() { continue; } - std::vector<AnfNodePtr> op_inputs = {NewValueNode(std::shared_ptr<lite::PrimitiveC>(primitiveC))}; // parse inputs for (int i = 0; i < static_cast<int>(op->inputs.size()); i++) { auto input_idx = op->inputs.at(i); @@ -194,7 +201,7 @@ STATUS TfliteModelParser::SetTensorQuantParam(const tflite::TensorT *tflite_tens for (size_t i = 0; i < tflite_tensor->quantization->scale.size(); i++) { std::unique_ptr<schema::QuantParamT> quant_param = std::make_unique<QuantParamT>(); if (quant_param == nullptr) { - MS_LOG(ERROR) << "quant_param is null"; + MS_LOG(ERROR) << "new quant_param failed"; return RET_NULL_PTR; } @@ -221,7 +228,7 @@ STATUS TfliteModelParser::SetTensorQuantParam(const tflite::TensorT *tflite_tens return RET_OK; } -STATUS TfliteModelParser::ConvertOpQuantParams(const tflite::OperatorT *op, lite::PrimitiveC *primitive_c) { +STATUS TfliteModelParser::ConvertOpQuantParams(const tflite::OperatorT *op, ops::PrimitiveC *primitive_c) { if (op == nullptr) { MS_LOG(ERROR) << "tflite op is null, get quant params failed."; return RET_NULL_PTR; @@ -233,10 +240,11 @@ STATUS TfliteModelParser::ConvertOpQuantParams(const tflite::OperatorT *op, lite } int round_type = 1; - if (primitive_c->primitiveT()->value.type == PrimitiveType_Conv2D) { + if (primitive_c->name() == "Conv2D" || primitive_c->name() == "Conv2DFusion") { round_type = 2; } const auto &tflite_subgraph = tflite_model_->subgraphs.front(); + auto quant_params_holder = std::make_shared<QuantParamHolder>(); for (auto input_idx : op->inputs) { if (input_idx < 0) { input_idx += tflite_subgraph->tensors.size(); @@ -248,7 +256,7 @@ STATUS TfliteModelParser::ConvertOpQuantParams(const tflite::OperatorT *op, lite MS_LOG(ERROR) << "set input tensor quant param failed."; return status; } - primitive_c->AddInputQuantParam(quant_params); + quant_params_holder->AddInputQuantParam(quant_params); } for (auto output_idx : op->outputs) { if (output_idx < 0) { @@ -261,8 +269,9 @@ STATUS TfliteModelParser::ConvertOpQuantParams(const tflite::OperatorT *op, lite MS_LOG(ERROR) << "set output tensor quant param failed."; return status; } - primitive_c->AddOutputQuantParam(quant_params); + quant_params_holder->AddOutputQuantParam(quant_params); } + primitive_c->AddAttr("quant_params", quant_params_holder); return RET_OK; } @@ -290,9 +299,9 @@ STATUS TfliteModelParser::ConvertGraphOutputs() { const auto &tflite_subgraph = tflite_model_->subgraphs.front(); if (tflite_subgraph->outputs.size() > 1) { std::vector<AnfNodePtr> make_tuple_inputs; - auto make_tuple_prim_ptr = GetMakeTuplePrim(); + auto make_tuple_prim_ptr = std::make_shared<ops::MakeTuple>(); if (make_tuple_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; + MS_LOG(ERROR) << "new MakeTuple failed"; return RET_NULL_PTR; } auto make_tuple_prim = NewValueNode(make_tuple_prim_ptr); @@ -310,9 +319,9 @@ STATUS 
TfliteModelParser::ConvertGraphOutputs() { make_tuple_cnode->set_fullname_with_scope("return tuple"); std::vector<AnfNodePtr> op_inputs; - auto return_prim_ptr = GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } auto value_node = NewValueNode(return_prim_ptr); @@ -322,9 +331,9 @@ STATUS TfliteModelParser::ConvertGraphOutputs() { cnode->set_fullname_with_scope("Return"); func_graph_->set_return(cnode); } else { - auto returnPrim = GetReturnPrim(); + auto returnPrim = std::make_shared<ops::Return>(); if (returnPrim == nullptr) { - MS_LOG(ERROR) << "GetReturnPrim return nullptr"; + MS_LOG(ERROR) << "new Return failed"; return RET_NULL_PTR; } int outputNode = tflite_subgraph->outputs.front() < 0 @@ -420,9 +429,9 @@ STATUS TfliteModelParser::ConvertOutputTensor(const tflite::OperatorT *op, const [](const int32_t &value) { return static_cast<int64_t>(value); }); auto type_ptr = TypeIdToType(GetTfliteDataType(tensor->type)); abstract_list.emplace_back(std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector)); - auto tuple_get_item_prim_ptr = GetTupleGetItemPrim(); + auto tuple_get_item_prim_ptr = std::make_shared<ops::TupleGetItem>(); if (tuple_get_item_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; + MS_LOG(ERROR) << "new TupleGetItem failed"; return RET_NULL_PTR; } auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr); @@ -437,9 +446,4 @@ STATUS TfliteModelParser::ConvertOutputTensor(const tflite::OperatorT *op, const } return RET_OK; } - -MetaGraphT *TfliteModelParser::ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) { - return nullptr; -} } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h index 646b5b41c3..1d800b3489 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_model_parser.h @@ -13,8 +13,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#ifndef LITE_TFLITE_MODEL_PARSER_H -#define LITE_TFLITE_MODEL_PARSER_H +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H #include <string> #include <unordered_map> @@ -24,7 +24,8 @@ #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" #include "tools/common/tensor_util.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteModelParser : public ModelParser { public: TfliteModelParser() = default; @@ -33,8 +34,6 @@ class TfliteModelParser : public ModelParser { FuncGraphPtr Parse(const std::string &model_file, const std::string &weight_file, const QuantType &quant_type) override; - MetaGraphT *ParseToFb(const std::string &model_file, const std::string &weight_file, - const QuantType &quant_type) override; private: std::unordered_map<int, AnfNodePtr> nodes_; @@ -44,12 +43,13 @@ class TfliteModelParser : public ModelParser { std::unique_ptr<tflite::ModelT> ReadTfliteModel(const char *model_path); STATUS ConvertConstTensor(const tflite::TensorT *tensor, Parameter *parameter, const std::string &tensor_name); STATUS ConvertOutputTensor(const tflite::OperatorT *op, const CNodePtr &dst_cnode); - STATUS ConvertOpQuantParams(const tflite::OperatorT *op, lite::PrimitiveC *primitive_c); + STATUS ConvertOpQuantParams(const tflite::OperatorT *op, ops::PrimitiveC *primitive_c); STATUS ConvertOps(); STATUS ConvertGraphInputs(); STATUS ConvertGraphOutputs(); static STATUS SetTensorQuantParam(const tflite::TensorT *tflite_tensor, std::vector<QuantParamT> *quant_params, int round_type = 1); }; -} // namespace mindspore::lite -#endif // LITE_TFLITE_MODEL_PARSER_H +} // namespace lite +} // namespace mindspore +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_MODEL_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h index 3691cdbe5e..42524d618d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_node_parser.h @@ -22,7 +22,6 @@ #include <map> #include <memory> #include <utility> -#include "src/ops/primitive_c.h" #include "src/common/log_adapter.h" #include "schema/inner/model_generated.h" #include "schema/schema_generated.h" @@ -30,16 +29,18 @@ #include "ir/dtype/type_id.h" #include "include/errorcode.h" #include "tools/converter/parser/tflite/tflite_util.h" +#include "ops/primitive_c.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteNodeParser { public: explicit TfliteNodeParser(const std::string &node_name) : name(node_name) {} virtual ~TfliteNodeParser() = default; - virtual lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { + virtual ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { return nullptr; } @@ -122,9 +123,99 @@ class TfliteNodeParser { return RET_OK; } + template <typename T> + STATUS TransTfliteDataToVec2D(const int32_t tensor_index, + const std::vector<std::unique_ptr<tflite::TensorT>> &tflite_tensors, + const std::vector<std::unique_ptr<tflite::BufferT>> &tflite_model_buffer, + std::vector<std::vector<T>> &vec) { + const auto &tensor = tflite_tensors[tensor_index]; + if (tensor == nullptr) { + MS_LOG(ERROR) << "tensor is null"; + return RET_NULL_PTR; + } + 
+ int32_t count = 1; + std::for_each(tensor->shape.begin(), tensor->shape.end(), [&](int32_t sha) { count *= sha; }); + auto &buf_data = tflite_model_buffer[tensor->buffer]; + if (buf_data == nullptr) { + MS_LOG(ERROR) << "buf_data is null"; + return RET_NULL_PTR; + } + auto data_ptr = buf_data->data.data(); + if (data_ptr == nullptr) { + MS_LOG(DEBUG) << "data is not a constant"; + return RET_NO_CHANGE; + } + + vec.resize(count / 2, std::vector<T>(2)); + switch (tensor->type) { + case tflite::TensorType_UINT8: { + for (int i = 0; i < count / 2; i++) { + uint8_t data = *(static_cast<uint8_t *>(static_cast<void *>(data_ptr + 2 * i * sizeof(uint8_t)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<uint8_t *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(uint8_t)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + case tflite::TensorType_INT8: { + for (int i = 0; i < count / 2; i++) { + int8_t data = *(static_cast<int8_t *>(static_cast<void *>(data_ptr + 2 * i * sizeof(int8_t)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<int8_t *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(int8_t)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + case tflite::TensorType_INT16: { + for (int i = 0; i < count / 2; i++) { + int16_t data = *(static_cast<int16_t *>(static_cast<void *>(data_ptr + 2 * i * sizeof(int16_t)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<int16_t *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(int16_t)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + case tflite::TensorType_INT32: { + for (int i = 0; i < count / 2; i++) { + int32_t data = *(static_cast<int32_t *>(static_cast<void *>(data_ptr + 2 * i * sizeof(int32_t)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<int32_t *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(int32_t)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + case tflite::TensorType_INT64: { + for (int i = 0; i < count / 2; i++) { + int64_t data = *(static_cast<int64_t *>(static_cast<void *>(data_ptr + 2 * i * sizeof(int64_t)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<int64_t *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(int64_t)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + case tflite::TensorType_FLOAT32: { + for (int i = 0; i < count / 2; i++) { + float data = *(static_cast<float *>(static_cast<void *>(data_ptr + 2 * i * sizeof(float)))); + vec[i][0] = static_cast<T>(data); + data = *(static_cast<float *>(static_cast<void *>(data_ptr + (2 * i + 1) * sizeof(float)))); + vec[i][1] = static_cast<T>(data); + } + break; + } + default: { + MS_LOG(ERROR) << "wrong tensor type: " << tensor->type; + return RET_ERROR; + } + } + return RET_OK; + } + protected: const std::string &name; }; -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_NODE_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc index f3f08eb329..8ec9db2f81 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.cc @@ -17,40 +17,23 @@ #include "tools/converter/parser/tflite/tflite_one_hot_parser.h" #include <vector> #include <memory> +#include "ops/one_hot.h" namespace mindspore { namespace lite { -PrimitiveC
*TfliteOneHotParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "op->primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::OneHotT> attr = std::make_unique<schema::OneHotT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteOneHotParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::OneHot>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsOneHotOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op onehot attr failed"; return nullptr; } - auto axis = tflite_attr->axis; - const auto &tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; - if (tensor == nullptr) { - MS_LOG(ERROR) << "tensor is null"; - return nullptr; - } - attr->axis = axis; + prim->set_axis(tflite_attr->axis); - primitive->value.type = schema::PrimitiveType_OneHot; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteOneHotParser(tflite::BuiltinOperator_ONE_HOT, new TfliteOneHotParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h index d3a74d4741..a421bf28f0 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_one_hot_parser.h @@ -29,8 +29,8 @@ class TfliteOneHotParser : public TfliteNodeParser { public: TfliteOneHotParser() : TfliteNodeParser("OneHot") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc index 68b9ced562..4823fd5022 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.cc @@ -18,51 +18,52 @@ #include <vector> #include <memory> #include <string> +#include "ops/fusion/pad_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TflitePadParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; +ops::PrimitiveC *TflitePadParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::PadFusion>(); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - 
std::unique_ptr<schema::PadT> attr = std::make_unique<schema::PadT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + auto &opcode = tflite_model->operator_codes.at(tflite_op->opcode_index); + if (opcode == nullptr) { + MS_LOG(ERROR) << "opcode is nullptr"; return nullptr; } - auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; + auto tflite_op_type = opcode->builtin_code; if (tflite_op_type == tflite::BuiltinOperator_PAD) { - const auto &tflite_attr = tflite_op->builtin_options.AsPadOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op pad attr failed"; - return nullptr; - } - attr->paddingMode = schema::PaddingMode_CONSTANT; - attr->constantValue = 0.0f; - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->paddings)) { - MS_LOG(ERROR) << "get pad -> paddings failed"; + prim->set_padding_mode(mindspore::PaddingMode::CONSTANT); + prim->set_constant_value(0.0); + + std::vector<std::vector<int64_t>> paddings; + if (TransTfliteDataToVec2D(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, paddings)) { + MS_LOG(ERROR) << "get Pad -> paddings failed"; return nullptr; } + prim->set_paddings(paddings); } else if (tflite_op_type == tflite::BuiltinOperator_MIRROR_PAD) { const auto &tflite_attr = tflite_op->builtin_options.AsMirrorPadOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op pad attr failed"; + MS_LOG(ERROR) << "get MirrorPad attr failed"; return nullptr; } switch (tflite_attr->mode) { case tflite::MirrorPadMode_REFLECT: - attr->paddingMode = schema::PaddingMode_REFLECT; + prim->set_padding_mode(mindspore::PaddingMode::REFLECT); break; case tflite::MirrorPadMode_SYMMETRIC: - attr->paddingMode = schema::PaddingMode_SYMMETRIC; + prim->set_padding_mode(mindspore::PaddingMode::SYMMETRIC); break; default: - MS_LOG(ERROR) << "paddingmode:" << tflite_attr->mode << " don't support"; + MS_LOG(ERROR) << "paddingMode:" << tflite_attr->mode << " is not supported"; return nullptr; } } else { @@ -70,9 +71,7 @@ PrimitiveC *TflitePadParser::ParseLitePrimitive(const std::unique_ptr<tflite::Op return nullptr; } - primitive->value.type = schema::PrimitiveType_Pad; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tflitePadParser(tflite::BuiltinOperator_PAD, new TflitePadParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h index 9a5648356a..05f92091dd 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pad_parser.h @@ -29,8 +29,8 @@ class TflitePadParser : public TfliteNodeParser { public: TflitePadParser() : TfliteNodeParser("Pad") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc index 1bdc80acb2..b44e31c8d3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc +++ 
b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.cc @@ -18,60 +18,94 @@ #include <vector> #include <memory> #include <string> +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/max_pool_fusion.h" -namespace mindspore::lite { -lite::PrimitiveC *TflitePoolingParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteAvgPoolParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::AvgPoolFusion>(); + + prim->set_format(mindspore::Format::NHWC); + prim->set_round_mode(mindspore::RoundMode::FLOOR); + prim->set_global(false); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); const auto &tflite_subgraph = tflite_model->subgraphs.front(); - std::unique_ptr<schema::PoolingT> attr = std::make_unique<schema::PoolingT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; + } + const auto &tflite_attr = tflite_op->builtin_options.AsPool2DOptions(); + if (tflite_attr == nullptr) { + MS_LOG(ERROR) << "get op: pooling attr failed"; + return nullptr; + } + prim->set_kernel_size({tflite_attr->filter_height, tflite_attr->filter_width}); + prim->set_strides({tflite_attr->stride_h, tflite_attr->stride_w}); + auto padMode = GetPadMode(tflite_attr->padding); + prim->set_pad_mode(padMode); + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); + + // calculate pad params + const auto &dataTensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(0)); + std::vector<int64_t> params; + int status = getPaddingParam(dataTensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w, + tflite_attr->filter_height, tflite_attr->filter_width, &params); + if (status != RET_OK && status != RET_NO_CHANGE) { + MS_LOG(ERROR) << "get padding params failed"; + return nullptr; + } else if (status == RET_OK) { + prim->set_pad(params); } - auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; - if (tflite_op_type == tflite::BuiltinOperator_AVERAGE_POOL_2D) { - attr->poolingMode = schema::PoolMode_MEAN_POOLING; - } else if (tflite_op_type == tflite::BuiltinOperator_MAX_POOL_2D) { - attr->poolingMode = schema::PoolMode_MAX_POOLING; + return prim.release(); +} + +ops::PrimitiveC *TfliteMaxPoolParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::MaxPoolFusion>(); + + prim->set_format(mindspore::Format::NHWC); + prim->set_round_mode(mindspore::RoundMode::FLOOR); + prim->set_global(false); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; + return nullptr; } const auto &tflite_attr = tflite_op->builtin_options.AsPool2DOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op pooling attr failed"; + MS_LOG(ERROR) << "get op: pooling attr failed"; return nullptr; } - attr->windowW = tflite_attr->filter_width; - attr->windowH = tflite_attr->filter_height; - attr->strideW = tflite_attr->stride_w; - attr->strideH = tflite_attr->stride_h; - attr->padMode =
GetPadMode(tflite_attr->padding); - attr->format = schema::Format::Format_NHWC; - - attr->global = false; - attr->roundMode = schema::RoundMode_FLOOR; - attr->activationType = GetActivationFunctionType(tflite_attr->fused_activation_function); + prim->set_kernel_size({tflite_attr->filter_height, tflite_attr->filter_width}); + prim->set_strides({tflite_attr->stride_h, tflite_attr->stride_w}); + auto padMode = GetPadMode(tflite_attr->padding); + prim->set_pad_mode(padMode); + prim->set_activation_type(GetActivationFunctionType(tflite_attr->fused_activation_function)); // calculate pad params - auto data_index = tflite_op->inputs[0]; - const auto &data_tensor = tflite_subgraph->tensors[data_index]; + const auto &dataTensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(0)); std::vector<int64_t> params; - int status = - getPaddingParam(data_tensor, attr->padMode, attr->strideH, attr->strideW, attr->windowH, attr->windowW, &params); + int status = getPaddingParam(dataTensor, padMode, tflite_attr->stride_h, tflite_attr->stride_w, + tflite_attr->filter_height, tflite_attr->filter_width, &params); if (status != RET_OK && status != RET_NO_CHANGE) { MS_LOG(ERROR) << "get padding params failed"; return nullptr; } else if (status == RET_OK) { - attr->padUp = params.at(0); - attr->padDown = params.at(1); - attr->padLeft = params.at(2); - attr->padRight = params.at(3); + prim->set_pad(params); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Pooling; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } -TfliteNodeRegister g_tfliteMeanPoolingParser(tflite::BuiltinOperator_AVERAGE_POOL_2D, new TflitePoolingParser()); -TfliteNodeRegister g_tfliteMaxPoolingParser(tflite::BuiltinOperator_MAX_POOL_2D, new TflitePoolingParser()); -} // namespace mindspore::lite +TfliteNodeRegister g_tfliteMeanPoolingParser(tflite::BuiltinOperator_AVERAGE_POOL_2D, new TfliteAvgPoolParser()); +TfliteNodeRegister g_tfliteMaxPoolingParser(tflite::BuiltinOperator_MAX_POOL_2D, new TfliteMaxPoolParser()); +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h index 58d2c1869a..23e64c1da9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_pooling_parser.h @@ -23,14 +23,24 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { -class TflitePoolingParser : public TfliteNodeParser { +namespace mindspore { +namespace lite { +class TfliteAvgPoolParser : public TfliteNodeParser { public: - TflitePoolingParser() : TfliteNodeParser("node_name") {} + TfliteAvgPoolParser() : TfliteNodeParser("avg_pool") {} - lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite + +class TfliteMaxPoolParser : public TfliteNodeParser { + public: + TfliteMaxPoolParser() : TfliteNodeParser("max_pool") {} + + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> 
&tflite_model) override; +}; +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_POOLING_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc deleted file mode 100644 index ce4ebc61fa..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.cc +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "tools/converter/parser/tflite/tflite_prelu_parser.h" -#include <vector> -#include <memory> - -namespace mindspore { -namespace lite { -PrimitiveC *TflitePReLUParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::PReLUT> attr = std::make_unique<schema::PReLUT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->channelShared = true; - primitive->value.type = schema::PrimitiveType_PReLU; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); -} - -TfliteNodeRegister g_tflitePReLUParser(tflite::BuiltinOperator_PRELU, new TflitePReLUParser()); -} // namespace lite -} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.h deleted file mode 100644 index 5c83b82c22..0000000000 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_prelu_parser.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_PRELU_PARSER_H -#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_PRELU_PARSER_H - -#include <memory> -#include <vector> -#include <map> -#include "tools/converter/parser/tflite/tflite_node_parser.h" -#include "tools/converter/parser/tflite/tflite_node_parser_registry.h" - -namespace mindspore { -namespace lite { -class TflitePReLUParser : public TfliteNodeParser { - public: - TflitePReLUParser() : TfliteNodeParser("PRELU") {} - - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; -}; -} // namespace lite -} // namespace mindspore - -#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_PRELU_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc index 4351a6cb16..87a232b59d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.cc @@ -16,18 +16,20 @@ #include "tools/converter/parser/tflite/tflite_quantize_parser.h" #include <vector> #include <memory> +#include "ops/cast.h" +#include "ops/quant_dtype_cast.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteQuantizeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { +ops::PrimitiveC *TfliteQuantizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is null"; return nullptr; } - const auto &in_tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; if (in_tensor == nullptr) { MS_LOG(ERROR) << "input tensor is null"; @@ -38,29 +40,20 @@ PrimitiveC *TfliteQuantizeParser::ParseLitePrimitive(const std::unique_ptr<tflit MS_LOG(ERROR) << "output tensor is null"; return nullptr; } - if ((GetTfliteDataType(out_tensor->type) == kNumberTypeInt8 || - GetTfliteDataType(out_tensor->type) == kNumberTypeUInt8)) { - std::unique_ptr<schema::QuantDTypeCastT> attr = std::make_unique<schema::QuantDTypeCastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->srcT = GetTfliteDataType(in_tensor->type); - attr->dstT = GetTfliteDataType(out_tensor->type); - primitive->value.type = schema::PrimitiveType_QuantDTypeCast; - primitive->value.value = attr.release(); + + auto in_tensor_type = GetTfliteDataType(in_tensor->type); + auto out_tensor_type = GetTfliteDataType(out_tensor->type); + if (out_tensor_type == kNumberTypeInt8 || out_tensor_type == kNumberTypeUInt8) { + auto prim = std::make_unique<ops::QuantDTypeCast>(); + prim->set_src_t(in_tensor_type); + prim->set_dst_t(out_tensor_type); + return prim.release(); } else { - std::unique_ptr<schema::CastT> attr = std::make_unique<schema::CastT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - attr->srcT = GetTfliteDataType(in_tensor->type); - attr->dstT = GetTfliteDataType(out_tensor->type); - primitive->value.type = schema::PrimitiveType_Cast; - primitive->value.value = 
attr.release(); + auto prim = std::make_unique<ops::Cast>(); + auto dstT = GetTfliteDataType(out_tensor->type); + prim->AddAttr("to", MakeValue(static_cast<int32_t>(dstT))); + return prim.release(); } - return PrimitiveC::Create(primitive.release()); } TfliteNodeRegister g_tfliteQuantizeParser(tflite::BuiltinOperator_QUANTIZE, new TfliteQuantizeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h index e799b0b6f4..38d030b6f6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_quantize_parser.h @@ -28,8 +28,8 @@ class TfliteQuantizeParser : public TfliteNodeParser { public: TfliteQuantizeParser() : TfliteNodeParser("Quantize") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc index 34d3221550..831ea9ed4d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.cc @@ -17,45 +17,43 @@ #include "tools/converter/parser/tflite/tflite_range_parser.h" #include <vector> #include <memory> +#include "ops/range.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteRangeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteRangeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Range>(); - std::unique_ptr<schema::RangeT> attr = std::make_unique<schema::RangeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + prim->set_d_type(0); + + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - attr->dType = 0; - std::vector<int> limit; - std::vector<int> delta; - int status = GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, limit); + std::vector<int64_t> limit; + std::vector<int64_t> delta; + int status = GetTfliteData(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, limit); if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "range -> limit get failed"; + MS_LOG(ERROR) << "get range -> limit failed"; return nullptr; - } else if (status == RET_OK) { - status = GetTfliteData(tflite_op->inputs[2], tflite_subgraph->tensors, tflite_model->buffers, delta); + } + if (status == RET_OK) { + status = GetTfliteData(tflite_op->inputs.at(2), tflite_subgraph->tensors, tflite_model->buffers, delta); if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "stridedSlice -> end 
get failed"; + MS_LOG(ERROR) << "get range -> delta failed"; return nullptr; } } if (status == RET_OK) { - attr->limit = limit.front(); - attr->delta = delta.front(); + prim->set_limit(limit.front()); + prim->set_delta(delta.front()); } - primitive->value.type = schema::PrimitiveType_Range; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } TfliteNodeRegister g_tfliteRangeParser(tflite::BuiltinOperator_RANGE, new TfliteRangeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h index 7b294d7630..4ebeeda575 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_range_parser.h @@ -29,8 +29,8 @@ class TfliteRangeParser : public TfliteNodeParser { public: TfliteRangeParser() : TfliteNodeParser("Range") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc index d4538eaf93..121527a217 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.cc @@ -17,26 +17,14 @@ #include "tools/converter/parser/tflite/tflite_rank_parser.h" #include <vector> #include <memory> +#include "ops/rank.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteRankParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::RankT> attr = std::make_unique<schema::RankT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_Rank; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteRankParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Rank>(); + return prim.release(); } TfliteNodeRegister g_tfliteRankParser(tflite::BuiltinOperator_RANK, new TfliteRankParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h index 499cc6e630..b8c4882526 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_rank_parser.h @@ -29,8 +29,8 @@ class TfliteRankParser : public TfliteNodeParser { public: TfliteRankParser() : TfliteNodeParser("Rank") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore 
diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc index 9bc4b43cc4..1865a50df7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.cc @@ -17,62 +17,39 @@ #include "tools/converter/parser/tflite/tflite_reduce_parser.h" #include <vector> #include <memory> -#include <string> +#include "ops/fusion/reduce_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteReduceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ReduceT> attr = std::make_unique<schema::ReduceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteReduceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ReduceFusion>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsReducerOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op reduce attr failed"; + MS_LOG(ERROR) << "get reduce attr failed"; return nullptr; } - attr->keepDims = tflite_attr->keep_dims; + prim->set_keep_dims(tflite_attr->keep_dims); auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; if (tflite_op_type == tflite::BuiltinOperator_REDUCE_MAX) { - MS_LOG(DEBUG) << "parse TfliteReduceMaxParser"; - attr->mode = schema::ReduceMode_ReduceMax; + prim->set_mode(mindspore::ReduceMode::Reduce_Max); } else if (tflite_op_type == tflite::BuiltinOperator_REDUCE_MIN) { - MS_LOG(DEBUG) << "parse TfliteReduceMinParser"; - attr->mode = schema::ReduceMode_ReduceMin; + prim->set_mode(mindspore::ReduceMode::Reduce_Min); } else if (tflite_op_type == tflite::BuiltinOperator_REDUCE_PROD) { - MS_LOG(DEBUG) << "parse TfliteReduceProdParser"; - attr->mode = schema::ReduceMode_ReduceProd; + prim->set_mode(mindspore::ReduceMode::Reduce_Prod); } else if (tflite_op_type == tflite::BuiltinOperator_SUM) { - MS_LOG(DEBUG) << "parse TfliteSumParser"; - attr->mode = schema::ReduceMode_ReduceSum; + prim->set_mode(mindspore::ReduceMode::Reduce_Sum); } else if (tflite_op_type == tflite::BuiltinOperator_MEAN) { - MS_LOG(DEBUG) << "parse TfliteMeanParser"; - attr->mode = schema::ReduceMode_ReduceMean; - } else if (tflite_op_type == tflite::BuiltinOperator_REDUCE_ANY) { - // attr->mode; - MS_LOG(ERROR) << "ms-lite haven't supported REDUCE_ANY now"; - return nullptr; - } - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->axes)) { - MS_LOG(ERROR) << "get reduce -> axes failed"; + prim->set_mode(mindspore::ReduceMode::Reduce_Mean); + } else { + MS_LOG(ERROR) << "unsupported reduce mode:" << tflite_op_type; return nullptr; } - primitive->value.type = schema::PrimitiveType_Reduce; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_TfliteSumParser(tflite::BuiltinOperator_SUM, new TfliteReduceParser()); @@ -80,6 +57,5 @@ TfliteNodeRegister 
g_TfliteMeanParser(tflite::BuiltinOperator_MEAN, new TfliteRe TfliteNodeRegister g_TfliteReduceMaxParser(tflite::BuiltinOperator_REDUCE_MAX, new TfliteReduceParser()); TfliteNodeRegister g_TfliteReduceMinParser(tflite::BuiltinOperator_REDUCE_MIN, new TfliteReduceParser()); TfliteNodeRegister g_TfliteReduceProdParser(tflite::BuiltinOperator_REDUCE_PROD, new TfliteReduceParser()); -TfliteNodeRegister g_TfliteReduceAnyParser(tflite::BuiltinOperator_REDUCE_ANY, new TfliteReduceParser()); } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h index f4e949651e..31c412d79a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reduce_parser.h @@ -28,8 +28,8 @@ class TfliteReduceParser : public TfliteNodeParser { public: TfliteReduceParser() : TfliteNodeParser("node_name") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc index b42b24a170..27f2545ac7 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.cc @@ -17,52 +17,34 @@ #include "tools/converter/parser/tflite/tflite_reshape_parser.h" #include <vector> #include <memory> +#include "ops/reshape.h" -namespace mindspore::lite { -lite::PrimitiveC *TfliteReshapeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - const auto &tflite_subgraph = tflite_model->subgraphs.front(); - std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteReshapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tfliteOp, + const std::unique_ptr<tflite::ModelT> &tfliteModel) { + auto prim = std::make_unique<ops::Reshape>(); + + MS_ASSERT(tfliteOp != nullptr); + MS_ASSERT(tfliteModel != nullptr); + std::vector<int32_t> shape; + const auto &tflite_subgraph = tfliteModel->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - const auto &tflite_attr = tflite_op->builtin_options.AsReshapeOptions(); - if (tflite_attr == nullptr) { - if (tflite_op->inputs.size() < 2) { - MS_LOG(ERROR) << "expected two input tensors, but got: " << tflite_op->inputs.size(); - return nullptr; - } - auto shape_tensor_index = tflite_op->inputs[1]; - const auto &shape_tensor = tflite_subgraph->tensors[shape_tensor_index]; - if (shape_tensor == nullptr) { - MS_LOG(ERROR) << "shape_tensor is null"; - return nullptr; - } - auto &buf_data = tflite_model->buffers[shape_tensor->buffer]; - if (buf_data == nullptr) { - MS_LOG(ERROR) << "buf_data is null"; - return nullptr; - } - if (!buf_data->data.empty()) { - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->shape)) { - MS_LOG(ERROR) << "get reshape -> shape 
failed"; - return nullptr; - } - } - } else { - attr->format = schema::Format::Format_NHWC; - attr->shape.resize(tflite_attr->new_shape.size()); + const auto &tflite_attr = tfliteOp->builtin_options.AsReshapeOptions(); + if (tflite_attr != nullptr) { + shape.resize(tflite_attr->new_shape.size()); for (size_t i = 0; i < tflite_attr->new_shape.size(); ++i) { - attr->shape[i] = tflite_attr->new_shape[i]; + shape[i] = tflite_attr->new_shape[i]; } + prim->AddAttr("shape", MakeValue(shape)); } - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_Reshape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + + return prim.release(); } TfliteNodeRegister g_tfliteReshapeParser(tflite::BuiltinOperator_RESHAPE, new TfliteReshapeParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h index 88f9cfe4f2..c713188be8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reshape_parser.h @@ -23,14 +23,16 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteReshapeParser : public TfliteNodeParser { public: TfliteReshapeParser() : TfliteNodeParser("Reshape") {} - lite::PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_RESHAPE_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc index a504fbd7af..51a854ce46 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.cc @@ -19,24 +19,25 @@ #include <memory> #include <string> #include <vector> +#include "ops/resize.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteResizeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteResizeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Resize>(); - std::unique_ptr<schema::ResizeT> attr = std::make_unique<schema::ResizeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + prim->set_format(mindspore::Format::NHWC); + prim->set_preserve_aspect_ratio(false); + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ASYMMETRIC); + + MS_ASSERT(tfliteOp != nullptr); + MS_ASSERT(tfliteModel != nullptr); + auto &tflite_subgraph = tflite_model->subgraphs.front(); + if 
(tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - attr->coordinateTransformMode = schema::CoordinateTransformMode_ASYMMETRIC; auto tflite_op_type = (tflite_model->operator_codes[tflite_op->opcode_index])->builtin_code; if (tflite_op_type == tflite::BuiltinOperator_RESIZE_BILINEAR) { MS_LOG(DEBUG) << "parse TfliteResizeBilinearParser"; @@ -46,13 +47,13 @@ PrimitiveC *TfliteResizeParser::ParseLitePrimitive(const std::unique_ptr<tflite: return nullptr; } if (tfliteAttr->align_corners) { - attr->coordinateTransformMode = schema::CoordinateTransformMode_ALIGN_CORNERS; + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ALIGN_CORNERS); } if (tfliteAttr->half_pixel_centers) { MS_LOG(ERROR) << "Does not support half pixel centers"; return nullptr; } - attr->method = schema::ResizeMethod_LINEAR; + prim->set_method(mindspore::ResizeMethod::LINEAR); } else if (tflite_op_type == tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR) { MS_LOG(DEBUG) << "parse TfliteResizeNearestNeighborParser"; const auto &tfliteAttr = tflite_op->builtin_options.AsResizeNearestNeighborOptions(); @@ -61,22 +62,19 @@ PrimitiveC *TfliteResizeParser::ParseLitePrimitive(const std::unique_ptr<tflite: return nullptr; } if (tfliteAttr->align_corners) { - attr->coordinateTransformMode = schema::CoordinateTransformMode_ALIGN_CORNERS; + prim->set_coordinate_transform_mode(mindspore::CoordinateTransformMode::ALIGN_CORNERS); } if (tfliteAttr->half_pixel_centers) { MS_LOG(ERROR) << "Does not support half pixel centers"; return nullptr; } - attr->method = schema::ResizeMethod_NEAREST; - attr->nearestMode = schema::NearestMode_NORMAL; + prim->set_method(mindspore::ResizeMethod::NEAREST); + prim->set_nearest_mode(mindspore::NearestMode::NORMAL); } else { MS_LOG(ERROR) << "wrong resize type"; return nullptr; } - attr->format = schema::Format::Format_NHWC; - attr->preserveAspectRatio = false; - auto tfliteResizeTensorIndex = tflite_op->inputs[1]; const auto &shape_tensor = tflite_subgraph->tensors[tfliteResizeTensorIndex]; if (shape_tensor == nullptr) { @@ -93,13 +91,11 @@ PrimitiveC *TfliteResizeParser::ParseLitePrimitive(const std::unique_ptr<tflite: if (buffData != nullptr) { auto height = buffData[0]; auto width = buffData[1]; - attr->newWidth = width; - attr->newHeight = height; + prim->set_new_width(width); + prim->set_new_height(height); } - primitive->value.type = schema::PrimitiveType_Resize; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteResizeBilinearParser(tflite::BuiltinOperator_RESIZE_BILINEAR, new TfliteResizeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h index d9c76fa7d0..90aeb2cb37 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_resize_parser.h @@ -26,10 +26,10 @@ namespace mindspore::lite { class TfliteResizeParser : public TfliteNodeParser { public: - TfliteResizeParser() : TfliteNodeParser("node_name") {} + TfliteResizeParser() : TfliteNodeParser("resize_bilinear") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) 
override; }; } // namespace mindspore::lite diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc index 21b1fafae7..c42fd90c62 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.cc @@ -17,32 +17,29 @@ #include "tools/converter/parser/tflite/tflite_reverse_parser.h" #include <vector> #include <memory> +#include "ops/reverse_v2.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteReverseParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteReverseParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ReverseV2>(); - std::unique_ptr<schema::ReverseT> attr = std::make_unique<schema::ReverseT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->axis)) { + std::vector<int64_t> axis; + if (GetTfliteData(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, axis)) { MS_LOG(ERROR) << "get reverse -> axis failed"; return nullptr; } + prim->set_axis(axis); - primitive->value.type = schema::PrimitiveType_Reverse; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteReverseParser(tflite::BuiltinOperator_REVERSE_V2, new TfliteReverseParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h index dd6b87d375..f18b91bdf9 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_parser.h @@ -29,8 +29,8 @@ class TfliteReverseParser : public TfliteNodeParser { public: TfliteReverseParser() : TfliteNodeParser("reverse") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc index ca0e4cf243..150796ee56 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.cc @@ -18,34 +18,24 @@ #include "tools/converter/parser/tflite/tflite_reverse_sequence_parser.h" #include <vector> #include <memory> +#include "ops/reverse_sequence.h" namespace mindspore { namespace lite { 
-PrimitiveC *TfliteReverseSequenceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ReverseSequenceT> attr = std::make_unique<schema::ReverseSequenceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteReverseSequenceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ReverseSequence>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsReverseSequenceOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op reverse attr failed"; return nullptr; } - attr->seqAxis = tflite_attr->seq_dim; - attr->batchAxis = tflite_attr->batch_dim; + prim->set_seq_dim(tflite_attr->seq_dim); + prim->set_batch_dim(tflite_attr->batch_dim); - primitive->value.type = schema::PrimitiveType_ReverseSequence; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteReverseSequenceParser(tflite::BuiltinOperator_REVERSE_SEQUENCE, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h index 0118360222..dcde927ac2 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_reverse_sequence_parser.h @@ -29,8 +29,8 @@ class TfliteReverseSequenceParser : public TfliteNodeParser { public: TfliteReverseSequenceParser() : TfliteNodeParser("ReverseSequence") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc index 44cba14764..678ba30d48 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.cc @@ -17,31 +17,14 @@ #include "tools/converter/parser/tflite/tflite_scatter_nd_parser.h" #include <vector> #include <memory> +#include "ops/scatter_nd.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteScatterNdParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ScatterNDT> attr = std::make_unique<schema::ScatterNDT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - const auto &tflite_attr = tflite_op->builtin_options.AsScatterNdOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op ScatterNd attr failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_ScatterND; - 
primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteScatterNdParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ScatterNd>(); + return prim.release(); } TfliteNodeRegister g_tfliteScatterNdParser(tflite::BuiltinOperator_SCATTER_ND, new TfliteScatterNdParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h index 0c3a294b74..36b5d3ae3f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_scatter_nd_parser.h @@ -29,8 +29,8 @@ class TfliteScatterNdParser : public TfliteNodeParser { public: TfliteScatterNdParser() : TfliteNodeParser("ScatterNd") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc index 7e1b390de3..38110e35e6 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.cc @@ -17,26 +17,14 @@ #include "tools/converter/parser/tflite/tflite_shape_parser.h" #include <vector> #include <memory> +#include "ops/shape.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteShapeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ShapeT> attr = std::make_unique<schema::ShapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_Shape; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteShapeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Shape>(); + return prim.release(); } TfliteNodeRegister g_tfliteShapeParser(tflite::BuiltinOperator_SHAPE, new TfliteShapeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h index 5e783348b4..270a562c99 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_shape_parser.h @@ -29,8 +29,8 @@ class TfliteShapeParser : public TfliteNodeParser { public: TfliteShapeParser() : TfliteNodeParser("Shape") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc index 5e6efaaaa8..a767941121 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.cc @@ -17,35 +17,25 @@ #include "tools/converter/parser/tflite/tflite_skip_gram_parser.h" #include <vector> #include <memory> +#include "ops/skip_gram.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSkipGramParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::SkipGramT> attr = std::make_unique<schema::SkipGramT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteSkipGramParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SkipGram>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsSkipGramOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op attr failed"; + MS_LOG(ERROR) << "get SkipGram attr failed"; return nullptr; } - attr->includeAllGrams = tflite_attr->include_all_ngrams; - attr->maxSkipSize = tflite_attr->max_skip_size; - attr->ngramSize = tflite_attr->ngram_size; + prim->set_include_all_grams(tflite_attr->include_all_ngrams); + prim->set_max_skip_size(tflite_attr->max_skip_size); + prim->set_ngram_size(tflite_attr->ngram_size); - primitive->value.type = schema::PrimitiveType_SkipGram; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSkiGramParser(tflite::BuiltinOperator_SKIP_GRAM, new TfliteSkipGramParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.h index c52ce1f203..115601478e 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_skip_gram_parser.h @@ -29,8 +29,8 @@ class TfliteSkipGramParser : public TfliteNodeParser { public: TfliteSkipGramParser() : TfliteNodeParser("SkipGram") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc index dd6c92fe22..a95f69deb8 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.cc @@ -17,43 +17,33 @@ #include "tools/converter/parser/tflite/tflite_slice_parser.h" #include <vector> #include <memory> +#include "ops/fusion/slice_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSliceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const 
std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SliceFusion>(); - std::unique_ptr<schema::SliceT> attr = std::make_unique<schema::SliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - attr->format = schema::Format::Format_NHWC; - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->begin)) { + std::vector<int64_t> begin; + if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, begin)) { MS_LOG(ERROR) << "get slice -> begin failed"; return nullptr; } - if (GetTfliteData(tflite_op->inputs[2], tflite_subgraph->tensors, tflite_model->buffers, attr->size)) { - MS_LOG(ERROR) << "get slice -> size failed"; - return nullptr; - } - std::vector<int> axes; - axes.clear(); - for (size_t i = 0; i < attr->begin.size(); ++i) { + std::vector<int64_t> axes; + for (size_t i = 0; i < begin.size(); ++i) { axes.push_back(i); } - attr->axes = axes; - primitive->value.type = schema::PrimitiveType_Slice; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + prim->set_axes(axes); + + return prim.release(); } TfliteNodeRegister g_tfliteSliceParser(tflite::BuiltinOperator_SLICE, new TfliteSliceParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h index e18511edac..1dc0ad0d09 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_slice_parser.h @@ -29,8 +29,8 @@ class TfliteSliceParser : public TfliteNodeParser { public: TfliteSliceParser() : TfliteNodeParser("Slice") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc index b5e30d7635..32bf3d7024 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.cc @@ -17,23 +17,19 @@ #include "tools/converter/parser/tflite/tflite_softmax_parser.h" #include <vector> #include <memory> +#include "ops/softmax.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { +ops::PrimitiveC *TfliteSoftmaxParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Softmax>(); -PrimitiveC *TfliteSoftmaxParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - 
const std::unique_ptr<tflite::ModelT> &tflite_model) { - std::unique_ptr<schema::SoftMaxT> attr = std::make_unique<schema::SoftMaxT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + prim->set_axis({-1}); - attr->axis = -1; - auto primitive = std::make_unique<schema::PrimitiveT>(); - primitive->value.type = schema::PrimitiveType_SoftMax; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSoftmaxParser(tflite::BuiltinOperator_SOFTMAX, new TfliteSoftmaxParser()); -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h index 4d060a3b09..5322d36fc2 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_softmax_parser.h @@ -23,14 +23,16 @@ #include "tools/converter/parser/tflite/tflite_node_parser.h" #include "tools/converter/parser/tflite/tflite_node_parser_registry.h" -namespace mindspore::lite { +namespace mindspore { +namespace lite { class TfliteSoftmaxParser : public TfliteNodeParser { public: TfliteSoftmaxParser() : TfliteNodeParser("Softmax") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; -} // namespace mindspore::lite +} // namespace lite +} // namespace mindspore #endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TFLITE_SOFTMAX_PARSER_H diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc index db6524d7ec..1f392bf5b4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.cc @@ -18,36 +18,35 @@ #include "tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h" #include <vector> #include <memory> +#include "ops/space_to_batch_nd.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSpaceToBatchNDParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteSpaceToBatchNDParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SpaceToBatchND>(); - std::unique_ptr<schema::SpaceToBatchNDT> attr = std::make_unique<schema::SpaceToBatchNDT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->blockShape)) { + std::vector<int64_t> blockShape; + if 
(GetTfliteData(tflite_op->inputs.at(1), tflite_subgraph->tensors, tflite_model->buffers, blockShape)) { MS_LOG(ERROR) << "get spaceToBatchND -> blockShape failed"; return nullptr; } - if (GetTfliteData(tflite_op->inputs[2], tflite_subgraph->tensors, tflite_model->buffers, attr->paddings)) { + prim->set_block_shape(blockShape); + std::vector<std::vector<int64_t>> paddings; + if (TransTfliteDataToVec2D(tflite_op->inputs.at(2), tflite_subgraph->tensors, tflite_model->buffers, paddings)) { MS_LOG(ERROR) << "get spaceToBatchND -> paddings failed"; return nullptr; } + prim->set_paddings(paddings); - primitive->value.type = schema::PrimitiveType_SpaceToBatchND; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSpaceToBatchNDParser(tflite::BuiltinOperator_SPACE_TO_BATCH_ND, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h index e7b0e4ae40..5799507271 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_batch_nd_parser.h @@ -29,8 +29,8 @@ class TfliteSpaceToBatchNDParser : public TfliteNodeParser { public: TfliteSpaceToBatchNDParser() : TfliteNodeParser("SpaceToBatchND") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc index dabe71094b..eff2d304fb 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.cc @@ -18,34 +18,25 @@ #include "tools/converter/parser/tflite/tflite_space_to_depth_parser.h" #include <vector> #include <memory> +#include "ops/space_to_depth.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSpaceToDepthParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteSpaceToDepthParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SpaceToDepth>(); - std::unique_ptr<schema::SpaceToDepthT> attr = std::make_unique<schema::SpaceToDepthT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + prim->set_format(mindspore::Format::NHWC); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsSpaceToDepthOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op space to depth attr failed"; + MS_LOG(ERROR) << "get SpaceToDepth attr failed"; return nullptr; } - attr->blockSize = tflite_attr->block_size; - attr->format = schema::Format::Format_NHWC; + prim->set_block_size(tflite_attr->block_size); - primitive->value.type = schema::PrimitiveType_SpaceToDepth; - 
primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSpaceToDepthParser(tflite::BuiltinOperator_SPACE_TO_DEPTH, new TfliteSpaceToDepthParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h index 7fc2bfdf0b..7f6f57e0f4 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_space_to_depth_parser.h @@ -29,8 +29,8 @@ class TfliteSpaceToDepthParser : public TfliteNodeParser { public: TfliteSpaceToDepthParser() : TfliteNodeParser("SpaceToDepth") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc index 44f7f06371..4fb7fc8c3f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.cc @@ -18,26 +18,14 @@ #include "tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h" #include <vector> #include <memory> +#include "ops/sparse_to_dense.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSparseToDenseParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::SparseToDenseT> attr = std::make_unique<schema::SparseToDenseT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_SparseToDense; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteSparseToDenseParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::SparseToDense>(); + return prim.release(); } TfliteNodeRegister g_tfliteSparseToDenseParser(tflite::BuiltinOperator_SPARSE_TO_DENSE, diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h index 78a91f4c0b..b5d551da29 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_sparse_to_dense_parser.h @@ -29,8 +29,8 @@ class TfliteSparseToDenseParser : public TfliteNodeParser { public: TfliteSparseToDenseParser() : TfliteNodeParser("SparseToDense") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc index b43ca08ece..5b9a93bda3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.cc @@ -18,65 +18,70 @@ #include <vector> #include <memory> #include <map> +#include "ops/split.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSplitParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - auto &tflite_subgraph = tflite_model->subgraphs.front(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteSplitParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Split>(); - std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - const auto &tflite_attr = tflite_op->builtin_options.AsSplitOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op split attr failed"; return nullptr; } auto num_splits = tflite_attr->num_splits; - - const auto &shape_tensor = tflite_subgraph->tensors[tflite_op->inputs[1]]; + const auto &shape_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(1)); if (shape_tensor == nullptr) { MS_LOG(ERROR) << "shape_tensor is null"; return nullptr; } const auto tensor_shape = shape_tensor->shape; - const auto &axis_tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; + const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(0)); if (axis_tensor == nullptr) { MS_LOG(ERROR) << "axis_tensor is null"; return nullptr; } - auto axis = *(reinterpret_cast<int32_t *>(tflite_model->buffers[axis_tensor->buffer]->data.data())); + auto &axis_buf_data = tflite_model->buffers.at(axis_tensor->buffer); + if (axis_buf_data == nullptr) { + MS_LOG(ERROR) << "buf_data is null"; + return nullptr; + } + auto axis = *(reinterpret_cast<int32_t *>(axis_buf_data->data.data())); if (axis < 0) { axis += tensor_shape.size(); } - if (axis >= static_cast<int>(tensor_shape.size())) { + if (axis >= static_cast<int32_t>(tensor_shape.size())) { MS_LOG(ERROR) << "axis value is too large"; return nullptr; } - attr->splitDim = axis; - if (tensor_shape[axis] % num_splits != 0 && tensor_shape[axis] / num_splits != 0) { + prim->set_axis(axis); + if (num_splits == 0) { + MS_LOG(ERROR) << "divide-by-zero error: num_splits should not be zero"; + return nullptr; + } + if (tensor_shape.at(axis) % num_splits != 0 && tensor_shape.at(axis) / num_splits != 0) { MS_LOG(ERROR) << "num_splits can't divide tensor's length at axis " << axis; return nullptr; } - attr->numberSplit = num_splits; + prim->set_output_num(num_splits); + std::vector<int64_t> size_splits; if (tensor_shape[axis] / num_splits != 0) { for (int i = 0; i < num_splits; i++) { - attr->sizeSplits.push_back(tensor_shape[axis] / num_splits); + size_splits.push_back(tensor_shape[axis] / num_splits); } } + prim->set_size_splits(size_splits); - primitive->value.type = 
schema::PrimitiveType_Split; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSplitParser(tflite::BuiltinOperator_SPLIT, new TfliteSplitParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h index e696f27641..fdeb008f0b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_parser.h @@ -29,8 +29,8 @@ class TfliteSplitParser : public TfliteNodeParser { public: TfliteSplitParser() : TfliteNodeParser("Split") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc index f0fc6cf380..98cb137648 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.cc @@ -18,60 +18,62 @@ #include <vector> #include <memory> #include <map> +#include "ops/split.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSplitVParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteSplitVParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Split>(); - std::unique_ptr<schema::SplitT> attr = std::make_unique<schema::SplitT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } - const auto &tflite_attr = tflite_op->builtin_options.AsSplitVOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op splitv attr failed"; return nullptr; } - attr->numberSplit = tflite_attr->num_splits; + prim->set_output_num(tflite_attr->num_splits); - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->sizeSplits)) { + std::vector<int64_t> size_splits; + if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, size_splits)) { MS_LOG(ERROR) << "get spliteV -> sizeSplits failed"; return nullptr; } + prim->set_size_splits(size_splits); - const auto &tensor = tflite_subgraph->tensors[tflite_op->inputs[0]]; + const auto &tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(0)); if (tensor == nullptr) { MS_LOG(ERROR) << "tensor_shape is null"; return nullptr; } auto tensor_shape = tensor->shape; - const auto &axis_tensor = tflite_subgraph->tensors[tflite_op->inputs[2]]; + const auto &axis_tensor = tflite_subgraph->tensors.at(tflite_op->inputs.at(2)); if (axis_tensor == 
nullptr) { MS_LOG(ERROR) << "axis_tensor is null"; return nullptr; } - auto axis = *(reinterpret_cast<int32_t *>(tflite_model->buffers[axis_tensor->buffer]->data.data())); + auto &axis_buf_data = tflite_model->buffers.at(axis_tensor->buffer); + if (axis_buf_data == nullptr) { + MS_LOG(ERROR) << "buf_data is null"; + return nullptr; + } + auto axis = *(reinterpret_cast<int32_t *>(axis_buf_data->data.data())); if (axis < 0) { axis += tensor_shape.size(); } - if (axis >= static_cast<int>(tensor_shape.size())) { + if (axis >= static_cast<int32_t>(tensor_shape.size())) { MS_LOG(ERROR) << "axis value is too large"; return nullptr; } - attr->splitDim = axis; + prim->set_axis(static_cast<int64_t>(axis)); - primitive->value.type = schema::PrimitiveType_Split; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSplitVParser(tflite::BuiltinOperator_SPLIT_V, new TfliteSplitVParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h index b0586a5d1d..9459e81414 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_split_v_parser.h @@ -29,8 +29,8 @@ class TfliteSplitVParser : public TfliteNodeParser { public: TfliteSplitVParser() : TfliteNodeParser("SplitV") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc index 7befade9b7..18dada76df 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.cc @@ -17,33 +17,28 @@ #include "tools/converter/parser/tflite/tflite_squeeze_parser.h" #include <vector> #include <memory> +#include <algorithm> +#include "ops/squeeze.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteSqueezeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::SqueezeT> attr = std::make_unique<schema::SqueezeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteSqueezeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Squeeze>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsSqueezeOptions(); if (tflite_attr == nullptr) { MS_LOG(ERROR) << "get op squeeze attr failed"; return nullptr; } - attr->axis = tflite_attr->squeeze_dims; + std::vector<int64_t> dims_vector; + (void)std::transform(tflite_attr->squeeze_dims.begin(), tflite_attr->squeeze_dims.end(), + std::back_inserter(dims_vector), + [](const int32_t &value) { return static_cast<int64_t>(value); }); + prim->set_axis(dims_vector); -
primitive->value.type = schema::PrimitiveType_Squeeze; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteSqueezeParser(tflite::BuiltinOperator_SQUEEZE, new TfliteSqueezeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h index 571bfa8945..326874d279 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_squeeze_parser.h @@ -29,8 +29,8 @@ class TfliteSqueezeParser : public TfliteNodeParser { public: TfliteSqueezeParser() : TfliteNodeParser("Squeeze") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc index 7146ce5d55..92bc7d25ae 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.cc @@ -17,21 +17,19 @@ #include "tools/converter/parser/tflite/tflite_stack_parser.h" #include <vector> #include <memory> +#include "ops/stack.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteStackParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteStackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Stack>(); - std::unique_ptr<schema::StackT> attr = std::make_unique<schema::StackT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; + MS_ASSERT(tflite_op != nullptr); + MS_ASSERT(tflite_model != nullptr); + const auto &tflite_subgraph = tflite_model->subgraphs.front(); + if (tflite_subgraph == nullptr) { + MS_LOG(ERROR) << "tflite_subgraph is nullptr"; return nullptr; } @@ -40,14 +38,9 @@ PrimitiveC *TfliteStackParser::ParseLitePrimitive(const std::unique_ptr<tflite:: MS_LOG(ERROR) << "get op stack attr failed"; return nullptr; } - attr->axis = tflite_attr->axis; - attr->n = tflite_attr->values_count; - attr->isScale.assign(tflite_subgraph->tensors[tflite_op->inputs[0]]->shape.begin(), - tflite_subgraph->tensors[tflite_op->inputs[0]]->shape.end()); + prim->set_axis(tflite_attr->axis); - primitive->value.type = schema::PrimitiveType_Stack; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteStackParser(tflite::BuiltinOperator_PACK, new TfliteStackParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h index b452eef11b..b282bfd0be 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h +++ 
b/mindspore/lite/tools/converter/parser/tflite/tflite_stack_parser.h @@ -29,8 +29,8 @@ class TfliteStackParser : public TfliteNodeParser { public: TfliteStackParser() : TfliteNodeParser("Stack") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc index 5699d98d45..ad26e09802 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.cc @@ -17,58 +17,27 @@ #include "tools/converter/parser/tflite/tflite_strided_slice_parser.h" #include <vector> #include <memory> +#include "ops/strided_slice.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteStridedSliceParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::StridedSliceT> attr = std::make_unique<schema::StridedSliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteStridedSliceParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::StridedSlice>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsStridedSliceOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op strideslice attr failed"; - return nullptr; - } - attr->beginMask = tflite_attr->begin_mask; - attr->endMask = tflite_attr->end_mask; - attr->ellipsisMask = tflite_attr->ellipsis_mask; - attr->newAxisMask = tflite_attr->new_axis_mask; - attr->shrinkAxisMask = tflite_attr->shrink_axis_mask; - - int status = GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->begin); - if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "stridedSlice -> begin get failed"; + MS_LOG(ERROR) << "get strideslice attr failed"; return nullptr; - } else if (status == RET_OK) { - status = GetTfliteData(tflite_op->inputs[2], tflite_subgraph->tensors, tflite_model->buffers, attr->end); - if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "stridedSlice -> end get failed"; - return nullptr; - } else if (status == RET_OK) { - status = GetTfliteData(tflite_op->inputs[3], tflite_subgraph->tensors, tflite_model->buffers, attr->stride); - if (status != RET_OK && status != RET_NO_CHANGE) { - MS_LOG(ERROR) << "stridedSlice -> stride get failed"; - return nullptr; - } - } } - attr->isScale.assign(tflite_subgraph->tensors[tflite_op->inputs[0]]->shape.begin(), - tflite_subgraph->tensors[tflite_op->inputs[0]]->shape.end()); + prim->set_begin_mask(tflite_attr->begin_mask); + prim->set_end_mask(tflite_attr->end_mask); + prim->set_ellipsis_mask(tflite_attr->ellipsis_mask); + prim->set_new_axis_mask(tflite_attr->new_axis_mask); + 
prim->set_shrink_axis_mask(tflite_attr->shrink_axis_mask); - primitive->value.type = schema::PrimitiveType_StridedSlice; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteStridedSliceParser(tflite::BuiltinOperator_STRIDED_SLICE, new TfliteStridedSliceParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h index a59fe2a47f..99baab1116 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_strided_slice_parser.h @@ -29,8 +29,8 @@ class TfliteStridedSliceParser : public TfliteNodeParser { public: TfliteStridedSliceParser() : TfliteNodeParser("StridedSlice") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc index 3a5dc26ace..68ab204faf 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.cc @@ -18,25 +18,14 @@ #include "tools/converter/parser/tflite/tflite_tile_parser.h" #include <vector> #include <memory> +#include "ops/fusion/tile_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteTileParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::TileT> attr = std::make_unique<schema::TileT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - primitive->value.type = schema::PrimitiveType_Tile; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteTileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::TileFusion>(); + return prim.release(); } TfliteNodeRegister g_tfliteTileParser(tflite::BuiltinOperator_TILE, new TfliteTileParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h index 33f9076437..37cb979dad 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_tile_parser.h @@ -29,8 +29,8 @@ class TfliteTileParser : public TfliteNodeParser { public: TfliteTileParser() : TfliteNodeParser("Tile") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git 
a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc index 26ff69f977..98a560b51c 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.cc @@ -18,36 +18,17 @@ #include "tools/converter/parser/tflite/tflite_topk_v2_parser.h" #include <vector> #include <memory> -#include <map> +#include "ops/fusion/topk_fusion.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteTopKV2Parser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } +ops::PrimitiveC *TfliteTopKV2Parser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::TopKFusion>(); - std::unique_ptr<schema::TopKT> attr = std::make_unique<schema::TopKT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } + prim->set_sorted(true); - attr->sorted = true; - std::vector<int32_t> k; - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, k)) { - MS_LOG(ERROR) << "get topKV2 -> k failed"; - return nullptr; - } - attr->k = k.front(); - - primitive->value.type = schema::PrimitiveType_TopK; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteTopKV2Parser(tflite::BuiltinOperator_TOPK_V2, new TfliteTopKV2Parser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h index 1ad18105be..2cb750837a 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_topk_v2_parser.h @@ -29,8 +29,8 @@ class TfliteTopKV2Parser : public TfliteNodeParser { public: TfliteTopKV2Parser() : TfliteNodeParser("TopKV2") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc index b105a39e61..5589dca5ec 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.cc @@ -17,32 +17,14 @@ #include "tools/converter/parser/tflite/tflite_transpose_parser.h" #include <vector> #include <memory> +#include "ops/transpose.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteTransposeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - 
std::unique_ptr<schema::TransposeT> attr = std::make_unique<schema::TransposeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - if (GetTfliteData(tflite_op->inputs[1], tflite_subgraph->tensors, tflite_model->buffers, attr->perm)) { - MS_LOG(ERROR) << "get transpose -> perm failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_Transpose; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteTransposeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Transpose>(); + return prim.release(); } TfliteNodeRegister g_tfliteTransposeParser(tflite::BuiltinOperator_TRANSPOSE, new TfliteTransposeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h index 56f9db2ae9..033b382529 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_transpose_parser.h @@ -29,8 +29,8 @@ class TfliteTransposeParser : public TfliteNodeParser { public: TfliteTransposeParser() : TfliteNodeParser("Transpose") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc index 4a28d1dc17..2c501e3a92 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.cc @@ -18,32 +18,14 @@ #include "tools/converter/parser/tflite/tflite_unique_parser.h" #include <vector> #include <memory> +#include "ops/unique.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteUniqueParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::UniqueT> attr = std::make_unique<schema::UniqueT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - const auto &tflite_attr = tflite_op->builtin_options.AsUniqueOptions(); - if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op unique attr failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_Unique; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteUniqueParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Unique>(); + return prim.release(); } TfliteNodeRegister g_tfliteUniqueParser(tflite::BuiltinOperator_UNIQUE, new TfliteUniqueParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h index 98cb61ce6b..e500a80b5a 100644 --- 
a/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unique_parser.h @@ -29,8 +29,8 @@ class TfliteUniqueParser : public TfliteNodeParser { public: TfliteUniqueParser() : TfliteNodeParser("Unique") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc index 7110da2adc..7ec782424f 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.cc @@ -18,33 +18,23 @@ #include "tools/converter/parser/tflite/tflite_unstack_parser.h" #include <vector> #include <memory> +#include "ops/unstack.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteUnstackParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::UnstackT> attr = std::make_unique<schema::UnstackT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteUnstackParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Unstack>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsUnpackOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op unstack attr failed"; + MS_LOG(ERROR) << "get Unpack attr failed"; return nullptr; } - attr->axis = tflite_attr->axis; + prim->set_axis(tflite_attr->axis); - primitive->value.type = schema::PrimitiveType_Unstack; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteUnstackParser(tflite::BuiltinOperator_UNPACK, new TfliteUnstackParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h index b3b189dccb..3c0cd61995 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_unstack_parser.h @@ -29,8 +29,8 @@ class TfliteUnstackParser : public TfliteNodeParser { public: TfliteUnstackParser() : TfliteNodeParser("Unstack") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc index ae10a28b66..f7f24916cc 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc +++
b/mindspore/lite/tools/converter/parser/tflite/tflite_util.cc @@ -126,11 +126,11 @@ std::map<tflite::BuiltinOperator, std::string> tfMsOpTypeMap{ {tflite::BuiltinOperator_WHILE, "While"}, }; -std::map<tflite::ActivationFunctionType, schema::ActivationType> tfMsActivationFunctionMap{ - {tflite::ActivationFunctionType_NONE, schema::ActivationType_NO_ACTIVATION}, - {tflite::ActivationFunctionType_RELU, schema::ActivationType_RELU}, - {tflite::ActivationFunctionType_RELU6, schema::ActivationType_RELU6}, - {tflite::ActivationFunctionType_TANH, schema::ActivationType_TANH}, +std::map<tflite::ActivationFunctionType, mindspore::ActivationType> tfMsActivationFunctionMap{ + {tflite::ActivationFunctionType_NONE, mindspore::ActivationType::NO_ACTIVATION}, + {tflite::ActivationFunctionType_RELU, mindspore::ActivationType::RELU}, + {tflite::ActivationFunctionType_RELU6, mindspore::ActivationType::RELU6}, + {tflite::ActivationFunctionType_TANH, mindspore::ActivationType::TANH}, }; std::map<int, TypeId> type_map = { @@ -141,7 +141,7 @@ std::map<int, TypeId> type_map = { {tflite::TensorType_BOOL, TypeId::kNumberTypeBool}, {tflite::TensorType_STRING, TypeId::kObjectTypeString}, {tflite::TensorType_COMPLEX64, TypeId::kNumberTypeComplex64}}; -schema::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType tfliteAFType) { +mindspore::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType tfliteAFType) { return tfMsActivationFunctionMap.at(tfliteAFType); } @@ -161,23 +161,23 @@ TypeId GetTfliteDataType(const tflite::TensorType &tflite_data_type) { return iter->second; } -schema::PadMode GetPadMode(tflite::Padding tflite_padmode) { +std::string GetPadModeStr(tflite::Padding tflite_padmode) { if (tflite_padmode == tflite::Padding_SAME) { - return schema::PadMode_SAME_UPPER; + return "same"; } else if (tflite_padmode == tflite::Padding_VALID) { - return schema::PadMode_VALID; + return "valid"; } else { - return schema::PadMode_NOTSET; + return "pad"; } } -std::string GetPadModeStr(tflite::Padding tflite_padmode) { +mindspore::PadMode GetPadMode(tflite::Padding tflite_padmode) { if (tflite_padmode == tflite::Padding_SAME) { - return "same"; + return mindspore::PadMode::SAME; } else if (tflite_padmode == tflite::Padding_VALID) { - return "valid"; + return mindspore::PadMode::VALID; } else { - return "pad"; + return mindspore::PadMode::PAD; } } @@ -203,7 +203,7 @@ size_t GetDataTypeSize(const TypeId &data_type) { } } -STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::PadMode pad_mode, int strideH, +STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, mindspore::PadMode pad_mode, int strideH, int strideW, int windowH, int windowW, std::vector<int64_t> *params) { if (tensor == nullptr) { MS_LOG(ERROR) << "the input tensor is null"; @@ -217,7 +217,7 @@ STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::P int padDown = 0; int padLeft = 0; int padRight = 0; - if (pad_mode == schema::PadMode_SAME_UPPER) { + if (pad_mode == mindspore::PadMode::SAME) { auto shape = tensor->shape; int H_input = shape.at(1); int W_input = shape.at(2); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_util.h b/mindspore/lite/tools/converter/parser/tflite/tflite_util.h index a2769e35a0..951baced2d 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_util.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_util.h @@ -27,22 +27,23 @@ #include "schema/inner/ops_generated.h" #include "ir/dtype/type_id.h" 
#include "include/errorcode.h" +#include "mindspore/core/utils/check_convert_utils.h" namespace mindspore { namespace lite { -schema::PadMode GetPadMode(tflite::Padding tflite_padmode); - std::string GetPadModeStr(tflite::Padding tflite_padmode); +mindspore::PadMode GetPadMode(tflite::Padding tflite_padmode); + size_t GetDataTypeSize(const TypeId &data_type); -schema::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType tfliteAFType); +mindspore::ActivationType GetActivationFunctionType(tflite::ActivationFunctionType tfliteAFType); std::string GetMSOpType(tflite::BuiltinOperator tfliteOpType); TypeId GetTfliteDataType(const tflite::TensorType &tflite_data_type); -STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, schema::PadMode pad_mode, int strideH, +STATUS getPaddingParam(const std::unique_ptr<tflite::TensorT> &tensor, mindspore::PadMode pad_mode, int strideH, int strideW, int windowH, int windowW, std::vector<int64_t> *params); void Split(const std::string &src_str, std::vector<std::string> *dst_str, const std::string &chr); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc index 4cb0d19411..37565db6e5 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.cc @@ -18,32 +18,14 @@ #include "tools/converter/parser/tflite/tflite_where_parser.h" #include <vector> #include <memory> +#include "ops/where.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteWhereParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto &tflite_subgraph = tflite_model->subgraphs.front(); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::WhereT> attr = std::make_unique<schema::WhereT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - if (GetTfliteData(tflite_op->inputs[0], tflite_subgraph->tensors, tflite_model->buffers, attr->condition)) { - MS_LOG(ERROR) << "get where -> condition failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_Where; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteWhereParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::Where>(); + return prim.release(); } TfliteNodeRegister g_tfliteWhereParser(tflite::BuiltinOperator_WHERE, new TfliteWhereParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h index a8aa878172..3dc445b454 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_where_parser.h @@ -29,8 +29,8 @@ class TfliteWhereParser : public TfliteNodeParser { public: TfliteWhereParser() : TfliteNodeParser("Where") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // 
namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc index 01904ac38f..2cff2d4fda 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.cc @@ -18,35 +18,24 @@ #include "tools/converter/parser/tflite/tflite_while_parser.h" #include <vector> #include <memory> +#include "ops/while.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteWhileParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::WhileT> attr = std::make_unique<schema::WhileT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } +ops::PrimitiveC *TfliteWhileParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::While>(); + MS_ASSERT(tflite_op != nullptr); const auto &tflite_attr = tflite_op->builtin_options.AsWhileOptions(); if (tflite_attr == nullptr) { - MS_LOG(ERROR) << "get op while attr failed"; + MS_LOG(ERROR) << "get While attr failed"; return nullptr; } + prim->set_cond_subgraph_index(tflite_attr->cond_subgraph_index); + prim->set_body_subgraph_index(tflite_attr->body_subgraph_index); - attr->condSubgraphIndex = tflite_attr->cond_subgraph_index; - attr->bodySubgraphIndex = tflite_attr->body_subgraph_index; - - primitive->value.type = schema::PrimitiveType_While; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); + return prim.release(); } TfliteNodeRegister g_tfliteWhileParser(tflite::BuiltinOperator_WHILE, new TfliteWhileParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h index 6a45caf110..3b199cf269 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_while_parser.h @@ -29,8 +29,8 @@ class TfliteWhileParser : public TfliteNodeParser { public: TfliteWhileParser() : TfliteNodeParser("While") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc index 2132c53d8f..11d5674ab3 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.cc @@ -18,26 +18,14 @@ #include "tools/converter/parser/tflite/tflite_zeros_like_parser.h" #include <vector> #include <memory> +#include "ops/zeros_like.h" namespace mindspore { namespace lite { -PrimitiveC *TfliteZerosLikeParser::ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) { - auto primitive = 
std::make_unique<schema::PrimitiveT>(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "primitive is null"; - return nullptr; - } - - std::unique_ptr<schema::ZerosLikeT> attr = std::make_unique<schema::ZerosLikeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new op failed"; - return nullptr; - } - - primitive->value.type = schema::PrimitiveType_ZerosLike; - primitive->value.value = attr.release(); - return PrimitiveC::Create(primitive.release()); +ops::PrimitiveC *TfliteZerosLikeParser::Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) { + auto prim = std::make_unique<ops::ZerosLike>(); + return prim.release(); } TfliteNodeRegister g_tfliteZerosLikeParser(tflite::BuiltinOperator_ZEROS_LIKE, new TfliteZerosLikeParser()); diff --git a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h index ac67faf158..d415014c3b 100644 --- a/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h +++ b/mindspore/lite/tools/converter/parser/tflite/tflite_zeros_like_parser.h @@ -29,8 +29,8 @@ class TfliteZerosLikeParser : public TfliteNodeParser { public: TfliteZerosLikeParser() : TfliteNodeParser("ZerosLike") {} - PrimitiveC *ParseLitePrimitive(const std::unique_ptr<tflite::OperatorT> &tflite_op, - const std::unique_ptr<tflite::ModelT> &tflite_model) override; + ops::PrimitiveC *Parse(const std::unique_ptr<tflite::OperatorT> &tflite_op, + const std::unique_ptr<tflite::ModelT> &tflite_model) override; }; } // namespace lite } // namespace mindspore diff --git a/mindspore/lite/tools/converter/quant_param_holder.h b/mindspore/lite/tools/converter/quant_param_holder.h new file mode 100644 index 0000000000..830e3dc543 --- /dev/null +++ b/mindspore/lite/tools/converter/quant_param_holder.h @@ -0,0 +1,160 @@ +/** + * Copyright 2020 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_QUANT_PARAM_CONTEXT_H +#define MINDSPORE_LITE_TOOLS_CONVERTER_QUANT_PARAM_CONTEXT_H + +#include <vector> +#include <memory> +#include "ir/anf.h" +#include "schema/inner/model_generated.h" + +namespace mindspore { +namespace lite { +using QuantParamsVector = std::vector<std::vector<schema::QuantParamT>>; +class QuantParamHolder : public Value { + public: + QuantParamHolder() = default; + + ~QuantParamHolder() override = default; + + MS_DECLARE_PARENT(QuantParamHolder, Value); + + bool operator==(const Value &rhs) const override { // unused + if (rhs.isa<QuantParamHolder>()) { + auto other_holder = dynamic_cast<const QuantParamHolder &>(rhs); + auto input_quant_params_rhs = other_holder.input_quant_params(); + auto output_quant_params_rhs = other_holder.output_quant_params(); + if (input_quant_params_rhs.size() != this->input_quant_param_.size() || + output_quant_params_rhs.size() != this->output_quant_param_.size()) { + return false; + } + for (size_t i = 0; i < input_quant_params_rhs.size(); ++i) { + if (input_quant_params_rhs.at(i).size() != this->input_quant_param_.at(i).size()) { + return false; + } + auto *params = reinterpret_cast<const char *>(this->input_quant_param_.at(i).data()); + auto *params_rhs = reinterpret_cast<const char *>(input_quant_params_rhs.at(i).data()); + for (size_t j = 0; j < input_quant_params_rhs.at(i).size() * sizeof(schema::QuantParamT); ++j) { + if (params[j] != params_rhs[j]) { + return false; + } + } + } + for (size_t i = 0; i < output_quant_params_rhs.size(); ++i) { + if (output_quant_params_rhs.at(i).size() != this->output_quant_param_.at(i).size()) { + return false; + } + auto *params = reinterpret_cast<const char *>(this->output_quant_param_.at(i).data()); + auto *params_rhs = reinterpret_cast<const char *>(output_quant_params_rhs.at(i).data()); + for (size_t j = 0; j < output_quant_params_rhs.at(i).size() * sizeof(schema::QuantParamT); ++j) { + if (params[j] != params_rhs[j]) { + return false; + } + } + } + } else { + return false; + } + return true; + } + + void set_quant_type(const schema::QuantType &quant_type) { quant_type_ = quant_type; } + + schema::QuantType quant_type() const { return quant_type_; } + + void set_input_quant_params(const QuantParamsVector &input_quant_param) { + this->input_quant_param_ = input_quant_param; + } + + void set_input_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &input_quant_param) { + if (index >= this->input_quant_param_.size()) { + std::vector<schema::QuantParamT> place_quant(1); + this->input_quant_param_.insert(this->input_quant_param_.end(), index + 1 - input_quant_param_.size(), + place_quant); + } + this->input_quant_param_.at(index) = input_quant_param; + } + + void set_output_quant_params(const std::vector<std::vector<schema::QuantParamT>> &output_quant_param) { + this->output_quant_param_ = output_quant_param; + } + + void set_output_quant_param(const size_t &index, const std::vector<schema::QuantParamT> &output_quant_param) { + if (index >= this->output_quant_param_.size()) { + std::vector<schema::QuantParamT> place_quant(1); + this->output_quant_param_.insert(this->output_quant_param_.end(), index + 1 - output_quant_param_.size(), + place_quant); + } + this->output_quant_param_.at(index) = output_quant_param; + } + + void set_enable_huffman_code(bool enable_huffman_code) { enable_huffman_code_ = enable_huffman_code; } + + bool enable_huffman_code() const { return enable_huffman_code_; } + + void AddInputQuantParam(const
std::vector<schema::QuantParamT> &quant_param) { + this->input_quant_param_.emplace_back(quant_param); + } + + std::vector<std::vector<schema::QuantParamT>> input_quant_params() const { return this->input_quant_param_; } + + void AddOutputQuantParam(const std::vector<schema::QuantParamT> &quant_param) { + this->output_quant_param_.emplace_back(quant_param); + } + + std::vector<std::vector<schema::QuantParamT>> output_quant_params() const { return this->output_quant_param_; } + + void ClearInputOutputQuantParam() { + input_quant_param_.clear(); + output_quant_param_.clear(); + } + + bool IsInputQuantParamsInited() { + if (this->input_quant_param_.empty()) { + return false; + } + for (auto &quant_param : this->input_quant_param_) { + if (!quant_param.front().inited) { + return false; + } + } + return true; + } + + bool IsOutputQuantParamsInited() { + if (this->output_quant_param_.empty()) { + return false; + } + for (auto &quant_param : this->output_quant_param_) { + if (!quant_param.front().inited) { + return false; + } + } + return true; + } + + private: + schema::QuantType quant_type_{schema::QuantType_QUANT_NONE}; + QuantParamsVector input_quant_param_; + QuantParamsVector output_quant_param_; + bool enable_huffman_code_ = false; +}; +using QuantParamHolderPtr = std::shared_ptr<QuantParamHolder>; +} // namespace lite +} // namespace mindspore + +#endif // MINDSPORE_LITE_TOOLS_CONVERTER_QUANT_PARAM_CONTEXT_H diff --git a/mindspore/lite/tools/converter/quantizer/bitpacking.h b/mindspore/lite/tools/converter/quantizer/bitpacking.h index 7e80ffa4c7..05413fb86e 100644 --- a/mindspore/lite/tools/converter/quantizer/bitpacking.h +++ b/mindspore/lite/tools/converter/quantizer/bitpacking.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc index 0218a02520..9161e20480 100644 --- a/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc +++ b/mindspore/lite/tools/converter/quantizer/calc_quant_param.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
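The QuantParamHolder introduced above takes over the quant metadata that previously lived on lite::PrimitiveC. A minimal, hypothetical usage sketch, not part of this patch: the attr key "quant_params" and the attach/read steps are assumptions inferred from the GetCNodeQuantHolder() calls in the hunks below.

// Sketch only: attaching quant metadata to a primitive via the new holder.
// `primitive` is assumed to be an existing PrimitivePtr; the attr key
// "quant_params" is an assumption, not shown in this patch.
auto holder = std::make_shared<lite::QuantParamHolder>();
schema::QuantParamT q;
q.scale = 0.015;  // illustrative values, not taken from the patch
q.zeroPoint = 0;
q.inited = true;
holder->AddInputQuantParam({q});                         // one param vector per input tensor
holder->set_quant_type(schema::QuantType_PostTraining);
primitive->AddAttr("quant_params", holder);              // QuantParamHolder is a Value, so it can be an attr
// A consumer checks readiness before reading the params back:
if (holder->IsInputQuantParamsInited()) {
  auto params = holder->input_quant_params();            // QuantParamsVector
}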
@@ -61,8 +61,8 @@ STATUS QuantParamCalcer::ComputeConstQuantParam(const schema::TensorT &tensor, Q return quant::CalQuantizationParams(quantParam, min, max); } -// init inTensor quantParam from preNode if possable -// init outTensor quantParam from postNode if possable +// init inTensor quantParam from preNode if possible +// init outTensor quantParam from postNode if possible int QuantParamCalcer::Calc(MetaGraphT *graph, const CNodeT &node) { MS_ASSERT(node.inputIndex.size() > 0); MS_ASSERT(node.quantParam.size() == node.inputIndex.size() + node.outputIndex.size()); @@ -472,7 +472,7 @@ class CalcActivation : public QuantParamCalcer { MS_ASSERT(node.inputIndex.size() == 1); MS_ASSERT(node.outputIndex.size() == 1); MS_ASSERT(node.attr.AsActivation() != nullptr); - if (node.primitive->value.AsActivation()->type == schema::ActivationType_SIGMOID) { + if (node.primitive->value.AsActivation()->activation_type == schema::ActivationType_SIGMOID) { auto calcToSet = CalcToSet(0, 1); return calcToSet.Calc(subGraph, node); } else { @@ -504,21 +504,21 @@ QuantParamCalcRegister::QuantParamCalcRegister() { if (!hasError) { _registerMap[schema::PrimitiveType_Concat] = std::make_shared<CalcConcat>(); _registerMap[schema::PrimitiveType_Activation] = std::make_shared<CalcActivation>(); - _registerMap[schema::PrimitiveType_Add] = std::make_shared<CalcAdd>(); - _registerMap[schema::PrimitiveType_Mul] = commonCalcer; - _registerMap[schema::PrimitiveType_Scale] = std::make_shared<ConvCalcer>(); - _registerMap[schema::PrimitiveType_Conv2D] = std::make_shared<ConvCalcer>(); - _registerMap[schema::PrimitiveType_DeConv2D] = std::make_shared<ConvCalcer>(); - _registerMap[schema::PrimitiveType_DepthwiseConv2D] = std::make_shared<ConvCalcer>(); - _registerMap[schema::PrimitiveType_Pooling] = linearCalcer; + _registerMap[schema::PrimitiveType_AddFusion] = std::make_shared<CalcAdd>(); + _registerMap[schema::PrimitiveType_MulFusion] = commonCalcer; + _registerMap[schema::PrimitiveType_ScaleFusion] = std::make_shared<ConvCalcer>(); + _registerMap[schema::PrimitiveType_Conv2DFusion] = std::make_shared<ConvCalcer>(); + _registerMap[schema::PrimitiveType_Conv2dTransposeFusion] = std::make_shared<ConvCalcer>(); + _registerMap[schema::PrimitiveType_AvgPoolFusion] = linearCalcer; + _registerMap[schema::PrimitiveType_MaxPoolFusion] = linearCalcer; _registerMap[schema::PrimitiveType_Resize] = linearCalcer; _registerMap[schema::PrimitiveType_Reshape] = linearCalcer; _registerMap[schema::PrimitiveType_StridedSlice] = linearCalcer; _registerMap[schema::PrimitiveType_Shape] = linearCalcer; - _registerMap[schema::PrimitiveType_SoftMax] = std::make_shared<CalcToSet>(0, 1); + _registerMap[schema::PrimitiveType_Softmax] = std::make_shared<CalcToSet>(0, 1); _registerMap[schema::PrimitiveType_Squeeze] = linearCalcer; _registerMap[schema::PrimitiveType_RealDiv] = std::make_shared<CalcRealDiv>(); - _registerMap[schema::PrimitiveType_Reduce] = commonCalcer; + _registerMap[schema::PrimitiveType_ReduceFusion] = commonCalcer; _registerMap[schema::PrimitiveType_BiasAdd] = std::make_shared<BiasAddCalcer>(); _registerMap[schema::PrimitiveType_Transpose] = linearCalcer; _registerMap[schema::PrimitiveType_MatMul] = std::make_shared<ConvCalcer>(); diff --git a/mindspore/lite/tools/converter/quantizer/calc_quant_param.h b/mindspore/lite/tools/converter/quantizer/calc_quant_param.h index 1f1704a900..c289a8e964 100644 --- a/mindspore/lite/tools/converter/quantizer/calc_quant_param.h +++ b/mindspore/lite/tools/converter/quantizer/calc_quant_param.h @@ 
-1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/converter/quantizer/huffman_encode.cc b/mindspore/lite/tools/converter/quantizer/huffman_encode.cc index 4d764a95d1..7d4b984647 100644 --- a/mindspore/lite/tools/converter/quantizer/huffman_encode.cc +++ b/mindspore/lite/tools/converter/quantizer/huffman_encode.cc @@ -15,17 +15,14 @@ */ #include "tools/converter/quantizer/huffman_encode.h" - #include <utility> -#include <iostream> - #include "src/dequant.h" +#include "tools/converter/quantizer/quantize_util.h" namespace mindspore { namespace lite { - -STATUS HuffmanEncode::DoHuffmanEncode(const ParamValueLitePtr &weight, const std::shared_ptr<PrimitiveC> &primitive_c, - void *quant_datas, const size_t &bit_num) { +STATUS HuffmanEncode::DoHuffmanEncode(const ParamValueLitePtr &weight, const PrimitivePtr &primitive, void *quant_datas, + const size_t &bit_num) { if (quant_datas == nullptr) { MS_LOG(ERROR) << "quant data is nullptr"; return RET_ERROR; @@ -63,7 +60,9 @@ STATUS HuffmanEncode::DoHuffmanEncode(const ParamValueLitePtr &weight, const std return RET_MEMORY_FAILED; } weight->SetTensorData(encode_data, ch_size); - primitive_c->set_enable_huffman_code(true); + auto quant_param_holder = quant::GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); + quant_param_holder->set_enable_huffman_code(true); } huffman_encoded_str_.clear(); huffman_table_.clear(); diff --git a/mindspore/lite/tools/converter/quantizer/huffman_encode.h b/mindspore/lite/tools/converter/quantizer/huffman_encode.h index 250e0fd143..cd0090187c 100644 --- a/mindspore/lite/tools/converter/quantizer/huffman_encode.h +++ b/mindspore/lite/tools/converter/quantizer/huffman_encode.h @@ -24,12 +24,12 @@ #include <queue> #include <map> #include <memory> -#include <fstream> -#include "src/common/log_adapter.h" -#include "src/ops/primitive_c.h" +#include "ir/func_graph.h" +#include "ir/primitive.h" +#include "schema/inner/model_generated.h" #include "securec/include/securec.h" +#include "src/common/log_adapter.h" #include "src/param_value_lite.h" -#include "ir/func_graph.h" namespace mindspore { namespace lite { @@ -58,10 +58,8 @@ class HuffmanEncode { ~HuffmanEncode(); - STATUS GetParamValueLitePtr(const std::shared_ptr<AnfNode> &input_node, ParamValueLitePtr *param_value); - - STATUS DoHuffmanEncode(const ParamValueLitePtr &weight, const std::shared_ptr<PrimitiveC> &primitive_c, - void *quant_datas, const size_t &bit_num); + STATUS DoHuffmanEncode(const ParamValueLitePtr &weight, const PrimitivePtr &primitive, void *quant_datas, + const size_t &bit_num); private: std::map<int, std::string> huffman_table_; diff --git a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc index 12b673ab13..bc60292b42 100644 --- a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
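The post-training-quantizer hunks below replace every quant-related call on primitive_c with quant::GetCNodeQuantHolder(primitive), whose definition lives in quantize_util and is not shown in this section. A plausible fetch-or-create shape, under the same assumed "quant_params" attr key, might look like this:

// Hypothetical sketch; the real helper in tools/converter/quantizer/quantize_util
// may differ in naming and error handling.
QuantParamHolderPtr GetCNodeQuantHolder(const PrimitivePtr &primitive) {
  MS_ASSERT(primitive != nullptr);
  auto value = primitive->GetAttr("quant_params");  // attr key is an assumption
  if (value != nullptr && value->isa<QuantParamHolder>()) {
    return value->cast<QuantParamHolderPtr>();      // reuse the existing holder
  }
  auto holder = std::make_shared<QuantParamHolder>();
  primitive->AddAttr("quant_params", holder);       // lazily create on first use
  return holder;
}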
@@ -29,11 +29,17 @@ #include <thread> #include <vector> #include <fstream> -#include "schema/inner/model_generated.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" +#include "ops/fusion/full_connection.h" +#include "ops/fusion/layer_norm_fusion.h" +#include "ops/gather.h" +#include "ops/tuple_get_item.h" #include "src/tensor.h" #include "tools/anf_exporter/anf_exporter.h" #include "tools/converter/quantizer/quant_cast.h" #include "tools/converter/quantizer/quantize_util.h" +#include "tools/optimizer/common/gllo_utils.h" #include "src/common/log_adapter.h" #include "securec/include/securec.h" #include "tools/common/tensor_util.h" @@ -45,23 +51,94 @@ using std::string; using std::vector; namespace mindspore::lite::quant { -STATUS DivergInfo::RecordMaxValue(const std::vector<float> &datas) { - for (float data : datas) { - max = std::max(data, max); - min = std::min(data, min); +namespace { +STATUS ComputeBiasDataAndQuantParam(const std::vector<double> &bias_scales, const std::vector<double> &input_scales, + const float *raw_datas, const QuantParamHolderPtr &quant_param_holder, + std::vector<schema::QuantParamT> *quant_params, std::vector<int32_t> *quant_datas) { + MS_ASSERT(raw_datas != nullptr && quant_param_holder != nullptr); + MS_ASSERT(quant_params != nullptr && quant_datas != nullptr); + double bias_scale_tmp; + const constexpr int32_t quanted_bias_abs_limit = 0.5 * INT32_MAX; + auto active_weight_quant_params = quant_param_holder->input_quant_params(); + auto shape_size = quant_datas->size(); + if (bias_scales.size() == shape_size) { + for (size_t i = 0; i < shape_size; i++) { + bias_scale_tmp = bias_scales[i]; + if (fabs(bias_scale_tmp) <= 0.0f) { + MS_LOG(ERROR) << "divisor 'bias_scale_tmp' cannot be 0."; + return RET_ERROR; + } + if (std::abs(raw_datas[i] / bias_scale_tmp) >= quanted_bias_abs_limit) { + MS_LOG(DEBUG) << "quantized bias overflow, maybe the scale of weight: " << active_weight_quant_params[1][i].scale + << " is too small, need to update"; + // update filter scale and zp + double activate_scale = input_scales[0]; + double filter_scale = std::abs(raw_datas[i]) / (activate_scale * quanted_bias_abs_limit); + active_weight_quant_params[1][i].scale = filter_scale; + active_weight_quant_params[1][i].zeroPoint = 0; + quant_param_holder->set_input_quant_params(active_weight_quant_params); + bias_scale_tmp = std::abs(raw_datas[i]) / quanted_bias_abs_limit; + quant_params->at(i).scale = bias_scale_tmp; + MS_LOG(DEBUG) << "new filter scale: " << filter_scale; + } + auto quant_data = (int32_t)std::round(raw_datas[i] / bias_scale_tmp); + quant_datas->at(i) = quant_data; + } + return RET_OK; + } else if (bias_scales.size() == 1) { + // for fc, per tensor quant + bias_scale_tmp = quant_params->front().scale; + float max_raw_data = 0.0f; + for (size_t i = 0; i < shape_size; i++) { + if (std::abs(raw_datas[i]) > max_raw_data) { + max_raw_data = std::abs(raw_datas[i]); + } + } + if (fabs(bias_scale_tmp) <= 0.0f) { + MS_LOG(ERROR) << "divisor 'bias_scale_tmp' cannot be 0."; + return RET_ERROR; + } + if (std::abs(max_raw_data / bias_scale_tmp) >= quanted_bias_abs_limit) { + MS_LOG(DEBUG) << "quantized bias overflow, maybe the scale of weight: " << active_weight_quant_params[1][0].scale + << " is too small, need to update"; + double activate_scale = input_scales[0]; + double filter_scale = std::abs(max_raw_data) / (activate_scale * quanted_bias_abs_limit); + active_weight_quant_params[1][0].scale = filter_scale; +
active_weight_quant_params[1][0].zeroPoint = 0; + quant_param_holder->set_input_quant_params(active_weight_quant_params); + bias_scale_tmp = max_raw_data / quanted_bias_abs_limit; + quant_params->front().scale = bias_scale_tmp; + MS_LOG(DEBUG) << "new filter scale: " << filter_scale; + } + for (size_t i = 0; i < shape_size; i++) { + auto quant_data = (int32_t)std::round(raw_datas[i] / bias_scale_tmp); + quant_datas->at(i) = quant_data; + } + return RET_OK; + } + MS_LOG(ERROR) << "unexpected input_scales size: " << input_scales.size() + << " weight_scales size: " << active_weight_quant_params[1].size(); + return RET_ERROR; +} +} // namespace + +STATUS DivergInfo::RecordMaxValue(const std::vector<float> &data) { + for (float val : data) { + max = std::max(val, max); + min = std::min(val, min); } return RET_OK; } -STATUS DivergInfo::RecordMaxValueArray(const std::vector<float> &datas) { - if (datas.empty()) { +STATUS DivergInfo::RecordMaxValueArray(const std::vector<float> &data) { + if (data.empty()) { return RET_ERROR; } - float max_num = datas.at(0); - float min_num = datas.at(0); - for (float data : datas) { - max_num = std::max(data, max_num); - min_num = std::min(data, min_num); + float max_num = data.at(0); + float min_num = data.at(0); + for (float val : data) { + max_num = std::max(val, max_num); + min_num = std::min(val, min_num); } this->max_datas.emplace_back(max_num); this->min_datas.emplace_back(min_num); @@ -97,6 +174,71 @@ void DivergInfo::DumpHistogram() { std::cout << std::endl; } +void DivergInfo::HandleBinForKL(int quant_bint_nums, int bin_index, std::vector<float> *quantized_histogram, + std::vector<float> *expanded_histogram) { + MS_ASSERT(quantized_histogram != nullptr && expanded_histogram != nullptr); + const float bin_interval = static_cast<float>(bin_index) / static_cast<float>(quant_bint_nums); + // merge i bins to target bins + for (int j = 0; j < quant_bint_nums; ++j) { + const float start = j * bin_interval; + const float end = start + bin_interval; + const int left_upper = static_cast<int>(std::ceil(start)); + if (left_upper > start) { + const double left_scale = left_upper - start; + quantized_histogram->at(j) += left_scale * this->histogram[left_upper - 1]; + } + const int right_lower = static_cast<int>(std::floor(end)); + if (right_lower < end) { + const double right_scale = end - right_lower; + quantized_histogram->at(j) += right_scale * this->histogram[right_lower]; + } + std::for_each(this->histogram.begin() + left_upper, this->histogram.begin() + right_lower, + [&quantized_histogram, j](float item) { quantized_histogram->at(j) += item; }); + } + // expand target bins to i bins in order to calculate KL with reference_histogram + for (int j = 0; j < quant_bint_nums; ++j) { + const float start = j * bin_interval; + const float end = start + bin_interval; + float count = 0; + const int left_upper = static_cast<int>(std::ceil(start)); + float left_scale = 0.0f; + if (left_upper > start) { + left_scale = left_upper - start; + if (this->histogram[left_upper - 1] != 0) { + count += left_scale; + } + } + const int right_lower = static_cast<int>(std::floor(end)); + double right_scale = 0.0f; + if (right_lower < end) { + right_scale = end - right_lower; + if (this->histogram[right_lower] != 0) { + count += right_scale; + } + } + std::for_each(this->histogram.begin() + left_upper, this->histogram.begin() + right_lower, [&count](float item) { + if (item != 0) { + count += 1; + } + }); + if (count == 0) { + continue; + } + const float average_num = 
quantized_histogram->at(j) / count; + if (left_upper > start && this->histogram[left_upper - 1] != 0) { + expanded_histogram->at(left_upper - 1) += average_num * left_scale; + } + if (right_lower < end && this->histogram[right_lower] != 0) { + expanded_histogram->at(right_lower) += average_num * right_scale; + } + for (int k = left_upper; k < right_lower; ++k) { + if (this->histogram[k] != 0) { + expanded_histogram->at(k) += average_num; + } + } + } +} + STATUS DivergInfo::ComputeThreshold() { if (method_x == kMethodMaxMin) { this->best_T = std::max(fabs(this->max), fabs(this->min)); @@ -121,66 +263,8 @@ STATUS DivergInfo::ComputeThreshold() { std::vector<float> expanded_histogram(i, 0); reference_histogram[i - 1] += after_threshold_sum; after_threshold_sum -= this->histogram[i]; - const float bin_interval = static_cast<float>(i) / static_cast<float>(quant_bint_nums); - // merge i bins to target bins - for (int j = 0; j < quant_bint_nums; ++j) { - const float start = j * bin_interval; - const float end = start + bin_interval; - const int left_upper = static_cast<int>(std::ceil(start)); - if (left_upper > start) { - const double left_scale = left_upper - start; - quantized_histogram[j] += left_scale * this->histogram[left_upper - 1]; - } - const int right_lower = static_cast<int>(std::floor(end)); - if (right_lower < end) { - const double right_scale = end - right_lower; - quantized_histogram[j] += right_scale * this->histogram[right_lower]; - } - std::for_each(this->histogram.begin() + left_upper, this->histogram.begin() + right_lower, - [&quantized_histogram, j](float item) { quantized_histogram[j] += item; }); - } - // expand target bins to i bins in order to calculate KL with reference_histogram - for (int j = 0; j < quant_bint_nums; ++j) { - const float start = j * bin_interval; - const float end = start + bin_interval; - float count = 0; - const int left_upper = static_cast<int>(std::ceil(start)); - float left_scale = 0.0f; - if (left_upper > start) { - left_scale = left_upper - start; - if (this->histogram[left_upper - 1] != 0) { - count += left_scale; - } - } - const int right_lower = static_cast<int>(std::floor(end)); - double right_scale = 0.0f; - if (right_lower < end) { - right_scale = end - right_lower; - if (this->histogram[right_lower] != 0) { - count += right_scale; - } - } - std::for_each(this->histogram.begin() + left_upper, this->histogram.begin() + right_lower, [&count](float item) { - if (item != 0) { - count += 1; - } - }); - if (count == 0) { - continue; - } - const float average_num = quantized_histogram[j] / count; - if (left_upper > start && this->histogram[left_upper - 1] != 0) { - expanded_histogram[left_upper - 1] += average_num * left_scale; - } - if (right_lower < end && this->histogram[right_lower] != 0) { - expanded_histogram[right_lower] += average_num * right_scale; - } - for (int k = left_upper; k < right_lower; ++k) { - if (this->histogram[k] != 0) { - expanded_histogram[k] += average_num; - } - } - } + // handle bins for computing KL. 
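+ // (HandleBinForKL first merges the leading i bins into quant_bint_nums coarse bins,
+ // then expands them back to i bins, so the KL divergence against reference_histogram
+ // is computed over equally sized distributions; see the helper added above.)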
+ HandleBinForKL(quant_bint_nums, i, &quantized_histogram, &expanded_histogram); auto KLDivergence = [](std::vector<float> p, std::vector<float> q) { auto sum = 0.0f; std::for_each(p.begin(), p.end(), [&sum](float item) { sum += item; }); @@ -237,7 +321,7 @@ std::pair<CNodePtr, int32_t> DivergInfo::GetZeropoint() { } else if (quant_min == -127 && quant_max == 127) { zero_point = 0; } else { - MS_LOG(WARNING) << "unexpectd quant range, quant_min: " << quant_min << " quant_max: " << quant_max; + MS_LOG(WARNING) << "unexpected quant range, quant_min: " << quant_min << " quant_max: " << quant_max; } if (this->method_x == kMethodOutlier) { MS_ASSERT(fabs(scale_tmp) <= 0.0f); @@ -330,7 +414,7 @@ STATUS Calibrator::ComputeThreshold() { for (const auto &output_diverg_info : outputs_diverg_info.second) { auto output_diverg_cnode = output_diverg_info->cnode; if (output_diverg_cnode == input_cnode) { - if (NodePrimitiveType(input_cnode) != schema::PrimitiveType_TupleGetItem) { + if (NodePrimitiveType(input_cnode) != ops::kNameTupleGetItem) { *(input_infos[i]) = *output_diverg_info; input_infos[i]->cnode = cnode; already_computed = true; @@ -413,7 +497,7 @@ PostTrainingQuantizer::PostTrainingQuantizer(FuncGraphPtr graph, string path, in } calibrator_ = std::make_unique<Calibrator>(std::move(path), this->bit_num, quant_max, quant_min); if (calibrator_ == nullptr) { - MS_LOG(ERROR) << "creat calibrator failed!"; + MS_LOG(ERROR) << "create calibrator failed!"; return; } } @@ -426,9 +510,11 @@ PostTrainingQuantizer::~PostTrainingQuantizer() { } STATUS PostTrainingQuantizer::DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min, - const std::shared_ptr<PrimitiveC> &lite_primitive) const { + const PrimitivePtr &primitive) const { MS_ASSERT(max_min != nullptr); - MS_ASSERT(lite_primitive != nullptr); + MS_ASSERT(primitive != nullptr); + auto quant_param_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); schema::QuantParamT quant_param; quant_param.scale = scale; quant_param.zeroPoint = zeropoint; @@ -440,14 +526,16 @@ STATUS PostTrainingQuantizer::DoQuantInput(double scale, int32_t zeropoint, stru quant_param.roundType = 1; quant_param.multiplier = 1; std::vector<schema::QuantParamT> quant_params = {quant_param}; - lite_primitive->AddInputQuantParam(quant_params); + quant_param_holder->AddInputQuantParam(quant_params); return RET_OK; } STATUS PostTrainingQuantizer::DoQuantOutput(double scale, int zeropoint, struct MaxMin *max_min, - const std::shared_ptr<PrimitiveC> &lite_primitive) const { + const PrimitivePtr &primitive) const { MS_ASSERT(max_min != nullptr); - MS_ASSERT(lite_primitive != nullptr); + MS_ASSERT(primitive != nullptr); + auto quant_param_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); schema::QuantParamT quant_param; quant_param.scale = scale; quant_param.zeroPoint = zeropoint; @@ -459,14 +547,14 @@ STATUS PostTrainingQuantizer::DoQuantOutput(double scale, int zeropoint, struct quant_param.roundType = 1; quant_param.multiplier = 1; std::vector<schema::QuantParamT> quant_params = {quant_param}; - lite_primitive->AddOutputQuantParam(quant_params); + quant_param_holder->AddOutputQuantParam(quant_params); return RET_OK; } STATUS PostTrainingQuantizer::DoWeightQuant(const std::string &op_name, const AnfNodePtr &weight, - std::shared_ptr<PrimitiveC> primitive_c, bool perchanel) const { + const PrimitivePtr &primitive, bool perchanel) const { MS_ASSERT(weight != nullptr); - MS_ASSERT(lite_primitive != nullptr); + 
MS_ASSERT(primitive != nullptr); // perlayer if (!weight->isa<Parameter>()) { MS_LOG(ERROR) << "not a parameter"; @@ -495,8 +583,8 @@ STATUS PostTrainingQuantizer::DoWeightQuant(const std::string &op_name, const An quant_min_t = -(1 << (unsigned int)(bit_num_t - 1)); } } - auto status = QuantFilter<int8_t>(paramValue, std::move(primitive_c), QuantType_PostTraining, quant_max_t, - quant_min_t, bit_num_t, perchanel); + auto status = + QuantFilter<int8_t>(paramValue, primitive, QuantType_PostTraining, quant_max_t, quant_min_t, bit_num_t, perchanel); if (status != RET_OK) { MS_LOG(ERROR) << "QuantFilter failed: " << status; return status; @@ -520,8 +608,8 @@ STATUS PostTrainingQuantizer::DoWeightQuant(const std::string &op_name, const An return RET_OK; } -STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::shared_ptr<PrimitiveC> &primitive_c) { - if (primitive_c == nullptr || bias == nullptr) { +STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const PrimitivePtr &primitive) { + if (primitive == nullptr || bias == nullptr) { MS_LOG(ERROR) << "null pointer!"; return RET_NULL_PTR; } @@ -530,7 +618,9 @@ STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::sha auto bias_default_param = bias_parameter_ptr->default_param(); auto bias_param = std::dynamic_pointer_cast<ParamValueLite>(bias_default_param); MS_ASSERT(bias_parameter_ptr != nullptr); - auto active_weight_quant_params = primitive_c->input_quant_params(); + auto quant_param_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); + auto active_weight_quant_params = quant_param_holder->input_quant_params(); if (active_weight_quant_params.size() != 2) { MS_LOG(ERROR) << "unexpected active_weight_quant_params size: " << active_weight_quant_params.size(); return RET_ERROR; @@ -566,7 +656,7 @@ STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::sha size_t shape_size = bias_param->tensor_shape_size(); // set bias quant param - vector<schema::QuantParamT> quant_params; + std::vector<schema::QuantParamT> quant_params; for (double bias_scale : bias_scales) { schema::QuantParamT quant_param; quant_param.scale = bias_scale; @@ -578,68 +668,12 @@ STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::sha std::vector<int32_t> quant_datas(shape_size); auto *raw_datas = static_cast<float *>(bias_param->tensor_addr()); - double bias_scale_tmp; - const constexpr int32_t quanted_bias_abs_limit = 0.5 * INT32_MAX; - - if (bias_scales.size() == shape_size) { - for (size_t i = 0; i < shape_size; i++) { - bias_scale_tmp = bias_scales[i]; - if (fabs(bias_scale_tmp) <= 0.0f) { - MS_LOG(ERROR) << "divisor 'bias_scale_tmp' cannot be 0."; - return RET_ERROR; - } - if (std::abs(raw_datas[i] / bias_scale_tmp) >= quanted_bias_abs_limit) { - MS_LOG(DEBUG) << "quanted bias over flow, maybe the scale of weight: " << active_weight_quant_params[1][i].scale - << " is too small, need to update"; - // update filter scale and zp - double activate_scale = input_scales[0]; - double filter_scale = std::abs(raw_datas[i]) / (activate_scale * quanted_bias_abs_limit); - active_weight_quant_params[1][i].scale = filter_scale; - active_weight_quant_params[1][i].zeroPoint = 0; - primitive_c->set_input_quant_params(active_weight_quant_params); - bias_scale_tmp = std::abs(raw_datas[i]) / quanted_bias_abs_limit; - quant_params[i].scale = bias_scale_tmp; - MS_LOG(DEBUG) << "new filter scale: " << filter_scale; - } - auto quant_data = 
(int32_t)std::round(raw_datas[i] / bias_scale_tmp); - quant_datas[i] = quant_data; - } - } else if (bias_scales.size() == 1) { - // for fc, per tensor quant - bias_scale_tmp = quant_params[0].scale; - float max_raw_data = 0.0f; - for (size_t i = 0; i < shape_size; i++) { - if (std::abs(raw_datas[i]) > max_raw_data) { - max_raw_data = std::abs(raw_datas[i]); - } - } - if (fabs(bias_scale_tmp) <= 0.0f) { - MS_LOG(ERROR) << "divisor 'bias_scale_tmp' cannot be 0."; - return RET_ERROR; - } - if (std::abs(max_raw_data / bias_scale_tmp) >= quanted_bias_abs_limit) { - MS_LOG(DEBUG) << "quanted bias over flow, maybe the scale of weight: " << active_weight_quant_params[1][0].scale - << " is too small, need to update"; - double activate_scale = input_scales[0]; - double filter_scale = std::abs(max_raw_data) / (activate_scale * quanted_bias_abs_limit); - active_weight_quant_params[1][0].scale = filter_scale; - active_weight_quant_params[1][0].zeroPoint = 0; - primitive_c->set_input_quant_params(active_weight_quant_params); - bias_scale_tmp = max_raw_data / quanted_bias_abs_limit; - quant_params[0].scale = bias_scale_tmp; - MS_LOG(DEBUG) << "new filter scale: " << filter_scale; - } - for (size_t i = 0; i < shape_size; i++) { - auto quant_data = (int32_t)std::round(raw_datas[i] / bias_scale_tmp); - quant_datas[i] = quant_data; - } - } else { - MS_LOG(ERROR) << "unexpected input_scales size: " << input_scales.size() - << " weight_scales size: " << active_weight_quant_params[1].size(); + if (ComputeBiasDataAndQuantParam(bias_scales, input_scales, raw_datas, quant_param_holder, &quant_params, + &quant_datas) != RET_OK) { + MS_LOG(ERROR) << "compute bias data failed."; return RET_ERROR; } - - primitive_c->AddInputQuantParam(quant_params); + quant_param_holder->AddInputQuantParam(quant_params); auto ret = memcpy_s(bias_param->tensor_addr(), bias_param->tensor_size(), quant_datas.data(), shape_size * sizeof(int32_t)); if (ret != EOK) { @@ -665,6 +699,87 @@ STATUS PostTrainingQuantizer::DoBiasQuant(const AnfNodePtr &bias, const std::sha return RET_OK; } +STATUS PostTrainingQuantizer::QuantNodeSimpleOp(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto inputs_diverg_info = calibrator_->GetInputDivergInfo(); + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + return RET_ERROR; + } + auto op_name = cnode->fullname_with_scope(); + auto primitive_quant_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(primitive_quant_holder != nullptr); + for (size_t i = 1; i < cnode->inputs().size(); i++) { + auto input_node = cnode->input(i); + MS_ASSERT(input_node != nullptr); + bool is_graph_input = false; + if (input_node->isa<Parameter>()) { + if (!input_node->cast<ParameterPtr>()->has_default()) { + is_graph_input = true; + } + } + if (input_node->isa<mindspore::CNode>()) { + if (primitive->name() == ops::kNameGather) { + continue; + } + auto input_cnode = std::dynamic_pointer_cast<mindspore::CNode>(input_node); + auto input_cnode_primitive = GetValueNode<PrimitivePtr>(input_cnode->input(0)); + if (input_cnode_primitive == nullptr) { + MS_LOG(DEBUG) << "input: " << i << " " << input_cnode->fullname_with_scope() << ": " + << " Primitive is null"; + continue; + } + auto input_primitive_quant_holder = GetCNodeQuantHolder(input_cnode_primitive); + MS_ASSERT(input_primitive_quant_holder != nullptr); + if (input_primitive_quant_holder->IsOutputQuantParamsInited()) { + auto quant_param = input_primitive_quant_holder->output_quant_params().front(); + 
primitive_quant_holder->AddInputQuantParam(quant_param); + } else { + // do input quant + auto &info = (*inputs_diverg_info)[op_name][i - 1]; + auto input_scale = info->GetScale().second; + auto input_zp = info->GetZeropoint().second; + struct MaxMin input_min_max {}; + input_min_max.max = info->max; + input_min_max.min = info->min; + DoQuantInput(input_scale, input_zp, &input_min_max, primitive); + } + } else if (is_graph_input) { + auto &info = (*inputs_diverg_info)[op_name][i - 1]; + auto input_scale = info->GetScale().second; + auto input_zp = info->GetZeropoint().second; + struct MaxMin input_min_max {}; + input_min_max.max = info->max; + input_min_max.min = info->min; + DoQuantInput(input_scale, input_zp, &input_min_max, primitive); + } else { + MS_LOG(DEBUG) << "node: " << op_name << " input " << i << " not a cnode"; + // get dtype + auto abstractBase = input_node->abstract(); + if (abstractBase == nullptr) { + MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << input_node->fullname_with_scope(); + return RET_ERROR; + } + if (!utils::isa<abstract::AbstractTensorPtr>(abstractBase)) { + MS_LOG(ERROR) << "Abstract of parameter should be abstract tensor, " << input_node->fullname_with_scope(); + return RET_ERROR; + } + auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase); + if (abstractTensor == nullptr || abstractTensor->element() == nullptr) { + MS_LOG(ERROR) << "abstractTensor is nullptr, " << input_node->fullname_with_scope(); + return RET_NULL_PTR; + } + if (abstractTensor->element()->GetTypeTrack()->type_id() == kNumberTypeFloat32) { + MS_LOG(DEBUG) << "this parameter will be quantized"; + DoWeightQuant(op_name, input_node, primitive, false); + } else { + MS_LOG(DEBUG) << "this parameter does not need to be quantized"; + } + } + } + return RET_OK; +} + STATUS PostTrainingQuantizer::QuantNode() { auto inputs_diverg_info = calibrator_->GetInputDivergInfo(); auto outputs_diverg_info = calibrator_->GetOutputDivergInfo(); @@ -672,117 +787,53 @@ auto cnodes = funcGraph->GetOrderedCnodes(); for (auto &cnode : cnodes) { auto op_name = cnode->fullname_with_scope(); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr"; continue; } + auto primitive_quant_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(primitive_quant_holder != nullptr); if (inputs_diverg_info->find(op_name) == inputs_diverg_info->end()) { MS_LOG(INFO) << op_name << " can not do quant"; - primitive_c->set_quant_type(schema::QuantType_QUANT_NONE); + primitive_quant_holder->set_quant_type(schema::QuantType_QUANT_NONE); continue; } - auto op_type = (schema::PrimitiveType)primitive_c->Type(); + auto op_type = primitive->name(); MS_LOG(DEBUG) << "OpName: " << op_name; - if (op_type == PrimitiveType_TupleGetItem) { + if (op_type == ops::kNameTupleGetItem) { auto index_node = cnode->input(2); auto index_value_node = std::dynamic_pointer_cast<mindspore::ValueNode>(index_node); if (index_value_node == nullptr) { MS_LOG(WARNING) << "index value node is null"; continue; } - size_t index = CastToInt(index_value_node->value()).front(); + size_t index = opt::CastToInt(index_value_node->value()).front(); auto input_node = cnode->input(1); MS_ASSERT(input_node != nullptr); auto input_cnode = std::dynamic_pointer_cast<mindspore::CNode>(input_node);
MS_ASSERT(input_cnode != nullptr); - auto input_cnode_primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0)); - if (input_cnode_primitive_c == nullptr) { - MS_LOG(WARNING) << "input_cnode_primitive_c is null"; + auto input_cnode_primitive = GetValueNode<PrimitivePtr>(input_cnode->input(0)); + if (input_cnode_primitive == nullptr) { + MS_LOG(WARNING) << "input_cnode_primitive is null"; continue; } - if (input_cnode_primitive_c->output_quant_params().size() > index) { - auto quant_param = input_cnode_primitive_c->output_quant_params()[index]; - primitive_c->AddInputQuantParam(quant_param); - primitive_c->AddOutputQuantParam(quant_param); + auto input_primitive_quant_holder = GetCNodeQuantHolder(input_cnode_primitive); + MS_ASSERT(input_primitive_quant_holder != nullptr); + if (input_primitive_quant_holder->output_quant_params().size() > index) { + auto quant_param = input_primitive_quant_holder->output_quant_params()[index]; + primitive_quant_holder->AddInputQuantParam(quant_param); + primitive_quant_holder->AddOutputQuantParam(quant_param); } else { MS_LOG(WARNING) << "this TupleGetItem node's input node: " << input_cnode->fullname_with_scope() - << "'s output quant_params size: " << input_cnode_primitive_c->output_quant_params().size() + << "'s output quant_params size: " << input_primitive_quant_holder->output_quant_params().size() << ", but index: " << index; } - primitive_c->set_quant_type(schema::QuantType_PostTraining); + primitive_quant_holder->set_quant_type(schema::QuantType_PostTraining); continue; - } else if (op_type != PrimitiveType_Conv2D && op_type != PrimitiveType_DepthwiseConv2D && - op_type != PrimitiveType_DeConv2D && op_type != PrimitiveType_DeDepthwiseConv2D && - op_type != PrimitiveType_FullConnection && op_type != PrimitiveType_LayerNorm) { - for (size_t i = 1; i < cnode->inputs().size(); i++) { - auto input_node = cnode->input(i); - MS_ASSERT(input_node != nullptr); - bool is_graph_input = false; - if (input_node->isa<Parameter>()) { - if (!input_node->cast<ParameterPtr>()->has_default()) { - is_graph_input = true; - } - } - if (input_node->isa<mindspore::CNode>()) { - if (op_type == PrimitiveType_Gather) { - continue; - } - auto input_cnode = std::dynamic_pointer_cast<mindspore::CNode>(input_node); - auto input_cnode_primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0)); - if (input_cnode_primitive_c == nullptr) { - MS_LOG(DEBUG) << "input: " << i << " " << input_cnode->fullname_with_scope() << ": " - << " PrimitiveC is null"; - continue; - } - if (input_cnode_primitive_c->IsOutputQuantParamsInited()) { - auto quant_param = input_cnode_primitive_c->output_quant_params().front(); - primitive_c->AddInputQuantParam(quant_param); - } else { - // do input quant - auto &info = (*inputs_diverg_info)[op_name][i - 1]; - auto input_scale = info->GetScale().second; - auto input_zp = info->GetZeropoint().second; - struct MaxMin input_min_max {}; - input_min_max.max = info->max; - input_min_max.min = info->min; - DoQuantInput(input_scale, input_zp, &input_min_max, primitive_c); - } - } else if (is_graph_input) { - auto &info = (*inputs_diverg_info)[op_name][i - 1]; - auto input_scale = info->GetScale().second; - auto input_zp = info->GetZeropoint().second; - struct MaxMin input_min_max {}; - input_min_max.max = info->max; - input_min_max.min = info->min; - DoQuantInput(input_scale, input_zp, &input_min_max, primitive_c); - } else { - MS_LOG(DEBUG) << "node: " << op_name << " input " << i << " not a cnode"; - // get dtype - auto 
abstractBase = input_node->abstract(); - if (abstractBase == nullptr) { - MS_LOG(ERROR) << "Abstract of parameter is nullptr, " << input_node->fullname_with_scope(); - return RET_ERROR; - } - if (!utils::isa<abstract::AbstractTensorPtr>(abstractBase)) { - MS_LOG(ERROR) << "Abstract of parameter should be anstract tensor, " << input_node->fullname_with_scope(); - return RET_ERROR; - } - auto abstractTensor = utils::cast<abstract::AbstractTensorPtr>(abstractBase); - if (abstractTensor == nullptr || abstractTensor->element() == nullptr) { - MS_LOG(ERROR) << "abstractTensor is nullptr, " << input_node->fullname_with_scope(); - return RET_NULL_PTR; - } - if (abstractTensor->element()->GetTypeTrack()->type_id() == kNumberTypeFloat32) { - MS_LOG(DEBUG) << "this parameter do quant"; - DoWeightQuant(op_name, input_node, primitive_c, false); - } else { - MS_LOG(DEBUG) << "this parameter no need to do quant"; - } - } - } - } else { + } else if (op_type == ops::kNameConv2DFusion || op_type == ops::kNameConv2dTransposeFusion || + op_type == ops::kNameFullConnection || op_type == ops::kNameLayerNormFusion) { // do input quant auto &info = (*inputs_diverg_info)[op_name][0]; auto input_scale = info->GetScale().second; @@ -790,19 +841,24 @@ STATUS PostTrainingQuantizer::QuantNode() { struct MaxMin input_min_max {}; input_min_max.max = info->max; input_min_max.min = info->min; - DoQuantInput(input_scale, input_zp, &input_min_max, primitive_c); + DoQuantInput(input_scale, input_zp, &input_min_max, primitive); // do weight quant auto weight = cnode->input(2); bool perchannel = false; - if (op_type == PrimitiveType_Conv2D || op_type == PrimitiveType_DepthwiseConv2D || - op_type == PrimitiveType_FullConnection) { + if (op_type == ops::kNameConv2DFusion || op_type == ops::kNameFullConnection) { perchannel = true; } - DoWeightQuant(op_name, weight, primitive_c, perchannel); + DoWeightQuant(op_name, weight, primitive, perchannel); // do bias quant if (cnode->inputs().size() == 4) { auto bias = cnode->input(3); - DoBiasQuant(bias, primitive_c); + DoBiasQuant(bias, primitive); + } + } else { // do simple op quant + auto status = QuantNodeSimpleOp(cnode); + if (status != RET_OK) { + MS_LOG(ERROR) << "simple op quant failed."; + return status; } } // do output quant, there may multi-output @@ -814,8 +870,8 @@ STATUS PostTrainingQuantizer::QuantNode() { output_min_max.max = info->max; output_min_max.min = info->min; - DoQuantOutput(output_scale, output_zp, &output_min_max, primitive_c); - primitive_c->set_quant_type(schema::QuantType_PostTraining); + DoQuantOutput(output_scale, output_zp, &output_min_max, primitive); + primitive_quant_holder->set_quant_type(schema::QuantType_PostTraining); } } return RET_OK; @@ -856,12 +912,14 @@ STATUS PostTrainingQuantizer::PreProcess() { if (strategy.CanOpPostQuantized(anf)) { calibrator_->AddQuantizedOp(cnode); } - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { MS_LOG(ERROR) << cnode->fullname_with_scope() << " primitive is null"; continue; } - primitive_c->ClearInputOutputQuantParam(); + auto quant_param_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); + quant_param_holder->ClearInputOutputQuantParam(); } return RET_OK; } @@ -989,125 +1047,11 @@ STATUS PostTrainingQuantizer::Int8Inference() { } for (size_t i = 0; i < calibrator_->GetBatchNum(); i++) { - KernelCallBack beforeCallBack = 
[this](const std::vector<mindspore::tensor::MSTensor *> &beforeInputs, - const std::vector<mindspore::tensor::MSTensor *> &beforeOutputs, - const CallBackParam &callParam) -> bool { - if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { - vector<float> fp32_op_input; - while (!OpInputDataHandle(FETCH, callParam.node_name, &fp32_op_input)) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - auto tensor = beforeInputs[0]; - MS_ASSERT(tensor != nullptr); - auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor); - MS_ASSERT(lite_tensor != nullptr); - if (tensor->data_type() != kNumberTypeInt8) { - MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type(); - return false; - } - // do quantization: activation is always per layer quantized - std::vector<int8_t> quant_datas; - auto quant_params = lite_tensor->quant_params(); - if (quant_params.size() != 1) { - MS_LOG(ERROR) << "unexpected quant_params size: " << quant_params.size(); - return false; - } - schema::QuantParamT quant_param_t; - quant_param_t.scale = quant_params[0].scale; - quant_param_t.zeroPoint = quant_params[0].zeroPoint; - for (auto float_data : fp32_op_input) { - auto quant_data = QuantizeData<int8_t>(float_data, quant_param_t, quant_max, quant_min); - quant_datas.push_back(quant_data); - } - - if (tensor->Size() != quant_datas.size() * sizeof(int8_t)) { - MS_LOG(ERROR) << "unexpected tensor size: " << quant_datas.size() - << " not the same with: " << quant_datas.size() * sizeof(int8_t); - return false; - } - - auto ret = - memcpy_s(tensor->MutableData(), tensor->Size(), quant_datas.data(), quant_datas.size() * sizeof(int8_t)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy error: " << ret; - return false; - } - } - return true; - }; - // func - KernelCallBack afterCallBack = [this](const std::vector<mindspore::tensor::MSTensor *> &afterInputs, - const std::vector<mindspore::tensor::MSTensor *> &afterOutputs, - const CallBackParam &callParam) -> bool { - if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { - vector<float> fp32_op_output_ch_mean; - while (!OpOutputChMeanDataHandle(FETCH, callParam.node_name, &fp32_op_output_ch_mean)) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - auto tensor = afterOutputs[0]; - MS_ASSERT(tensor != nullptr); - auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor); - MS_ASSERT(lite_tensor != nullptr); - if (tensor->data_type() != kNumberTypeInt8) { - MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type(); - return false; - } - const int8_t *tensor_data = static_cast<int8_t *>(tensor->MutableData()); - size_t elem_count = tensor->ElementsNum(); - auto shapes = tensor->shape(); - if (shapes.size() != 4) { - MS_LOG(ERROR) << "unexpected shape size: " << shapes.size(); - return false; - } - // suppose the the format is NHWC - auto channels = shapes[3]; - if (channels == 0) { - MS_LOG(ERROR) << "unexpected channels: 0"; - return false; - } - auto quant_params = lite_tensor->quant_params(); - if (quant_params.size() != 1) { - MS_LOG(ERROR) << "unexpected activatation quant_params size: " << quant_params.size(); - return false; - } - auto scale = quant_params[0].scale; - auto zp = quant_params[0].zeroPoint; - - std::vector<float> dequant_op_output_ch_mean(channels); - auto one_filter_size = elem_count / channels; - for (int i = 0; i < channels; i++) { - float sum = 0; - for (size_t j = 0; j < one_filter_size; j++) { - auto index = j * channels + i; 
- if (index >= elem_count) { - MS_LOG(ERROR) << "over flow!"; - return RET_ERROR; - } - // deuqant activation - auto float_data = scale * (tensor_data[index] - zp); - sum += float_data; - } - if (one_filter_size == 0) { - MS_LOG(ERROR) << "divisor 'one_filter_size' cannot be 0."; - return RET_ERROR; - } - sum = sum / one_filter_size; - dequant_op_output_ch_mean[i] = sum; - } - std::transform(fp32_op_output_ch_mean.begin(), fp32_op_output_ch_mean.end(), dequant_op_output_ch_mean.begin(), - dequant_op_output_ch_mean.begin(), std::minus<>()); - - if (op_bias_diff_map.find(callParam.node_name) != op_bias_diff_map.end()) { - auto &bias_diff = op_bias_diff_map[callParam.node_name]; - std::transform(bias_diff.begin(), bias_diff.end(), dequant_op_output_ch_mean.begin(), bias_diff.begin(), - std::plus<>()); - } else { - op_bias_diff_map[callParam.node_name] = dequant_op_output_ch_mean; - } - } - return true; - }; - auto ret = int8_session_->RunGraph(beforeCallBack, afterCallBack); + // before func + KernelCallBack before_call_back = GetBeforeCallBack(true); + // after func + KernelCallBack after_call_back = GetAfterCallBack(true); + auto ret = int8_session_->RunGraph(before_call_back, after_call_back); if (ret != RET_OK) { MS_LOG(ERROR) << "run model failed!"; return RET_ERROR; @@ -1117,7 +1061,6 @@ STATUS PostTrainingQuantizer::Int8Inference() { } STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph) { - auto ret = RET_OK; std::future<STATUS> int8_inference = std::async(std::launch::async, &PostTrainingQuantizer::Int8Inference, this); // get input tensor vector<mindspore::tensor::MSTensor *> inputs = fp32_session_->GetInputs(); @@ -1134,87 +1077,19 @@ STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph) { return RET_ERROR; } } - KernelCallBack beforeCallBack = [this](const std::vector<mindspore::tensor::MSTensor *> &beforeInputs, - const std::vector<mindspore::tensor::MSTensor *> &beforeOutputs, - const CallBackParam &callParam) -> bool { - if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { - if (PostTrainingQuantizer::CheckFp32TensorVec(callParam.node_name, beforeInputs) != RET_OK) { - return false; - } - auto tensor = beforeInputs[0]; - MS_ASSERT(tensor != nullptr); - size_t elem_count = tensor->ElementsNum(); - std::vector<float> fp32_op_input(elem_count); - auto ret = - memcpy_s(fp32_op_input.data(), fp32_op_input.size() * sizeof(float), tensor->MutableData(), tensor->Size()); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy error: " << ret; - return false; - } - while (!OpInputDataHandle(STORE, callParam.node_name, &fp32_op_input)) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - } - return true; - }; - // func - KernelCallBack afterCallBack = [this](const std::vector<mindspore::tensor::MSTensor *> &afterInputs, - const std::vector<mindspore::tensor::MSTensor *> &afterOutputs, - const CallBackParam &callParam) -> bool { - if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { - if (PostTrainingQuantizer::CheckFp32TensorVec(callParam.node_name, afterOutputs) != RET_OK) { - return false; - } - auto tensor = afterOutputs[0]; - MS_ASSERT(tensor != nullptr); - const auto *tensor_data = static_cast<const float *>(tensor->MutableData()); - size_t elem_count = tensor->ElementsNum(); - auto shapes = tensor->shape(); - if (shapes.size() != 4) { - MS_LOG(ERROR) << "unexpected shape size: " << shapes.size(); - return false; - } - // suppose the activation format: NHWC 
- auto channels = shapes[3]; - if (channels == 0) { - MS_LOG(ERROR) << "unexpected channels: 0"; - return false; - } - std::vector<float> fp32_op_output_ch_mean(channels); - auto one_filter_size = elem_count / channels; - for (int i = 0; i < channels; i++) { - float sum = 0; - for (size_t j = 0; j < one_filter_size; j++) { - auto index = j * channels + i; - if (index >= elem_count) { - MS_LOG(ERROR) << "over flow!"; - return RET_ERROR; - } - sum += tensor_data[index]; - } - if (one_filter_size == 0) { - MS_LOG(ERROR) << "divisor 'one_filter_size' cannot be 0."; - return false; - } - sum = sum / one_filter_size; - fp32_op_output_ch_mean[i] = sum; - } - while (!OpOutputChMeanDataHandle(STORE, callParam.node_name, &fp32_op_output_ch_mean)) { - std::this_thread::sleep_for(std::chrono::milliseconds(10)); - } - } - - return true; - }; - auto status = fp32_session_->RunGraph(beforeCallBack, afterCallBack); + // before func + KernelCallBack before_call_back = GetBeforeCallBack(false); + // after func + KernelCallBack after_call_back = GetAfterCallBack(false); + auto status = fp32_session_->RunGraph(before_call_back, after_call_back); if (status != RET_OK) { MS_LOG(ERROR) << "run model failed!"; return RET_ERROR; } } // end for images - ret = int8_inference.get(); - if (ret != RET_OK) { + STATUS status = int8_inference.get(); + if (status != RET_OK) { MS_LOG(ERROR) << "int8 inference failed!"; return RET_ERROR; } @@ -1229,105 +1104,113 @@ STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph) { auto cnodes = func_graph->GetOrderedCnodes(); for (auto &cnode : cnodes) { auto op_name = cnode->fullname_with_scope(); - if (op_bias_diff_map.find(op_name) != op_bias_diff_map.end()) { - const auto &bias_diff = op_bias_diff_map[op_name]; - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; - continue; - } - auto input_quant_params = primitive_c->input_quant_params(); - - if (input_quant_params.size() == 3) { - // compensate the existed - auto bias_quant_params = input_quant_params[2]; - auto bias = cnode->input(3); - auto bias_parameter_ptr = std::dynamic_pointer_cast<Parameter>(bias); - auto bias_default_param = bias_parameter_ptr->default_param(); - auto bias_param = std::dynamic_pointer_cast<ParamValueLite>(bias_default_param); - int *bias_datas = static_cast<int *>(bias_param->tensor_addr()); - - if (static_cast<size_t>(bias_param->tensor_shape_size()) != bias_diff.size()) { - MS_LOG(ERROR) << "unexpected bias data count: " << bias_param->tensor_shape_size() - << " not the same as bias_diff: " << bias_diff.size(); - continue; - } - if (bias_quant_params.size() != bias_diff.size()) { - MS_LOG(ERROR) << "unexpected bias quant params size: " << bias_quant_params.size() - << " not the same as bias_diff: " << bias_diff.size(); - } + if (op_bias_diff_map.find(op_name) == op_bias_diff_map.end()) { + continue; + } + status = BiasCorrection(func_graph, cnode); + if (status != RET_OK) { + MS_LOG(ERROR) << "do node bias correct failed."; + break; + } + } + return status; +} - for (int i = 0; i < bias_param->tensor_shape_size(); i++) { - auto scale = bias_quant_params[i].scale; - if (fabs(scale) <= 0.0f) { - MS_LOG(ERROR) << "divisor 'scale' cannot be 0."; - return RET_ERROR; - } - double after_correct = std::round(bias_diff[i] / scale) + bias_datas[i]; - const constexpr int32_t corrected_bias_abs_limit = 0.6 * INT32_MAX; - if (after_correct > corrected_bias_abs_limit) { - MS_LOG(WARNING) 
<< op_name << " ch: " << i << " bias after_corrected too large: " << after_correct - << " origin value: " << bias_datas[i] << " bias_diff: " << bias_diff[i] - << " scale: " << scale; - bias_datas[i] = static_cast<int>(corrected_bias_abs_limit); - } else if (after_correct < -corrected_bias_abs_limit) { - MS_LOG(WARNING) << op_name << " ch: " << i << " bias after_corrected too small: " << after_correct - << " origin value: " << bias_datas[i] << " bias_diff: " << bias_diff[i] - << " scale: " << scale; - bias_datas[i] = static_cast<int>(-corrected_bias_abs_limit); - } else { - auto diff = static_cast<int>(std::round(bias_diff[i] / scale)); - bias_datas[i] += diff; - } - } - } else if (input_quant_params.size() == 2) { - MS_LOG(INFO) << op_name << " add bias input"; - // need to add bias input - auto parameter = func_graph->add_parameter(); - if (parameter == nullptr) { - MS_LOG(ERROR) << "parameter is nullptr."; - return RET_NULL_PTR; - } - ShapeVector shape; - shape.push_back(bias_diff.size()); - auto type_ptr = TypeIdToType(kNumberTypeFloat32); - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape); - parameter->set_abstract(abstract_tensor); - parameter->set_name("added_" + op_name + "_bias"); - - ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - MS_ASSERT(param_value != nullptr); - std::vector<int32_t> shape_vector; - (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int64_t &value) { return static_cast<int32_t>(value); }); - param_value->set_tensor_shape(shape_vector); - param_value->set_tensor_type(kNumberTypeFloat32); - - auto size = sizeof(float) * bias_diff.size(); - char *tensor_data = new (std::nothrow) char[size]; - if (tensor_data == nullptr) { - MS_LOG(ERROR) << "new char[] failed"; - return RET_MEMORY_FAILED; - } - ret = memcpy_s(tensor_data, size * sizeof(char), bias_diff.data(), size * sizeof(char)); - if (ret != EOK) { - MS_LOG(ERROR) << "memcpy_s error: " << ret; - delete[] tensor_data; - return false; - } - param_value->SetTensorData(tensor_data, size); - parameter->set_default_param(param_value); - cnode->add_input(parameter); - DoBiasQuant(parameter, primitive_c); - delete[] tensor_data; +STATUS PostTrainingQuantizer::BiasCorrection(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { + auto op_name = cnode->fullname_with_scope(); + const auto &bias_diff = op_bias_diff_map[op_name]; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr"; + return RET_NULL_PTR; + } + auto quant_param_holder = GetCNodeQuantHolder(primitive); + MS_ASSERT(quant_param_holder != nullptr); + auto input_quant_params = quant_param_holder->input_quant_params(); + if (input_quant_params.size() == 3) { + // compensate the existed + auto bias_quant_params = input_quant_params[2]; + auto bias = cnode->input(3); + auto bias_parameter_ptr = std::dynamic_pointer_cast<Parameter>(bias); + auto bias_default_param = bias_parameter_ptr->default_param(); + auto bias_param = std::dynamic_pointer_cast<ParamValueLite>(bias_default_param); + int *bias_datas = static_cast<int *>(bias_param->tensor_addr()); + + if (static_cast<size_t>(bias_param->tensor_shape_size()) != bias_diff.size()) { + MS_LOG(DEBUG) << "unexpected bias data count: " << bias_param->tensor_shape_size() + << " not the same as bias_diff: " << bias_diff.size(); + return RET_ERROR; + } + if (bias_quant_params.size() != bias_diff.size()) { + MS_LOG(ERROR) << "unexpected bias 
quant params size: " << bias_quant_params.size() + << " not the same as bias_diff: " << bias_diff.size(); + return RET_ERROR; + } + for (int i = 0; i < bias_param->tensor_shape_size(); i++) { + auto scale = bias_quant_params[i].scale; + if (fabs(scale) <= 0.0f) { + MS_LOG(ERROR) << "divisor 'scale' cannot be 0."; + return RET_ERROR; + } + double after_correct = std::round(bias_diff[i] / scale) + bias_datas[i]; + const constexpr int32_t corrected_bias_abs_limit = 0.6 * INT32_MAX; + if (after_correct > corrected_bias_abs_limit) { + MS_LOG(WARNING) << op_name << " ch: " << i << " bias after_corrected too large: " << after_correct + << " origin value: " << bias_datas[i] << " bias_diff: " << bias_diff[i] << " scale: " << scale; + bias_datas[i] = static_cast<int>(corrected_bias_abs_limit); + } else if (after_correct < -corrected_bias_abs_limit) { + MS_LOG(WARNING) << op_name << " ch: " << i << " bias after_corrected too small: " << after_correct + << " origin value: " << bias_datas[i] << " bias_diff: " << bias_diff[i] << " scale: " << scale; + bias_datas[i] = static_cast<int>(-corrected_bias_abs_limit); } else { - MS_LOG(ERROR) << "unexpected input_quant_params size: " << input_quant_params.size(); - continue; + auto diff = static_cast<int>(std::round(bias_diff[i] / scale)); + bias_datas[i] += diff; } - } // end fine op_name + } + } else if (input_quant_params.size() == 2) { + MS_LOG(INFO) << op_name << " add bias input"; + // need to add bias input + auto parameter = func_graph->add_parameter(); + if (parameter == nullptr) { + MS_LOG(ERROR) << "parameter is nullptr."; + return RET_NULL_PTR; + } + ShapeVector shape; + shape.push_back(bias_diff.size()); + auto type_ptr = TypeIdToType(kNumberTypeFloat32); + auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape); + parameter->set_abstract(abstract_tensor); + parameter->set_name("added_" + op_name + "_bias"); + + ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); + MS_ASSERT(param_value != nullptr); + std::vector<int32_t> shape_vector; + (void)std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), + [](const int64_t &value) { return static_cast<int32_t>(value); }); + param_value->set_tensor_shape(shape_vector); + param_value->set_tensor_type(kNumberTypeFloat32); + + auto size = sizeof(float) * bias_diff.size(); + char *tensor_data = new (std::nothrow) char[size]; + if (tensor_data == nullptr) { + MS_LOG(ERROR) << "new char[] failed"; + return RET_MEMORY_FAILED; + } + STATUS status = memcpy_s(tensor_data, size * sizeof(char), bias_diff.data(), size * sizeof(char)); + if (status != EOK) { + MS_LOG(ERROR) << "memcpy_s error: " << status; + delete[] tensor_data; + return RET_ERROR; + } + param_value->SetTensorData(tensor_data, size); + parameter->set_default_param(param_value); + cnode->add_input(parameter); + DoBiasQuant(parameter, primitive); + delete[] tensor_data; + } else { + MS_LOG(ERROR) << "unexpected input_quant_params size: " << input_quant_params.size(); } - - return ret; + return RET_OK; } STATUS PostTrainingQuantizer::CollectDataFrequency() { @@ -1556,4 +1439,214 @@ bool PostTrainingQuantizer::OpOutputChMeanDataHandle(OperationType type, const s } return false; } + +KernelCallBack PostTrainingQuantizer::GetBeforeCallBack(bool int8_op) { + KernelCallBack before_call_back; + if (!int8_op) { + before_call_back = [this](const std::vector<mindspore::tensor::MSTensor *> &beforeInputs, + const std::vector<mindspore::tensor::MSTensor *> &beforeOutputs, + const CallBackParam 
&callParam) -> bool { + if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { + if (PostTrainingQuantizer::CheckFp32TensorVec(callParam.node_name, beforeInputs) != RET_OK) { + return false; + } + auto tensor = beforeInputs[0]; + MS_ASSERT(tensor != nullptr); + size_t elem_count = tensor->ElementsNum(); + std::vector<float> fp32_op_input(elem_count); + auto ret = + memcpy_s(fp32_op_input.data(), fp32_op_input.size() * sizeof(float), tensor->MutableData(), tensor->Size()); + if (ret != EOK) { + MS_LOG(ERROR) << "memcpy error: " << ret; + return false; + } + while (!OpInputDataHandle(STORE, callParam.node_name, &fp32_op_input)) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + return true; + }; + } else { + before_call_back = [this](const std::vector<mindspore::tensor::MSTensor *> &beforeInputs, + const std::vector<mindspore::tensor::MSTensor *> &beforeOutputs, + const CallBackParam &callParam) -> bool { + if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { + vector<float> fp32_op_input; + while (!OpInputDataHandle(FETCH, callParam.node_name, &fp32_op_input)) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + auto tensor = beforeInputs[0]; + MS_ASSERT(tensor != nullptr); + auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor); + MS_ASSERT(lite_tensor != nullptr); + if (tensor->data_type() != kNumberTypeInt8) { + MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type(); + return false; + } + // do quantization: activation is always per layer quantized + std::vector<int8_t> quant_datas; + auto quant_params = lite_tensor->quant_params(); + if (quant_params.size() != 1) { + MS_LOG(ERROR) << "unexpected quant_params size: " << quant_params.size(); + return false; + } + schema::QuantParamT quant_param_t; + quant_param_t.scale = quant_params[0].scale; + quant_param_t.zeroPoint = quant_params[0].zeroPoint; + for (auto float_data : fp32_op_input) { + auto quant_data = QuantizeData<int8_t>(float_data, quant_param_t, quant_max, quant_min); + quant_datas.push_back(quant_data); + } + + if (tensor->Size() != quant_datas.size() * sizeof(int8_t)) { + MS_LOG(ERROR) << "unexpected tensor size: " << tensor->Size() + << " not the same as: " << quant_datas.size() * sizeof(int8_t); + return false; + } + + auto ret = + memcpy_s(tensor->MutableData(), tensor->Size(), quant_datas.data(), quant_datas.size() * sizeof(int8_t)); + if (ret != EOK) { + MS_LOG(ERROR) << "memcpy error: " << ret; + return false; + } + } + return true; + }; + } + return before_call_back; +} + +KernelCallBack PostTrainingQuantizer::GetAfterCallBack(bool int8_op) { + if (!int8_op) { + return GetFloatAfterCallBack(); + } + return GetInt8AfterCallBack(); +} + +KernelCallBack PostTrainingQuantizer::GetInt8AfterCallBack() { + KernelCallBack after_call_back = [this](const std::vector<mindspore::tensor::MSTensor *> &afterInputs, + const std::vector<mindspore::tensor::MSTensor *> &afterOutputs, + const CallBackParam &callParam) -> bool { + if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { + vector<float> fp32_op_output_ch_mean; + while (!OpOutputChMeanDataHandle(FETCH, callParam.node_name, &fp32_op_output_ch_mean)) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + auto tensor = afterOutputs[0]; + MS_ASSERT(tensor != nullptr); + auto lite_tensor = dynamic_cast<mindspore::lite::Tensor *>(tensor); + MS_ASSERT(lite_tensor 
!= nullptr); + if (tensor->data_type() != kNumberTypeInt8) { + MS_LOG(ERROR) << "unexpected tensor type: " << tensor->data_type(); + return false; + } + const int8_t *tensor_data = static_cast<int8_t *>(tensor->MutableData()); + size_t elem_count = tensor->ElementsNum(); + auto shapes = tensor->shape(); + if (shapes.size() != 4) { + MS_LOG(ERROR) << "unexpected shape size: " << shapes.size(); + return false; + } + // suppose the format is NHWC + auto channels = shapes[3]; + if (channels == 0) { + MS_LOG(ERROR) << "unexpected channels: 0"; + return false; + } + auto quant_params = lite_tensor->quant_params(); + if (quant_params.size() != 1) { + MS_LOG(ERROR) << "unexpected activation quant_params size: " << quant_params.size(); + return false; + } + auto scale = quant_params[0].scale; + auto zp = quant_params[0].zeroPoint; + std::vector<float> dequant_op_output_ch_mean(channels); + auto one_filter_size = elem_count / channels; + for (int i = 0; i < channels; i++) { + float sum = 0; + for (size_t j = 0; j < one_filter_size; j++) { + auto index = j * channels + i; + if (index >= elem_count) { + MS_LOG(ERROR) << "overflow!"; + return false; + } + // dequantize the activation + auto float_data = scale * (tensor_data[index] - zp); + sum += float_data; + } + if (one_filter_size == 0) { + MS_LOG(ERROR) << "divisor 'one_filter_size' cannot be 0."; + return false; + } + sum = sum / one_filter_size; + dequant_op_output_ch_mean[i] = sum; + } + std::transform(fp32_op_output_ch_mean.begin(), fp32_op_output_ch_mean.end(), dequant_op_output_ch_mean.begin(), + dequant_op_output_ch_mean.begin(), std::minus<>()); + + if (op_bias_diff_map.find(callParam.node_name) != op_bias_diff_map.end()) { + auto &bias_diff = op_bias_diff_map[callParam.node_name]; + std::transform(bias_diff.begin(), bias_diff.end(), dequant_op_output_ch_mean.begin(), bias_diff.begin(), + std::plus<>()); + } else { + op_bias_diff_map[callParam.node_name] = dequant_op_output_ch_mean; + } + } + return true; + }; + return after_call_back; +} + +KernelCallBack PostTrainingQuantizer::GetFloatAfterCallBack() { + KernelCallBack after_call_back = [this](const std::vector<mindspore::tensor::MSTensor *> &afterInputs, + const std::vector<mindspore::tensor::MSTensor *> &afterOutputs, + const CallBackParam &callParam) -> bool { + if (callParam.node_type == kTypeConv2D || callParam.node_type == kTypeDepthwiseConv2D) { + if (PostTrainingQuantizer::CheckFp32TensorVec(callParam.node_name, afterOutputs) != RET_OK) { + return false; + } + auto tensor = afterOutputs[0]; + MS_ASSERT(tensor != nullptr); + const auto *tensor_data = static_cast<const float *>(tensor->MutableData()); + size_t elem_count = tensor->ElementsNum(); + auto shapes = tensor->shape(); + if (shapes.size() != 4) { + MS_LOG(ERROR) << "unexpected shape size: " << shapes.size(); + return false; + } + // suppose the activation format: NHWC + auto channels = shapes[3]; + if (channels == 0) { + MS_LOG(ERROR) << "unexpected channels: 0"; + return false; + } + std::vector<float> fp32_op_output_ch_mean(channels); + auto one_filter_size = elem_count / channels; + for (int i = 0; i < channels; i++) { + float sum = 0; + for (size_t j = 0; j < one_filter_size; j++) { + auto index = j * channels + i; + if (index >= elem_count) { + MS_LOG(ERROR) << "overflow!"; + return false; + } + sum += tensor_data[index]; + } + if (one_filter_size == 0) { + MS_LOG(ERROR) << "divisor 'one_filter_size' cannot be 0."; + return false; + } + sum = sum / one_filter_size; + fp32_op_output_ch_mean[i] = sum; + } + 
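+      // Publish this op's per-channel fp32 means; the int8 pass consumes them via OpOutputChMeanDataHandle(FETCH, ...).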
while (!OpOutputChMeanDataHandle(STORE, callParam.node_name, &fp32_op_output_ch_mean)) { + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + } + } + return true; + }; + return after_call_back; +} } // namespace mindspore::lite::quant diff --git a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h index 7a95032a88..27a91d031d 100644 --- a/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/post_training_quantizer.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,6 +24,8 @@ #include <cfloat> #include <map> #include <utility> +#include "ops/primitive_c.h" +#include "schema/inner/model_generated.h" #include "src/lite_session.h" #include "tools/converter/quantizer/quantizer.h" #include "tools/converter/converter.h" @@ -68,8 +70,8 @@ class PostTrainingQuantizer : public Quantizer { session::LiteSession *int8_session_{nullptr}; Model *int8_model_{nullptr}; - std::map<std::string, std::vector<float>> fp32_op_input_map; // concurency - std::map<std::string, std::vector<float>> fp32_op_output_ch_mean_map; // concurency + std::map<std::string, std::vector<float>> fp32_op_input_map; // concurrency + std::map<std::string, std::vector<float>> fp32_op_output_ch_mean_map; // concurrency std::map<std::string, std::vector<float>> op_bias_diff_map; // only use by int8 model std::mutex mutex_op_input; std::mutex mutex_op_output; @@ -82,10 +84,11 @@ class PostTrainingQuantizer : public Quantizer { bool OpInputDataHandle(OperationType type, const string &op_name, std::vector<float> *data); bool OpOutputChMeanDataHandle(OperationType type, const string &op_name, std::vector<float> *data); - const std::string kTypeConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2D); - const std::string kTypeDepthwiseConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_DepthwiseConv2D); + const std::string kTypeConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion); + /* TODO: verify this mapping; DepthwiseConv2D is folded into Conv2DFusion in the unified IR */ + const std::string kTypeDepthwiseConv2D = schema::EnumNamePrimitiveType(schema::PrimitiveType_Conv2DFusion); const std::string kTypeConcat = schema::EnumNamePrimitiveType(schema::PrimitiveType_Concat); - const std::string kTypeAdd = schema::EnumNamePrimitiveType(schema::PrimitiveType_Add); + const std::string kTypeAdd = schema::EnumNamePrimitiveType(schema::PrimitiveType_AddFusion); STATUS PreProcess(); @@ -100,19 +103,25 @@ STATUS ComputeThreshold(); + STATUS QuantNodeSimpleOp(const CNodePtr &cnode); + STATUS QuantNode(); - STATUS DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min, - const std::shared_ptr<PrimitiveC> &lite_primitive) const; - STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min, - const std::shared_ptr<PrimitiveC> &) const; + STATUS DoQuantInput(double scale, int32_t zeropoint, struct MaxMin *max_min, const PrimitivePtr &primitive) const; + + STATUS DoQuantOutput(double scale, int32_t zeropoint, struct MaxMin *max_min, const PrimitivePtr &) const; - STATUS DoWeightQuant(const std::string &op_name, const 
AnfNodePtr &weight, const PrimitivePtr &primitive, bool perchannel) const; - STATUS DoBiasQuant(const AnfNodePtr &bias, const std::shared_ptr<PrimitiveC> &primitive_c); + STATUS DoBiasQuant(const AnfNodePtr &bias, const PrimitivePtr &primitive); STATUS Int8Inference(); STATUS BiasCorrection(const FuncGraphPtr &func_graph); + STATUS BiasCorrection(const FuncGraphPtr &func_graph, const CNodePtr &cnode); + KernelCallBack GetBeforeCallBack(bool int8_op); + KernelCallBack GetAfterCallBack(bool int8_op); + KernelCallBack GetInt8AfterCallBack(); + KernelCallBack GetFloatAfterCallBack(); }; struct DivergInfo { @@ -145,9 +154,9 @@ struct DivergInfo { std::fill(histogram.begin(), histogram.end(), 1.0e-7); } - STATUS RecordMaxValue(const std::vector<float> &datas); + STATUS RecordMaxValue(const std::vector<float> &data); - STATUS RecordMaxValueArray(const std::vector<float> &datas); + STATUS RecordMaxValueArray(const std::vector<float> &data); void UpdateInterval(); @@ -155,6 +164,9 @@ struct DivergInfo { void DumpHistogram(); + void HandleBinForKL(int quant_bint_nums, int bin_index, std::vector<float> *quantized_histogram, + std::vector<float> *expanded_histogram); + STATUS ComputeThreshold(); std::pair<CNodePtr, float> GetScale(); diff --git a/mindspore/lite/tools/converter/quantizer/quant_cast.cc b/mindspore/lite/tools/converter/quantizer/quant_cast.cc index b27892449e..4442ac06af 100644 --- a/mindspore/lite/tools/converter/quantizer/quant_cast.cc +++ b/mindspore/lite/tools/converter/quantizer/quant_cast.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
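For reference, the per-channel compensation applied by the new BiasCorrection(func_graph, cnode) overload above reduces to the following standalone sketch; the function and parameter names here are illustrative only, not part of the patch:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Sketch: shift each int32 bias value by round(channel_mean_diff / bias_scale),
// clamping the corrected value to +/-0.6 * INT32_MAX as the patch does.
void CorrectBiasSketch(const std::vector<float> &bias_diff, const std::vector<double> &bias_scales,
                       std::vector<int32_t> *bias_data) {
  constexpr int32_t kAbsLimit = static_cast<int32_t>(0.6 * INT32_MAX);
  for (std::size_t i = 0; i < bias_data->size(); ++i) {
    double corrected = std::round(bias_diff[i] / bias_scales[i]) + (*bias_data)[i];
    corrected = std::min<double>(std::max<double>(corrected, -kAbsLimit), kAbsLimit);
    (*bias_data)[i] = static_cast<int32_t>(corrected);
  }
}

The clamp bounds the corrected accumulator well below INT32_MAX so that subsequent int32 bias additions in the conv kernels have headroom.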
@@ -17,38 +17,39 @@ #include "mindspore/lite/tools/converter/quantizer/quant_cast.h" #include <memory> #include <vector> -#include "src/ops/primitive_c.h" +#include "ops/gather.h" +#include "ops/quant_dtype_cast.h" +#include "tools/converter/quantizer/quantize_util.h" namespace mindspore::lite::quant { ValueNodePtr NewQuantCastValueNode(int src_type, int dst_type, const std::vector<schema::QuantParamT> &quant_params) { - std::unique_ptr<schema::PrimitiveT> primitive = std::make_unique<schema::PrimitiveT>(); - schema::QuantDTypeCastT quant_dtype_cast; - quant_dtype_cast.srcT = src_type; // kNumberTypeInt8; - quant_dtype_cast.dstT = dst_type; // kNumberTypeFloat32; - primitive->value.Set(quant_dtype_cast); - auto primTValue = std::make_shared<PrimitiveC>(primitive.release()); - primTValue->set_quant_type(schema::QuantType_PostTraining); + auto prim_c = std::make_shared<ops::QuantDTypeCast>(); + prim_c->Init(src_type, dst_type); + auto quant_params_holder = std::make_shared<QuantParamHolder>(); + quant_params_holder->set_quant_type(schema::QuantType_PostTraining); for (auto &quant_param : quant_params) { std::vector<schema::QuantParamT> quant_params_in = {quant_param}; - primTValue->AddInputQuantParam(quant_params_in); - primTValue->AddOutputQuantParam(quant_params_in); + quant_params_holder->AddInputQuantParam(quant_params_in); + quant_params_holder->AddOutputQuantParam(quant_params_in); } - return NewValueNode(primTValue); + prim_c->AddAttr("quant_params", quant_params_holder); + return NewValueNode(prim_c); } STATUS QuantCast::Run(const FuncGraphPtr &graph) { MS_ASSERT(graph != nullptr); auto cnodes = graph->GetOrderedCnodes(); for (auto &cnode : cnodes) { - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); + auto primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(cnode->input(0)); + auto primitive_quant_param_holder = GetCNodeQuantHolder(primitive_c); + MS_ASSERT(primitive_quant_param_holder != nullptr); auto curnode_quant_type = schema::QuantType_QUANT_NONE; if (primitive_c == nullptr) { MS_LOG(WARNING) << "primitive_c is nullptr: " << cnode->fullname_with_scope(); } else { - curnode_quant_type = primitive_c->quant_type(); + curnode_quant_type = primitive_quant_param_holder->quant_type(); } - auto op_type = (schema::PrimitiveType)primitive_c->Type(); - if (op_type == schema::PrimitiveType_Gather) { + if (primitive_c->name() == ops::kNameGather) { continue; } for (size_t i = 1; i < cnode->inputs().size(); i++) { @@ -59,32 +60,39 @@ STATUS QuantCast::Run(const FuncGraphPtr &graph) { is_graph_input = true; } } - if (!input_node->isa<CNode>() && !is_graph_input) { + if (!input_node->isa<mindspore::CNode>() && !is_graph_input) { continue; } auto input_cnode_quant_type = schema::QuantType_QUANT_NONE; - std::shared_ptr<PrimitiveC> input_cnode_primitive_c = nullptr; + std::shared_ptr<ops::PrimitiveC> input_cnode_primitive_c = nullptr; if (!is_graph_input) { - auto input_cnode = std::dynamic_pointer_cast<CNode>(input_node); - input_cnode_primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0)); + auto input_cnode = std::dynamic_pointer_cast<mindspore::CNode>(input_node); + input_cnode_primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(input_cnode->input(0)); if (input_cnode_primitive_c == nullptr) { MS_LOG(DEBUG) << "input: " << i << " " << input_cnode->fullname_with_scope() << ": " << " PrimitiveC is null"; continue; } - input_cnode_quant_type = input_cnode_primitive_c->quant_type(); + auto input_primitive_quant_holder = 
GetCNodeQuantHolder(input_cnode_primitive_c); + MS_ASSERT(input_primitive_quant_holder != nullptr); + input_cnode_quant_type = input_primitive_quant_holder->quant_type(); } if (curnode_quant_type != input_cnode_quant_type) { ValueNodePtr value_node = nullptr; if (curnode_quant_type == schema::QuantType_PostTraining && input_cnode_quant_type == schema::QuantType_QUANT_NONE) { - value_node = - NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, primitive_c->input_quant_params()[i - 1]); + if (primitive_quant_param_holder->input_quant_params().size() < i) { + MS_LOG(ERROR) << "quant param is invalid."; + return RET_ERROR; + } + value_node = NewQuantCastValueNode(kNumberTypeFloat32, kNumberTypeInt8, + primitive_quant_param_holder->input_quant_params()[i - 1]); } else if (curnode_quant_type == schema::QuantType_QUANT_NONE && input_cnode_quant_type == schema::QuantType_PostTraining) { + auto input_primitive_quant_param_holder = GetCNodeQuantHolder(input_cnode_primitive_c); value_node = NewQuantCastValueNode(kNumberTypeInt8, kNumberTypeFloat32, - input_cnode_primitive_c->output_quant_params().front()); + input_primitive_quant_param_holder->output_quant_params().front()); } if (value_node == nullptr) { MS_LOG(WARNING) << "value_node is null! " diff --git a/mindspore/lite/tools/converter/quantizer/quant_cast.h b/mindspore/lite/tools/converter/quantizer/quant_cast.h index 164ea28d2c..bff6101c93 100644 --- a/mindspore/lite/tools/converter/quantizer/quant_cast.h +++ b/mindspore/lite/tools/converter/quantizer/quant_cast.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,10 +17,10 @@ #ifndef MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER__QUANT_CAST_H #define MINDSPORE_LITE_TOOLS_CONVERTER_QUANTIZER__QUANT_CAST_H -#include "mindspore/core/ir/anf.h" -#include "mindspore/lite/include/errorcode.h" +#include "include/errorcode.h" +#include "ir/anf.h" #include "ir/dtype/type_id.h" -#include "mindspore/core/ir/func_graph.h" +#include "ir/func_graph.h" namespace mindspore::lite::quant { class QuantCast { diff --git a/mindspore/lite/tools/converter/quantizer/quantize_util.cc b/mindspore/lite/tools/converter/quantizer/quantize_util.cc index 2e5dae9871..752b8605e4 100644 --- a/mindspore/lite/tools/converter/quantizer/quantize_util.cc +++ b/mindspore/lite/tools/converter/quantizer/quantize_util.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
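The GetCNodeQuantHolder helper added in quantize_util.cc below centralizes get-or-create handling of the "quant_params" attribute. Stripped of MindSpore's Primitive and Value types, the pattern is roughly the following (all names in this sketch are illustrative):

#include <map>
#include <memory>
#include <string>

// Illustrative stand-in for QuantParamHolder; the real one stores quant params and quant type.
struct HolderSketch {};

// Get-or-create: always hand back a usable holder, attaching a fresh one when the
// attribute is missing (the real helper also re-creates it when the cast to the
// expected holder type fails).
std::shared_ptr<HolderSketch> GetOrCreateHolder(std::map<std::string, std::shared_ptr<HolderSketch>> *attrs) {
  auto &slot = (*attrs)["quant_params"];
  if (slot == nullptr) {
    slot = std::make_shared<HolderSketch>();
  }
  return slot;
}

Because every call site goes through this accessor, quant parameters now ride on the unified-IR Primitive as an attribute instead of living on the old PrimitiveC class.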
@@ -23,34 +23,48 @@ #include <memory> #include <vector> #include <set> -#include "src/ops/primitive_c.h" -#include "mindspore/lite/tools/converter/quantizer/bitpacking.h" +#include "include/version.h" +#include "ops/concat.h" +#include "ops/crop.h" +#include "ops/eltwise.h" +#include "ops/fusion/activation.h" +#include "ops/fusion/add_fusion.h" +#include "ops/fusion/avg_pool_fusion.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" +#include "ops/fusion/full_connection.h" +#include "ops/fusion/layer_norm_fusion.h" +#include "ops/fusion/max_pool_fusion.h" +#include "ops/fusion/mul_fusion.h" +#include "ops/gather.h" +#include "ops/mat_mul.h" +#include "ops/reshape.h" +#include "ops/split.h" +#include "ops/transpose.h" +#include "ops/tuple_get_item.h" +#include "tools/anf_exporter/anf_exporter.h" +#include "tools/converter/quantizer/bitpacking.h" #include "src/common/utils.h" #include "abstract/abstract_value.h" #include "securec/include/securec.h" -#include "tools/anf_exporter/anf_exporter.h" -#include "mindspore/lite/include/version.h" using std::string; using std::vector; namespace mindspore::lite::quant { -const std::vector<schema::PrimitiveType> QuantStrategy::conv_types = { - schema::PrimitiveType_DeConv2D, schema::PrimitiveType_DeDepthwiseConv2D, schema::PrimitiveType_Conv2D, - schema::PrimitiveType_DepthwiseConv2D}; -const std::vector<schema::PrimitiveType> QuantStrategy::mul_types = {schema::PrimitiveType_MatMul, - schema::PrimitiveType_FullConnection}; -QuantStrategy::QuantStrategy(size_t weightSize, size_t convWeightQuantChannelThreshold) - : mWeightSize(weightSize), mConvWeightQuantChannelThreshold(convWeightQuantChannelThreshold) {} +const std::vector<std::string> QuantStrategy::conv_types_ = {ops::kNameConv2DFusion, ops::kNameConv2dTransposeFusion}; +const std::vector<std::string> QuantStrategy::mul_types_ = {ops::kNameMatMul, ops::kNameFullConnection}; +QuantStrategy::QuantStrategy(size_t weight_size, size_t conv_weight_quant_channel_threshold) + : m_weight_size_(weight_size), m_conv_weight_quant_channel_threshold_(conv_weight_quant_channel_threshold) {} bool QuantStrategy::CanConvOpQuantized(const CNodePtr &node) const { MS_ASSERT(node != nullptr); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(node->input(0)); + auto primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(node->input(0)); if (primitive_c == nullptr) { MS_LOG(ERROR) << "primitive_c is nullptr"; return false; } - if (!IsContain(conv_types, (schema::PrimitiveType)primitive_c->Type())) { + if (!IsContain(conv_types_, primitive_c->name())) { return false; } if (node->size() < 3) { @@ -74,12 +88,12 @@ bool QuantStrategy::CanConvOpQuantized(const CNodePtr &node) const { for (auto dim : weight_shape) { shapeSize = shapeSize * dim; } - if (shapeSize < mWeightSize) { + if (shapeSize < m_weight_size_) { MS_LOG(INFO) << "shapeSize Invalid!" << shapeSize; return false; } - if (weight_shape[0] <= static_cast<int>(mConvWeightQuantChannelThreshold)) { - MS_LOG(INFO) << "channel less mConvWeightQuantChannelThreshold!" << weight_shape[0]; + if (weight_shape[0] <= static_cast<int>(m_conv_weight_quant_channel_threshold_)) { + MS_LOG(INFO) << "channel less m_conv_weight_quant_channel_threshold_!" 
<< weight_shape[0]; return false; } return true; @@ -92,44 +106,30 @@ bool QuantStrategy::CanOpPostQuantized(AnfNodePtr &node) const { } auto cnode = std::dynamic_pointer_cast<mindspore::CNode>(node); auto type = NodePrimitiveType(cnode); - static const std::vector<schema::PrimitiveType> int8OpList = { - schema::PrimitiveType_Conv2D, - schema::PrimitiveType_DepthwiseConv2D, - schema::PrimitiveType_Add, - schema::PrimitiveType_Mul, - schema::PrimitiveType_Pooling, - schema::PrimitiveType_Concat, - schema::PrimitiveType_Split, - schema::PrimitiveType_TupleGetItem, - schema::PrimitiveType_Reshape, - schema::PrimitiveType_FullConnection, - schema::PrimitiveType_MatMul, - schema::PrimitiveType_Crop, - schema::PrimitiveType_DeDepthwiseConv2D, - schema::PrimitiveType_DeConv2D, - schema::PrimitiveType_Activation, - schema::PrimitiveType_Transpose, - schema::PrimitiveType_Eltwise, - schema::PrimitiveType_Gather, - schema::PrimitiveType_LayerNorm, + static const std::vector<std::string> int8OpList = { + ops::kNameAddFusion, ops::kNameActivation, ops::kNameAvgPoolFusion, + ops::kNameConcat, ops::kNameConv2DFusion, ops::kNameConv2dTransposeFusion, + ops::kNameCrop, ops::kNameEltwise, ops::kNameFullConnection, + ops::kNameGather, ops::kNameLayerNormFusion, ops::kNameMatMul, + ops::kNameMaxPoolFusion, ops::kNameMulFusion, ops::kNameReshape, + ops::kNameSplit, ops::kNameTranspose, ops::kNameTupleGetItem, }; bool contain = IsContain(int8OpList, type); if (!contain) { - MS_LOG(INFO) << "not quant, " << cnode->fullname_with_scope() - << " of type: " << schema::EnumNamePrimitiveType(type); + MS_LOG(INFO) << "not quant, " << cnode->fullname_with_scope() << " of type: " << type; } return contain; } bool QuantStrategy::CanMulOpQuantized(const CNodePtr &node) const { MS_ASSERT(node != nullptr); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(node->input(0)); + auto primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(node->input(0)); if (primitive_c == nullptr) { MS_LOG(ERROR) << "primitive_c is nullptr"; return false; } - if (!IsContain(mul_types, (schema::PrimitiveType)primitive_c->Type())) { + if (!IsContain(mul_types_, primitive_c->name())) { return false; } @@ -172,7 +172,7 @@ bool QuantStrategy::CanMulOpQuantized(const CNodePtr &node) const { for (auto dim : weight_shape) { shapeSize = shapeSize * dim; } - if (shapeSize < mWeightSize) { + if (shapeSize < m_weight_size_) { MS_LOG(INFO) << "shapeSize Invalid!" 
<< shapeSize; return false; } @@ -180,6 +180,23 @@ bool QuantStrategy::CanMulOpQuantized(const CNodePtr &node) const { return true; } +QuantParamHolderPtr GetCNodeQuantHolder(const PrimitivePtr &primitive) { + MS_ASSERT(primitive != nullptr); + QuantParamHolderPtr quant_params_holder = nullptr; + auto quant_params_valueptr = primitive->GetAttr("quant_params"); + if (quant_params_valueptr == nullptr) { + quant_params_holder = std::make_shared<QuantParamHolder>(); + primitive->AddAttr("quant_params", quant_params_holder); + } else { + quant_params_holder = quant_params_valueptr->cast<QuantParamHolderPtr>(); + if (quant_params_holder == nullptr) { + quant_params_holder = std::make_shared<QuantParamHolder>(); + primitive->AddAttr("quant_params", quant_params_holder); + } + } + return quant_params_holder; +} + STATUS CalQuantizationParams(schema::QuantParamT *quantParam, double mMin, double mMax, bool narrowRange, int quant_max, int quant_min, int num_bits) { MS_ASSERT(quantParam != nullptr); @@ -467,17 +484,17 @@ std::vector<int8_t> KMeans(float *data, size_t elem_count, size_t k, size_t epoc return clusters_index; } -schema::PrimitiveType NodePrimitiveType(const CNodePtr &cnode) { +std::string NodePrimitiveType(const CNodePtr &cnode) { if (cnode == nullptr) { MS_LOG(ERROR) << "cnode is null"; - return schema::PrimitiveType_NONE; + return ""; } - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); + auto primitive_c = GetValueNode<std::shared_ptr<ops::PrimitiveC>>(cnode->input(0)); if (primitive_c == nullptr) { MS_LOG(ERROR) << "primitive_c is null"; - return schema::PrimitiveType_NONE; + return ""; } - return (schema::PrimitiveType)primitive_c->Type(); + return primitive_c->name(); } std::vector<int> DataToVector(const string &str) { diff --git a/mindspore/lite/tools/converter/quantizer/quantize_util.h b/mindspore/lite/tools/converter/quantizer/quantize_util.h index 21f7417598..52e1adb556 100644 --- a/mindspore/lite/tools/converter/quantizer/quantize_util.h +++ b/mindspore/lite/tools/converter/quantizer/quantize_util.h @@ -27,8 +27,9 @@ #include <algorithm> #include <limits> #include <utility> +#include "ops/mat_mul.h" +#include "ops/fusion/full_connection.h" #include "tools/converter/quantizer/quantizer.h" -#include "src/ops/primitive_c.h" #include "include/errorcode.h" #include "ir/func_graph.h" #include "ir/anf.h" @@ -82,12 +83,12 @@ class QuantStrategy { bool CanMulOpQuantized(const CNodePtr &node) const; bool CanOpPostQuantized(AnfNodePtr &node) const; - size_t mWeightSize; - size_t mConvWeightQuantChannelThreshold; + size_t m_weight_size_; + size_t m_conv_weight_quant_channel_threshold_; private: - static const std::vector<schema::PrimitiveType> conv_types; - static const std::vector<schema::PrimitiveType> mul_types; + static const std::vector<std::string> conv_types_; + static const std::vector<std::string> mul_types_; }; constexpr float delta = 0.1; @@ -95,6 +96,8 @@ constexpr float ratio = 10.0; constexpr int percent = 10; constexpr int quant_param_size = 32 * 8; +QuantParamHolderPtr GetCNodeQuantHolder(const PrimitivePtr &primitive); + STATUS CalQuantizationParams(schema::QuantParamT *quantParam, double mMin, double mMax, bool narrowRange, int quant_max, int quant_min, int num_bits); @@ -326,11 +329,10 @@ STATUS DoBitPack(const ParamValueLitePtr &weight, const size_t &bit_num, const s } template <typename T> -STATUS QuantFilter(const ParamValueLitePtr &weight, const std::shared_ptr<PrimitiveC> &primitive_c, - QuantType quant_type, int quant_max, int 
quant_min, size_t bit_num, bool per_channel, int index = 1, - bool k_means = false) { +STATUS QuantFilter(const ParamValueLitePtr &weight, const PrimitivePtr &primitive, QuantType quant_type, int quant_max, + int quant_min, size_t bit_num, bool per_channel, int index = 1, bool k_means = false) { MS_ASSERT(weight != nullptr); - MS_ASSERT(primitive_c != nullptr); + MS_ASSERT(primitive != nullptr); auto dims = weight->tensor_shape(); if (per_channel) { if (dims.size() <= 1) { @@ -352,11 +354,11 @@ STATUS QuantFilter(const ParamValueLitePtr &weight, const std::shared_ptr<Primit int ret = RET_OK; if (per_channel) { bool channel_at_first = true; - auto op_type = (schema::PrimitiveType)primitive_c->Type(); - if (op_type == schema::PrimitiveType_MatMul && weight->tensor_shape().size() == 2) { - auto matmul_op = primitive_c->primitiveT()->value.AsMatMul(); - MS_ASSERT(matmul_op != nullptr); - channel_at_first = !(index == 1 && !matmul_op->transposeB); + if (primitive->name() == ops::kNameMatMul && weight->tensor_shape().size() == 2) { + auto matmul_prim = primitive->cast<std::shared_ptr<ops::MatMul>>(); + MS_ASSERT(matmul_prim != nullptr); + channel_at_first = + index != 1 || (matmul_prim->GetAttr(ops::kTransposeB) != nullptr && matmul_prim->get_transpose_b()); } // channel at first ret = DoPerChannelQuant<T>(weight, quant_type, &quant_params, quant_max, quant_min, bit_num, k_means, &quant_data, @@ -377,7 +379,7 @@ STATUS QuantFilter(const ParamValueLitePtr &weight, const std::shared_ptr<Primit #ifdef HUFFMAN_ENCODE auto huffman_encode = std::make_unique<lite::HuffmanEncode>(); - ret = huffman_encode->DoHuffmanEncode(weight, primitive_c, quant_datas.data(), bit_num); + ret = huffman_encode->DoHuffmanEncode(weight, primitive, quant_datas.data(), bit_num); if (ret != RET_OK) { MS_LOG(ERROR) << "Do huffman encode failed."; return ret; @@ -395,17 +397,18 @@ STATUS QuantFilter(const ParamValueLitePtr &weight, const std::shared_ptr<Primit MS_LOG(ERROR) << "quant_params empty"; return RET_ERROR; } + auto quant_param_holder = GetCNodeQuantHolder(primitive); if (quant_type == QuantType_PostTraining) { - primitive_c->AddInputQuantParam(quant_params); + quant_param_holder->AddInputQuantParam(quant_params); } else { - primitive_c->set_input_quant_param(index, quant_params); + quant_param_holder->set_input_quant_param(index, quant_params); } return ret; } // utils -schema::PrimitiveType NodePrimitiveType(const CNodePtr &cnode); +std::string NodePrimitiveType(const CNodePtr &cnode); STATUS ParseConfigFile(std::string config_file, PostQuantConfig *post_quant_config); diff --git a/mindspore/lite/tools/converter/quantizer/quantizer.cc b/mindspore/lite/tools/converter/quantizer/quantizer.cc index 6613ae042c..dccac73528 100644 --- a/mindspore/lite/tools/converter/quantizer/quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/quantizer.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
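weight_quantizer.cc below keeps the pre-existing symmetric range computation for a signed bit width. As a standalone worked example of those two formulas (illustrative, not part of the patch):

#include <cstdio>

// For bit_num bits, the symmetric signed range is [-(2^(bit_num-1)), 2^(bit_num-1) - 1]:
// bit_num = 8 gives [-128, 127]; bit_num = 2, the floor of the mixed-quant search loop, gives [-2, 1].
int main() {
  for (int bit_num = 2; bit_num <= 8; ++bit_num) {
    int quant_max = (1 << (bit_num - 1)) - 1;
    int quant_min = -(1 << (bit_num - 1));
    std::printf("bit_num=%d -> [%d, %d]\n", bit_num, quant_min, quant_max);
  }
  return 0;
}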
@@ -15,7 +15,6 @@ */ #include "mindspore/lite/tools/converter/quantizer/quantizer.h" -#include "schema/inner/model_generated.h" namespace mindspore::lite::quant { diff --git a/mindspore/lite/tools/converter/quantizer/quantizer.h b/mindspore/lite/tools/converter/quantizer/quantizer.h index 3bb576bd68..02ad8c767f 100644 --- a/mindspore/lite/tools/converter/quantizer/quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/quantizer.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,6 +27,7 @@ #include "base/base.h" #include "src/param_value_lite.h" #include "tools/converter/converter_flags.h" +#include "tools/converter/quant_param_holder.h" namespace mindspore::lite::quant { using STATUS = int; diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc index 8a6641eaa6..6eaf64a77f 100644 --- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc +++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,6 +19,7 @@ #include <string> #include <vector> #include <unordered_map> +#include "tools/optimizer/common/gllo_utils.h" #include "src/common/common.h" using std::string; @@ -32,10 +33,10 @@ WeightQuantizer::WeightQuantizer(FuncGraphPtr graph, const PostQuantConfig &conf WeightQuantizer::WeightQuantizer(FuncGraphPtr graph, const converter::Flags &config) : Quantizer(graph) { this->config_file_ = config.configFile; - auto quantSize = config.quantWeightSize; + auto quant_size = config.quantWeightSize; this->bit_num_ = config.bitNum; auto convQuantWeightChannelThreshold = config.quantWeightChannel; - quant_strategy_ = std::make_unique<QuantStrategy>(quantSize, convQuantWeightChannelThreshold); + quant_strategy_ = std::make_unique<QuantStrategy>(quant_size, convQuantWeightChannelThreshold); quant_max_ = (1 << (unsigned int)(this->bit_num_ - 1)) - 1; quant_min_ = -(1 << (unsigned int)(this->bit_num_ - 1)); // parse type_id_ @@ -57,7 +58,7 @@ WeightQuantizer::~WeightQuantizer() { } STATUS WeightQuantizer::SetAbstract(ParamValueLitePtr param_value, ParameterPtr param_node, - std::shared_ptr<PrimitiveC> primitive_c) { + const PrimitivePtr &primitive) { // set dtype param_value->set_tensor_type(type_id_); auto abstract_base = param_node->abstract(); @@ -71,15 +72,16 @@ STATUS WeightQuantizer::SetAbstract(ParamValueLitePtr param_value, ParameterPtr } auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract_base); abstract_tensor->element()->set_type(TypeIdToType(type_id_)); - primitive_c->set_quant_type(schema::QuantType_WeightQuant); + auto quant_param_holder = GetCNodeQuantHolder(primitive); + quant_param_holder->set_quant_type(schema::QuantType_WeightQuant); return RET_OK; } STATUS WeightQuantizer::DoConvQuantize(CNodePtr cnode) { - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr"; 
return RET_ERROR; } @@ -103,11 +105,10 @@ STATUS WeightQuantizer::DoConvQuantize(CNodePtr cnode) { } auto status = RET_ERROR; if (type_id_ == kNumberTypeInt8) { - status = - QuantFilter<int8_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true); + status = QuantFilter<int8_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true); } else if (type_id_ == kNumberTypeInt16) { status = - QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true); + QuantFilter<int16_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true); } if (status == RET_CONTINUE) { return RET_OK; @@ -115,7 +116,7 @@ STATUS WeightQuantizer::DoConvQuantize(CNodePtr cnode) { MS_LOG(ERROR) << "QuantFilter failed : " << status; return status; } - status = SetAbstract(param_value, param_node, primitive_c); + status = SetAbstract(param_value, param_node, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "SetAbstract failed : " << status; return RET_ERROR; @@ -163,19 +164,19 @@ STATUS WeightQuantizer::DoMulQuantize(CNodePtr cnode) { return RET_OK; } - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is nullptr"; return RET_ERROR; } auto status = RET_ERROR; if (type_id_ == kNumberTypeInt8) { - status = QuantFilter<int8_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, - true, index - 1); + status = QuantFilter<int8_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true, + index - 1); } else if (type_id_ == kNumberTypeInt16) { - status = QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, - true, index - 1); + status = QuantFilter<int16_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, true, + index - 1); } if (status == RET_CONTINUE) { return RET_OK; @@ -183,7 +184,7 @@ STATUS WeightQuantizer::DoMulQuantize(CNodePtr cnode) { MS_LOG(ERROR) << "QuantFilter failed : " << status; return status; } - status = SetAbstract(param_value, param_node, primitive_c); + status = SetAbstract(param_value, param_node, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "SetAbstract failed : " << status; return RET_ERROR; @@ -196,26 +197,26 @@ STATUS WeightQuantizer::DoLstmQuantize(CNodePtr cnode) { MS_ASSERT(cnode != nullptr); auto op_name = cnode->fullname_with_scope(); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - MS_ASSERT(primitive_c != nullptr); + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + MS_ASSERT(primitive != nullptr); if (cnode->inputs().size() < 4) { MS_LOG(ERROR) << op_name << " inputs is " << cnode->inputs().size(); return RET_ERROR; } - auto status = ProcessLstmWeightByIndex(cnode, primitive_c, 2); + auto status = ProcessLstmWeightByIndex(cnode, primitive, 2); if (status != RET_OK) { MS_LOG(ERROR) << "Process lstm weight i failed."; return RET_ERROR; } - status = ProcessLstmWeightByIndex(cnode, primitive_c, 3); + status = ProcessLstmWeightByIndex(cnode, primitive, 3); if (status != RET_OK) { MS_LOG(ERROR) << "Process lstm weight h failed."; return RET_ERROR; } if (cnode->inputs().size() > 4) { - status = ProcessLstmWeightByIndex(cnode, 
primitive_c, 4); + status = ProcessLstmWeightByIndex(cnode, primitive, 4); if (status != RET_OK) { MS_LOG(ERROR) << "Process lstm bias failed."; return RET_ERROR; @@ -226,8 +227,8 @@ STATUS WeightQuantizer::DoLstmQuantize(CNodePtr cnode) { } STATUS WeightQuantizer::DoGatherQuantize(CNodePtr cnode) { - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - MS_ASSERT(primitive_c != nullptr); + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + MS_ASSERT(primitive != nullptr); auto first_input = cnode->input(1); ParameterPtr param_node; @@ -238,19 +239,19 @@ STATUS WeightQuantizer::DoGatherQuantize(CNodePtr cnode) { return RET_OK; } - if (param_value->tensor_size() / 4 < quant_strategy_->mWeightSize) { + if (param_value->tensor_size() / 4 < quant_strategy_->m_weight_size_) { MS_LOG(INFO) << cnode->fullname_with_scope() << " param cnt: " << param_value->tensor_size() / 4 << " < " - << quant_strategy_->mWeightSize; + << quant_strategy_->m_weight_size_; return RET_OK; } auto status = RET_ERROR; if (type_id_ == kNumberTypeInt8) { status = - QuantFilter<int8_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, 0); + QuantFilter<int8_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, 0); } else if (type_id_ == kNumberTypeInt16) { status = - QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, 0); + QuantFilter<int16_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, 0); } if (status == RET_CONTINUE) { return RET_OK; @@ -258,7 +259,7 @@ STATUS WeightQuantizer::DoGatherQuantize(CNodePtr cnode) { MS_LOG(ERROR) << "QuantFilter failed : " << status; return status; } - status = SetAbstract(param_value, param_node, primitive_c); + status = SetAbstract(param_value, param_node, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "SetAbstract failed : " << status; return RET_ERROR; @@ -266,7 +267,7 @@ STATUS WeightQuantizer::DoGatherQuantize(CNodePtr cnode) { return RET_OK; } -STATUS WeightQuantizer::ProcessLstmWeightByIndex(const CNodePtr &cnode, const std::shared_ptr<PrimitiveC> &primitive_c, +STATUS WeightQuantizer::ProcessLstmWeightByIndex(const CNodePtr &cnode, const PrimitivePtr &primitive, const int &index) { auto op_name = cnode->fullname_with_scope(); auto weight_i = cnode->input(index); @@ -281,17 +282,17 @@ STATUS WeightQuantizer::ProcessLstmWeightByIndex(const CNodePtr &cnode, const st MS_LOG(WARNING) << "param_value tensor type is: " << param_value->tensor_type() << " not quant"; return RET_OK; } - if (param_value->tensor_size() / 4 < quant_strategy_->mWeightSize) { + if (param_value->tensor_size() / 4 < quant_strategy_->m_weight_size_) { MS_LOG(INFO) << op_name << " weight_i cnt: " << param_value->tensor_size() / 4 << " < " - << quant_strategy_->mWeightSize; + << quant_strategy_->m_weight_size_; return RET_OK; } auto status = RET_ERROR; if (type_id_ == kNumberTypeInt8) { - status = QuantFilter<int8_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, - false, index - 1); + status = QuantFilter<int8_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, false, + index - 1); } else if (type_id_ == kNumberTypeInt16) { - status = QuantFilter<int16_t>(param_value, primitive_c, QuantType_WeightQuant, quant_max_, quant_min_, bit_num_, + status = QuantFilter<int16_t>(param_value, primitive, QuantType_WeightQuant, quant_max_, 
quant_min_, bit_num_, false, index - 1); } if (status == RET_CONTINUE) { @@ -300,7 +301,7 @@ STATUS WeightQuantizer::ProcessLstmWeightByIndex(const CNodePtr &cnode, const st MS_LOG(ERROR) << "QuantFilter failed : " << status; return status; } - status = SetAbstract(param_value, param_node, primitive_c); + status = SetAbstract(param_value, param_node, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "SetAbstract failed : " << status; return RET_ERROR; @@ -447,14 +448,13 @@ STATUS WeightQuantizer::DoMixedQuantize(const FuncGraphPtr &func_graph) { auto cnodes = func_graph->GetOrderedCnodes(); int status = RET_OK; for (auto &cnode : cnodes) { - auto op_type = NodePrimitiveType(cnode); - if (op_type == schema::PrimitiveType_Lstm) { + if (opt::CheckPrimitiveType(cnode, prim::kPrimLstm)) { status = DoLstmQuantize(cnode); if (status != RET_OK) { MS_LOG(ERROR) << "DoLstmQuantize error"; return RET_ERROR; } - } else if (op_type == schema::PrimitiveType_Gather) { + } else if (opt::CheckPrimitiveType(cnode, prim::kPrimGather)) { status = DoGatherQuantize(cnode); if (status != RET_OK) { MS_LOG(ERROR) << "DoGatherQuantize error"; @@ -497,17 +497,17 @@ STATUS WeightQuantizer::GetParamNodeAndValue(const std::shared_ptr<AnfNode> &inp return RET_OK; } STATUS WeightQuantizer::TryQuant(const int &bit_num_t, const ParameterPtr &param_node, - const ParamValueLitePtr &param_value, const std::shared_ptr<PrimitiveC> &primitive_c) { + const ParamValueLitePtr &param_value, const PrimitivePtr &primitive) { int status; type_id_ = TypeId::kNumberTypeInt8; int quant_max_t = (1 << (unsigned int)(bit_num_t - 1)) - 1; int quant_min_t = -(1 << (unsigned int)(bit_num_t - 1)); if (type_id_ == TypeId::kNumberTypeInt8) { - status = QuantFilter<int8_t>(param_value, primitive_c, QuantType::QuantType_WeightQuant, quant_max_t, quant_min_t, + status = QuantFilter<int8_t>(param_value, primitive, QuantType::QuantType_WeightQuant, quant_max_t, quant_min_t, bit_num_t, true); } else if (type_id_ == TypeId::kNumberTypeInt16) { - status = QuantFilter<int16_t>(param_value, primitive_c, QuantType::QuantType_WeightQuant, quant_max_t, quant_min_t, + status = QuantFilter<int16_t>(param_value, primitive, QuantType::QuantType_WeightQuant, quant_max_t, quant_min_t, bit_num_t, true); } else { MS_LOG(ERROR) << "unexpected type_id_: " << type_id_; @@ -519,7 +519,7 @@ STATUS WeightQuantizer::TryQuant(const int &bit_num_t, const ParameterPtr &param MS_LOG(ERROR) << "quant filter failed."; return RET_ERROR; } - status = SetAbstract(param_value, param_node, primitive_c); + status = SetAbstract(param_value, param_node, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "SetAbstract failed : " << status; return RET_ERROR; @@ -532,14 +532,13 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) { int status = RET_OK; for (auto iter = cnodes.end(); iter != cnodes.begin();) { auto cnode = *(--iter); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is null."; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(ERROR) << "primitive is null."; return RET_ERROR; } auto op_name = cnode->fullname_with_scope(); - MS_LOG(DEBUG) << "process node: " << op_name - << " type: " << schema::EnumNamePrimitiveType((schema::PrimitiveType)primitive_c->Type()); + MS_LOG(DEBUG) << "process node: " << op_name << " type: " << primitive->name(); if (quant_strategy_->CanConvOpQuantized(cnode) || 
quant_strategy_->CanMulOpQuantized(cnode)) { auto input_node = cnode->input(2); ParameterPtr param_node; @@ -560,7 +559,7 @@ STATUS WeightQuantizer::DoQuantSearch(const FuncGraphPtr &func_graph) { } // 1. try quant for (int bit_num_t = 2; bit_num_t <= 8; bit_num_t++) { - status = TryQuant(bit_num_t, param_node, param_value, primitive_c); + status = TryQuant(bit_num_t, param_node, param_value, primitive); if (status != RET_OK) { MS_LOG(ERROR) << "TryQuant failed."; return RET_ERROR; @@ -673,13 +672,12 @@ STATUS WeightQuantizer::DoMixedQuant(FuncGraphPtr func_graph) { STATUS WeightQuantizer::DoFixedQuant(FuncGraphPtr func_graph) { MS_ASSERT(func_graph != nullptr); for (auto &cnode : func_graph->GetOrderedCnodes()) { - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(DEBUG) << cnode->fullname_with_scope() << " : primitive_c is nullptr"; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (primitive == nullptr) { + MS_LOG(DEBUG) << cnode->fullname_with_scope() << " : primitive is nullptr"; continue; } auto op_name = cnode->fullname_with_scope(); - auto op_type = (schema::PrimitiveType)primitive_c->Type(); if (quant_strategy_->CanConvOpQuantized(cnode)) { auto status = DoConvQuantize(cnode); @@ -693,20 +691,20 @@ STATUS WeightQuantizer::DoFixedQuant(FuncGraphPtr func_graph) { MS_LOG(ERROR) << "DoMulQuantize error"; return RET_ERROR; } - } else if (op_type == schema::PrimitiveType_Lstm) { + } else if (opt::CheckPrimitiveType(cnode, prim::kPrimLstm)) { auto status = DoLstmQuantize(cnode); if (status != RET_OK) { MS_LOG(ERROR) << "DoLstmQuantize error"; return RET_ERROR; } - } else if (op_type == schema::PrimitiveType_Gather) { + } else if (opt::CheckPrimitiveType(cnode, prim::kPrimGather)) { auto status = DoGatherQuantize(cnode); if (status != RET_OK) { MS_LOG(ERROR) << "DoGatherQuantize error"; return RET_ERROR; } } else { - MS_LOG(DEBUG) << op_name << " of type: " << schema::EnumNamePrimitiveType(op_type) << " no need quant"; + MS_LOG(DEBUG) << op_name << " of type: " << primitive->name() << " no need quant"; } } return RET_OK; diff --git a/mindspore/lite/tools/converter/quantizer/weight_quantizer.h b/mindspore/lite/tools/converter/quantizer/weight_quantizer.h index 791c3c9bc1..bc3f0d27e8 100644 --- a/mindspore/lite/tools/converter/quantizer/weight_quantizer.h +++ b/mindspore/lite/tools/converter/quantizer/weight_quantizer.h @@ -46,8 +46,7 @@ class WeightQuantizer : public Quantizer { STATUS DoLstmQuantize(CNodePtr cnode); STATUS DoGatherQuantize(CNodePtr cnode); - STATUS ProcessLstmWeightByIndex(const CNodePtr &cnode, const std::shared_ptr<PrimitiveC> &primitive_c, - const int &index); + STATUS ProcessLstmWeightByIndex(const CNodePtr &cnode, const PrimitivePtr &primitive, const int &index); int quant_max_{127}; int quant_min_{-128}; @@ -63,7 +62,7 @@ class WeightQuantizer : public Quantizer { std::vector<std::unordered_map<std::string, mindspore::tensor::MSTensor *>> fp32_output_tensors_; STATUS DoMixedQuant(FuncGraphPtr); - STATUS SetAbstract(ParamValueLitePtr param_value, ParameterPtr param_node, std::shared_ptr<PrimitiveC> primitive_c); + STATUS SetAbstract(ParamValueLitePtr param_value, ParameterPtr param_node, const PrimitivePtr &primitive); STATUS DoFixedQuant(FuncGraphPtr); STATUS RunFp32Graph(FuncGraphPtr); @@ -72,7 +71,7 @@ class WeightQuantizer : public Quantizer { STATUS GetParamNodeAndValue(const std::shared_ptr<AnfNode> &input_node, const std::string &op_name, ParameterPtr *param_node, 
ParamValueLitePtr *param_value); STATUS TryQuant(const int &bit_num_t, const ParameterPtr &param_node, const ParamValueLitePtr &param_value, - const std::shared_ptr<PrimitiveC> &primitive_c); + const PrimitivePtr &primitive); STATUS DoQuantSearch(const FuncGraphPtr &func_graph); }; } // namespace mindspore::lite::quant diff --git a/mindspore/lite/tools/cropper/build_cropper_config.sh b/mindspore/lite/tools/cropper/build_cropper_config.sh index 35a8c1dcac..8b1caeb7e0 100644 --- a/mindspore/lite/tools/cropper/build_cropper_config.sh +++ b/mindspore/lite/tools/cropper/build_cropper_config.sh @@ -105,7 +105,6 @@ getCommonFile() { while IFS='' read -r line; do runtime_files_h+=("$line"); done < <(ls ${MINDSPORE_HOME}/mindspore/lite/src/runtime/*.h) others_files_h=( "${MINDSPORE_HOME}"/mindspore/lite/src/populate/populate_register.h - "${MINDSPORE_HOME}"/mindspore/lite/src/ops/primitive_c.h "${MINDSPORE_HOME}"/mindspore/lite/nnacl/nnacl_utils.h "${MINDSPORE_HOME}"/mindspore/lite/nnacl/pack.h "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.h @@ -131,7 +130,6 @@ getCommonFile() { assembly_files=() while IFS='' read -r line; do assembly_files+=("$line"); done < <(ls ${MINDSPORE_HOME}/mindspore/lite/nnacl/assembly/*/*.S) others_files_c=( - "${MINDSPORE_HOME}"/mindspore/lite/src/ops/primitive_c.cc "${MINDSPORE_HOME}"/mindspore/lite/nnacl/nnacl_utils.c "${MINDSPORE_HOME}"/mindspore/lite/src/runtime/kernel/arm/fp16/common_fp16.cc "${MINDSPORE_HOME}"/mindspore/lite/src/ops/populate/arithmetic_populate.cc diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.cc b/mindspore/lite/tools/optimizer/common/gllo_utils.cc index c1d18337a6..2cadc425fb 100644 --- a/mindspore/lite/tools/optimizer/common/gllo_utils.cc +++ b/mindspore/lite/tools/optimizer/common/gllo_utils.cc @@ -20,7 +20,7 @@ #include <unordered_map> #include <functional> #include <string> -#include "src/ops/primitive_c.h" +#include "ops/fusion/conv2d_fusion.h" #include "src/common/common.h" #include "frontend/operator/ops.h" #include "backend/optimizer/common/helper.h" @@ -51,10 +51,10 @@ bool IsRealKernel(const AnfNodePtr &node) { auto input = cnode->inputs()[0]; bool is_virtual_node = IsPrimitive(input, prim::kPrimImageSummary) || IsPrimitive(input, prim::kPrimScalarSummary) || IsPrimitive(input, prim::kPrimTensorSummary) || - IsPrimitive(input, prim::kPrimHistogramSummary) || IsPrimitive(input, prim::kPrimMakeTuple) || + IsPrimitive(input, prim::kPrimHistogramSummary) || IsPrimitive(input, kPrimMakeTuple) || IsPrimitive(input, prim::kPrimStateSetItem) || IsPrimitive(input, prim::kPrimDepend) || IsPrimitive(input, prim::kPrimTupleGetItem) || IsPrimitive(input, prim::kPrimControlDepend) || - IsPrimitive(input, prim::kPrimReturn) || IsPrimitive(input, prim::kPrimPartial); + IsPrimitive(input, kPrimReturn) || IsPrimitive(input, prim::kPrimPartial); return !is_virtual_node; } @@ -136,20 +136,76 @@ bool CheckInputs(const CNodePtr &cnode) { return true; } +std::vector<int> CastToInt(const ValuePtr &value) { + if (value == nullptr) { + MS_LOG(WARNING) << "valueptr is nullptr."; + return {}; + } + std::vector<int> cur_value = {}; + if (utils::isa<ValueSequeuePtr>(value)) { + if (!value->cast<ValueSequeuePtr>()->value().empty()) { + if (value->cast<ValueSequeuePtr>()->value().front()->type()->number_type() == kNumberTypeInt64) { + auto origin_value = GetValue<std::vector<int64_t>>(value); + for (size_t index = 0; index < origin_value.size(); ++index) { + cur_value.push_back(static_cast<int>(origin_value[index])); + } + } 
else { + cur_value = GetValue<std::vector<int>>(value); + } + } + } else { + if (value->type()->number_type() == kNumberTypeInt64) { + cur_value.push_back(static_cast<int>(GetValue<int64_t>(value))); + } else { + cur_value.push_back(GetValue<int>(value)); + } + } + return cur_value; +} + +std::vector<std::vector<int>> CastToVec2DInt(const ValuePtr &value) { + if (value == nullptr) { + MS_LOG(WARNING) << "valueptr is nullptr."; + return {}; + } + + std::vector<std::vector<int>> result_value; + if (utils::isa<ValueSequeuePtr>(value)) { + if (value->cast<ValueSequeuePtr>() + ->value() + .front() + ->cast<ValueSequeuePtr>() + ->value() + .front() + ->type() + ->number_type() == kNumberTypeInt64) { + auto origin_value = GetValue<std::vector<std::vector<int64_t>>>(value); + for (size_t i = 0; i < origin_value.size(); ++i) { + std::vector<int> cur_value; + for (size_t j = 0; j < origin_value.at(i).size(); ++j) { + cur_value.push_back(static_cast<int>(origin_value[i][j])); + } + result_value.push_back(cur_value); + } + } else { + result_value = GetValue<std::vector<std::vector<int>>>(value); + } + } + return result_value; +} + bool CheckPrimitiveType(const AnfNodePtr &node, const PrimitivePtr &primitive_type) { if (node == nullptr) { lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR); return false; } - if (!node->isa<CNode>()) { - return false; - } - auto cnode = node->cast<CNodePtr>(); - if (cnode == nullptr) { - lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR); - return false; + if (node->isa<CNode>()) { + auto cnode = node->cast<CNodePtr>(); + return IsPrimitive(cnode->input(kAnfPrimitiveIndex), primitive_type); + } else if (node->isa<ValueNode>()) { + return IsPrimitive(node, primitive_type); } - return IsPrimitive(cnode->input(kAnfPrimitiveIndex), primitive_type); + return false; } bool AnfEqualPrimitive(AnfNodePtr a_node, AnfNodePtr b_node) { @@ -173,7 +229,7 @@ bool AnfEqualPrimitive(AnfNodePtr a_node, AnfNodePtr b_node) { lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR); return false; } - return a_prim->cast<PrimitiveCPtr>()->Type() == b_prim->cast<PrimitiveCPtr>()->Type(); + return a_prim->name() == b_prim->name(); } bool AnfEqualValueNode(AnfNodePtr a_node, AnfNodePtr b_node) { @@ -192,9 +248,9 @@ bool AnfEqualValueNode(AnfNodePtr a_node, AnfNodePtr b_node) { return false; } - if (utils::isa<lite::PrimitiveC>(a_value_ptr) && utils::isa<lite::PrimitiveC>(b_value_ptr)) { - auto a_obj = (lite::PrimitiveC *)(a_value_ptr.get()); - auto b_obj = (lite::PrimitiveC *)(b_value_ptr.get()); + if (utils::isa<ops::PrimitiveC>(a_value_ptr) && utils::isa<ops::PrimitiveC>(b_value_ptr)) { + auto a_obj = (ops::PrimitiveC *)(a_value_ptr.get()); + auto b_obj = (ops::PrimitiveC *)(b_value_ptr.get()); return (*a_obj) == (*b_obj); } else { return (*a_value_ptr) == (*b_value_ptr); @@ -216,10 +272,10 @@ bool AnfEqual(const BaseRef &a, const BaseRef &b) { return AnfEqualValueNode(a_node, b_node); } } - if (a.m_ptr->isa<lite::PrimitiveC>() && b.m_ptr->isa<lite::PrimitiveC>()) { + if (a.m_ptr->isa<mindspore::ops::PrimitiveC>() && b.m_ptr->isa<mindspore::ops::PrimitiveC>()) { auto a_value_node_ptr = a.m_ptr->cast<PrimitiveCPtr>(); auto b_value_node_ptr = b.m_ptr->cast<PrimitiveCPtr>(); - return a_value_node_ptr->Type() == b_value_node_ptr->Type(); + return a_value_node_ptr->name() == b_value_node_ptr->name(); } return a == b; @@ -276,7 +332,7 @@ bool IsRealCNodeKernel(const AnfNodePtr &node) { return false; } // return considered as 
a real node - if (CheckPrimitiveType(node, prim::kPrimReturn)) { + if (CheckPrimitiveType(node, kPrimReturn)) { return true; } return IsRealKernel(node); @@ -395,35 +451,6 @@ ParameterPtr AddNewBiasNode(float *bias_data, const FuncGraphPtr &func_graph, in return bias_parameter; } -schema::PrimitiveType GetCNodeType(const BaseRef &n) { - ValueNodePtr value_node; - if (utils::isa<CNodePtr>(n)) { - auto in = utils::cast<CNodePtr>(n); - value_node = in->input(0)->cast<ValueNodePtr>(); - } else if (utils::isa<ValueNodePtr>(n)) { - value_node = utils::cast<ValueNodePtr>(n); - } else { - MS_LOG(INFO) << "only value node or cnode has type"; - return schema::PrimitiveType_NONE; - } - if (value_node == nullptr) { - lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR); - return schema::PrimitiveType_NONE; - } - auto value = value_node->value(); - MS_ASSERT(value != nullptr); - if (utils::isa<PrimitiveCPtr>(value)) { - auto primitive = value->cast<PrimitiveCPtr>(); - MS_ASSERT(primitive != nullptr); - return (schema::PrimitiveType)primitive->Type(); - } else if (utils::isa<Primitive>(value)) { - auto primitive = value->cast<PrimitivePtr>(); - MS_ASSERT(primitive != nullptr); - MS_LOG(INFO) << "anf primitive node type:" << primitive->name(); - return schema::PrimitiveType_NONE; - } - return schema::PrimitiveType_NONE; -} ParamValueLitePtr GetLiteParamValue(const AnfNodePtr &node) { MS_ASSERT(node != nullptr); if (!utils::isa<ParameterPtr>(node)) { @@ -464,7 +491,7 @@ AbstractBasePtr GetCNodeInputAbstract(const CNodePtr &cnode, size_t index) { abstract = parameter->abstract(); } else if (utils::isa<CNodePtr>(input)) { auto input_cnode = input->cast<CNodePtr>(); - if (GetCNodeType(input_cnode) == schema::PrimitiveType_TupleGetItem) { + if (CheckPrimitiveType(input_cnode, prim::kPrimTupleGetItem)) { auto tuple_inputs = input_cnode->inputs(); MS_ASSERT(tuple_inputs.size() == kTupleGetItemInputSize); auto get_item_input_cnode = tuple_inputs.at(1); @@ -504,34 +531,45 @@ bool IsParamNode(const BaseRef &n) { } bool IsConvNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Conv2D || type == schema::PrimitiveType_DepthwiseConv2D || - type == schema::PrimitiveType_DeConv2D; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + PrimitivePtr prim; + if (utils::isa<CNodePtr>(anf_node)) { + prim = GetValueNode<PrimitivePtr>(anf_node->cast<CNodePtr>()->input(0)); + } + if (utils::isa<ValueNodePtr>(anf_node)) { + prim = GetValueNode<PrimitivePtr>(anf_node); + } + if (prim == nullptr) { + return false; + } + bool is_depth_wise = + prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise)); + return CheckPrimitiveType(anf_node, prim::kPrimConv2DFusion) || + (CheckPrimitiveType(anf_node, prim::kPrimConv2dTransposeFusion) && !is_depth_wise); } return false; } bool IsPoolingNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Pooling; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimAvgPoolFusion) || + CheckPrimitiveType(anf_node, prim::kPrimMaxPoolFusion); } return false; } bool IsActivationNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == 
schema::PrimitiveType_Activation; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimActivation); } return false; } bool IsQuantNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_QuantDTypeCast; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimQuantDTypeCast); } return false; } @@ -627,7 +665,7 @@ size_t GetTupleGetItemOutIndex(const CNodePtr &tuple_get_item) { MS_ASSERT(output_index_value_node != nullptr); auto value_node = output_index_value_node->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); - return IntToSize(lite::CastToInt(value_node->value()).front()); + return IntToSize(CastToInt(value_node->value()).front()); } std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOutputIdx(const FuncGraphPtr &graph, const AnfNodePtr &node, @@ -645,9 +683,9 @@ std::shared_ptr<std::vector<std::pair<AnfNodePtr, int>>> GetRealNodeUsedListByOu auto output_info_list = iter->second; for (const auto &output_info : output_info_list) { size_t used_output_index; - if (GetCNodeType(output_info.first) == schema::PrimitiveType_TupleGetItem) { + if (CheckPrimitiveType(output_info.first, prim::kPrimTupleGetItem)) { used_output_index = GetTupleGetItemOutIndex(utils::cast<CNodePtr>(output_info.first)); - } else if (GetCNodeType(node) == schema::PrimitiveType_TupleGetItem) { + } else if (CheckPrimitiveType(node, prim::kPrimTupleGetItem)) { used_output_index = output_index; } else { if (output_index != 0) { @@ -1088,50 +1126,45 @@ STATUS TransFilterFormat(const ParamValueLitePtr &tensor, schema::Format dst_for MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to " << EnumNameFormat(dst_format); return RET_ERROR; - } else { - status = TransFilterFormatWithType(tensor, data_type, - khwc_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } + status = TransFilterFormatWithType(tensor, data_type, + khwc_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } break; case schema::Format::Format_HWCK: { if (hwck_trans_maps.find(static_cast<const schema::Format>(src_format)) == hwck_trans_maps.end()) { MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to " << EnumNameFormat(dst_format); return RET_ERROR; - } else { - status = TransFilterFormatWithType(tensor, data_type, - hwck_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } + status = TransFilterFormatWithType(tensor, data_type, + hwck_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } break; case schema::Format::Format_KCHW: { if (kchw_trans_maps.find(static_cast<const schema::Format>(src_format)) == kchw_trans_maps.end()) { MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to " << EnumNameFormat(dst_format); return RET_ERROR; - } else { - status = TransFilterFormatWithType(tensor, data_type, - kchw_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } + status = TransFilterFormatWithType(tensor, data_type, + kchw_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } break; case schema::Format::Format_CKHW: { if (ckhw_trans_maps.find(static_cast<const schema::Format>(src_format)) == ckhw_trans_maps.end()) { 
MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to " << EnumNameFormat(dst_format); return RET_ERROR; - } else { - status = TransFilterFormatWithType(tensor, data_type, - ckhw_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } + status = TransFilterFormatWithType(tensor, data_type, + ckhw_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } break; case schema::Format::Format_CHWK: { if (chwk_trans_maps.find(static_cast<const schema::Format>(src_format)) == chwk_trans_maps.end()) { MS_LOG(ERROR) << "Unsupported transform from " << EnumNameFormat(static_cast<schema::Format>(src_format)) << " to " << EnumNameFormat(dst_format); return RET_ERROR; - } else { - status = TransFilterFormatWithType(tensor, data_type, - chwk_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } + status = TransFilterFormatWithType(tensor, data_type, + chwk_trans_maps.find(static_cast<const schema::Format>(src_format))->second); } break; default: MS_LOG(ERROR) << "Unsupported transform from " << src_format << " to " << EnumNameFormat(dst_format); @@ -1144,6 +1177,71 @@ STATUS TransFilterFormat(const ParamValueLitePtr &tensor, schema::Format dst_for return RET_OK; } +ParameterPtr BuildParameterNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const ParamValueLitePtr &param_value) { + MS_ASSERT(func_graph != nullptr); + MS_ASSERT(cnode != nullptr); + MS_ASSERT(param_value != nullptr); + auto param_node = func_graph->add_parameter(); + auto shape = param_value->tensor_shape(); + std::vector<int64_t> shape_vector; + std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), + [](const int &val) { return static_cast<int64_t>(val); }); + auto data_type = param_value->tensor_type() == kNumberTypeInt64 ? kNumberTypeInt32 : param_value->tensor_type(); + auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(TypeIdToType(data_type), shape_vector); + param_node->set_abstract(abstract_tensor); + if (utils::isa<CNodePtr>(node)) { + param_node->set_name(node->cast<CNodePtr>()->fullname_with_scope()); + } else if (utils::isa<ParameterPtr>(node)) { + param_node->set_name(node->cast<ParameterPtr>()->name()); + } + ParamValueLitePtr param_value_new = std::make_shared<ParamValueLite>(); + param_value_new->set_format(param_value->format()); + param_value_new->set_tensor_shape(shape); + size_t data_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()); + if (param_value->tensor_size() == 0) { + if (param_value->tensor_type() == kNumberTypeInt64) { + param_value_new->set_tensor_type(kNumberTypeInt32); + } + param_node->set_default_param(param_value_new); + return param_node; + } + if (param_value->tensor_type() == kNumberTypeInt64) { + param_value_new->set_tensor_type(kNumberTypeInt32); + auto *tensor_data = new (std::nothrow) int[data_count]; + if (tensor_data == nullptr) { + MS_LOG(ERROR) << "new data failed"; + return nullptr; + } + auto *origin_data = reinterpret_cast<int64_t *>(param_value->tensor_addr()); + for (size_t i = 0; i < data_count; ++i) { + if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) { + MS_LOG(WARNING) << "int64 data " << origin_data[i] << "too big to fit into int32"; + tensor_data[i] = origin_data[i] > 0 ? 
INT32_MAX : INT32_MIN; + } else { + tensor_data[i] = static_cast<int>(origin_data[i]); + } + } + param_value_new->SetTensorData(tensor_data, data_count * sizeof(int32_t)); + } else { + param_value_new->set_tensor_type(param_value->tensor_type()); + char *tensor_data = new (std::nothrow) char[param_value->tensor_size()]; + if (tensor_data == nullptr) { + MS_LOG(ERROR) << "new data failed"; + return nullptr; + } + if (memcpy_s(tensor_data, param_value->tensor_size(), param_value->tensor_addr(), param_value->tensor_size()) != + lite::RET_OK) { + MS_LOG(ERROR) << "memcpy data failed."; + delete[] tensor_data; + return nullptr; + } + param_value_new->SetTensorData(tensor_data, param_value->tensor_size()); + } + param_node->set_default_param(param_value_new); + return param_node; +} + ParameterPtr BuildIntValueParameterNode(const FuncGraphPtr &func_graph, const int32_t &data, const std::string &node_name) { MS_ASSERT(func_graph != nullptr); @@ -1184,13 +1282,16 @@ ParameterPtr BuildIntVecParameterNode(const FuncGraphPtr &func_graph, const std: std::vector<int32_t> shape{static_cast<int32_t>(data.size())}; param_value->set_tensor_shape(shape); param_value->set_tensor_type(kNumberTypeInt32); - char *default_data = new (std::nothrow) char[data.size() * sizeof(int32_t)]; - if (memcpy_s(default_data, data.size() * sizeof(int32_t), data.data(), data.size() * sizeof(int32_t)) != EOK) { - MS_LOG(ERROR) << "memcpy data failed."; - delete[] default_data; - return nullptr; + + if (!data.empty()) { + char *default_data = new (std::nothrow) char[data.size() * sizeof(int32_t)]; + if (memcpy_s(default_data, data.size() * sizeof(int32_t), data.data(), data.size() * sizeof(int32_t)) != EOK) { + MS_LOG(ERROR) << "memcpy data failed."; + delete[] default_data; + return nullptr; + } + param_value->SetTensorData(default_data, data.size() * sizeof(int32_t)); } - param_value->SetTensorData(default_data, data.size() * sizeof(int32_t)); param_node->set_default_param(param_value); return param_node; } diff --git a/mindspore/lite/tools/optimizer/common/gllo_utils.h b/mindspore/lite/tools/optimizer/common/gllo_utils.h index bb32ac402a..cbcc11557f 100644 --- a/mindspore/lite/tools/optimizer/common/gllo_utils.h +++ b/mindspore/lite/tools/optimizer/common/gllo_utils.h @@ -18,9 +18,9 @@ #define MINDSPORE_LITE_SRC_PASS_COMMON_GLLO_UTILS_H_ #include <memory> -#include <vector> #include <string> -#include "src/ops/primitive_c.h" +#include <vector> +#include "ops/primitive_c.h" #include "ir/anf.h" #include "ir/func_graph.h" #include "src/common/utils.h" @@ -29,12 +29,19 @@ #include "src/param_value_lite.h" #include "tools/converter/converter_context.h" -using PrimitiveCPtr = std::shared_ptr<mindspore::lite::PrimitiveC>; +using PrimitiveCPtr = std::shared_ptr<mindspore::ops::PrimitiveC>; using mindspore::lite::RET_ERROR; using mindspore::lite::RET_OK; using mindspore::lite::STATUS; namespace mindspore { namespace opt { +inline const PrimitivePtr kPrimReturn = std::make_shared<Primitive>("Return"); +inline const PrimitivePtr kPrimMakeTuple = std::make_shared<Primitive>("MakeTuple"); +inline const PrimitivePtr kPrimIdentity = std::make_shared<Primitive>("Identity"); +std::vector<int> CastToInt(const ValuePtr &value); + +std::vector<std::vector<int>> CastToVec2DInt(const ValuePtr &value); + bool CheckPrimitiveType(const AnfNodePtr &node, const PrimitivePtr &primitive_type); bool IsRealCNodeKernel(const AnfNodePtr &node); @@ -60,8 +67,6 @@ int CheckLeastInputSize(const CNodePtr &node, int size); ParameterPtr AddNewBiasNode(float 
*bias_data, const FuncGraphPtr &func_graph, int kernel_num, const ParamValueLitePtr &weight_tensor); -schema::PrimitiveType GetCNodeType(const BaseRef &node); - bool IsParamNode(const BaseRef &n); bool IsConvNode(const BaseRef &n); @@ -125,6 +130,9 @@ static lite::STATUS TransFilterFormat(const ParamValueLitePtr &tensor, kTransFil STATUS TransFilterFormat(const ParamValueLitePtr &tensor, schema::Format dst_format); +ParameterPtr BuildParameterNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, + const ParamValueLitePtr &param_value); + ParameterPtr BuildIntValueParameterNode(const FuncGraphPtr &func_graph, const int32_t &data, const std::string &node_name); @@ -136,7 +144,6 @@ ParameterPtr BuildIntVec2DParameterNode(const FuncGraphPtr &func_graph, const st ParameterPtr BuildFloatValueParameterNode(const FuncGraphPtr &func_graph, const float &data, const std::string &node_name); - } // namespace opt } // namespace mindspore #endif // MINDSPORE_LITE_SRC_PASS_COMMON_GLLO_UTILS_H_ diff --git a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc index af2adaf107..be1c8baa77 100644 --- a/mindspore/lite/tools/optimizer/common/node_pass_extends.cc +++ b/mindspore/lite/tools/optimizer/common/node_pass_extends.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,11 +14,9 @@ * limitations under the License. */ #include "backend/optimizer/common/node_pass.h" - #include <unordered_set> #include <deque> #include <algorithm> - #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" diff --git a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc index 0edabe31eb..326cfd732b 100644 --- a/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc +++ b/mindspore/lite/tools/optimizer/common/pass_manager_extends.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,13 +14,11 @@ * limitations under the License. */ #include "backend/optimizer/common/pass_manager.h" - #include <sys/time.h> #include <unordered_set> #include <deque> #include <string> #include <algorithm> - #include "ir/anf.h" #include "ir/func_graph.h" #include "ir/manager.h" diff --git a/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.cc b/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.cc index 202b73e267..d89cf59f60 100644 --- a/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
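A note on the predicate migration above: every matcher that used to fetch a schema::PrimitiveType through the removed GetCNodeType now goes through CheckPrimitiveType with a PrimitivePtr, and per-op flags that used to be schema fields (e.g. depthwise) become Primitive attrs. A minimal sketch of a helper in the new style, using only names introduced in this patch (IsDepthwiseConvNode itself is hypothetical, not part of the change):

bool IsDepthwiseConvNode(const AnfNodePtr &node) {
  // Only CNodes carry a callable primitive at input 0.
  if (node == nullptr || !utils::isa<CNodePtr>(node) ||
      !CheckPrimitiveType(node, prim::kPrimConv2DFusion)) {
    return false;
  }
  auto prim = GetValueNode<PrimitivePtr>(node->cast<CNodePtr>()->input(0));
  if (prim == nullptr) {
    return false;
  }
  // Flags formerly stored on schema::PrimitiveT now live as Primitive attrs;
  // a missing attr is treated as "not depthwise", matching IsConvNode above.
  auto attr = prim->GetAttr(ops::kIsDepthWise);
  return attr != nullptr && GetValue<bool>(attr);
}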
@@ -17,26 +17,25 @@ #include <memory> #include <vector> #include <algorithm> -#include "src/ops/primitive_c.h" -#include "src/param_value_lite.h" +#include "ops/mat_mul.h" #include "schema/inner/model_generated.h" +#include "src/param_value_lite.h" #include "utils/utils.h" +#include "tools/converter/quant_param_holder.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" namespace mindspore::opt { namespace { bool IsStackNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Stack; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimStack); } return false; } bool IsFullConnectNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_FullConnection; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimFullConnection); } return false; } @@ -60,7 +59,7 @@ void *GetInputAddr(const AnfNodePtr &node, size_t input_index) { } return param_value->tensor_addr(); } - MS_LOG(ERROR) << "input not paramter"; + MS_LOG(ERROR) << "input not parameter"; return nullptr; } STATUS GetRightMatmulInputParamter(const CNodePtr &stack_node, const ParameterPtr &rmatmul_input) { @@ -136,40 +135,56 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons MS_ASSERT(fullconnect_cnode->inputs().size() == 3); auto left_slice_node = fullconnect_cnode->input(1); auto left_slice_cnode = left_slice_node->cast<CNodePtr>(); - if (GetCNodeType(left_slice_cnode) != schema::PrimitiveType_Slice) { + if (!CheckPrimitiveType(left_slice_cnode, prim::kPrimSliceFusion)) { return nullptr; } auto left_matmul_input = left_slice_cnode->input(1); auto right_reshape_node = fullconnect_cnode->input(2); - auto matmul_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::MatMulT> attr = std::make_unique<schema::MatMulT>(); - matmul_primitive->value.type = schema::PrimitiveType_MatMul; - matmul_primitive->value.value = attr.release(); - auto matmul_cvalue = lite::PrimitiveC::Create(matmul_primitive.release()); + auto matmul_cvalue = new (std::nothrow) mindspore::ops::MatMul(); + if (matmul_cvalue == nullptr) { + MS_LOG(ERROR) << "new MatMul failed"; + return nullptr; + } // get matmul quantParams std::vector<schema::QuantParamT> jointed_quant_params; for (size_t i = 1; i < stack_cnode->inputs().size(); i++) { auto fullconnect_node2 = stack_cnode->input(i)->cast<CNodePtr>(); - auto fc_prim = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(fullconnect_node2->input(0)); - auto fc_input_quantParams = fc_prim->input_quant_params(); + auto fc_prim = GetValueNode<PrimitiveCPtr>(fullconnect_node2->input(0)); + auto fc_input_quantParams_valueptr = fc_prim->GetAttr("quant_params"); + if (fc_input_quantParams_valueptr == nullptr) { + continue; + } + auto fc_input_quantParams_holder = fc_input_quantParams_valueptr->cast<lite::QuantParamHolderPtr>(); + if (fc_input_quantParams_holder == nullptr) { + MS_LOG(ERROR) << "quant param is invalid."; + return nullptr; + } + auto fc_input_quantParams = fc_input_quantParams_holder->input_quant_params(); if (fc_input_quantParams.size() > 1 && !fc_input_quantParams[1].empty()) { jointed_quant_params.push_back(fc_input_quantParams[1][0]); } } - auto fc_prim = 
GetValueNode<std::shared_ptr<lite::PrimitiveC>>(fullconnect_cnode->input(0)); - auto rmatmul_quant_params = fc_prim->input_quant_params(); + auto quant_params_holder = std::make_shared<lite::QuantParamHolder>(); + auto fc_prim = GetValueNode<PrimitiveCPtr>(fullconnect_cnode->input(0)); + lite::QuantParamsVector rmatmul_quant_params; + auto rmatmul_quant_params_valueptr = fc_prim->GetAttr("quant_params"); + if (rmatmul_quant_params_valueptr != nullptr) { + auto rmatmul_quant_params_holder = rmatmul_quant_params_valueptr->cast<lite::QuantParamHolderPtr>(); + if (rmatmul_quant_params_holder == nullptr) { + MS_LOG(ERROR) << "quant param is invalid."; + return nullptr; + } + rmatmul_quant_params = rmatmul_quant_params_holder->input_quant_params(); + quant_params_holder->set_output_quant_params(rmatmul_quant_params_holder->output_quant_params()); + } rmatmul_quant_params.pop_back(); rmatmul_quant_params.pop_back(); // no bias quantParams rmatmul_quant_params.emplace_back(jointed_quant_params); - if (matmul_cvalue == nullptr) { - MS_LOG(ERROR) << "matmul_cvalue is nullptr."; - return nullptr; - } - matmul_cvalue->set_input_quant_params(rmatmul_quant_params); - matmul_cvalue->set_output_quant_params(fc_prim->output_quant_params()); - auto matmul_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(matmul_cvalue)); + quant_params_holder->set_input_quant_params(rmatmul_quant_params); + matmul_cvalue->AddAttr("quant_params", quant_params_holder); + auto matmul_value_node = NewValueNode(std::shared_ptr<ops::PrimitiveC>(matmul_cvalue)); std::vector<AnfNodePtr> matmul_inputs = {matmul_value_node, left_matmul_input}; // batchmatmul right node may be const @@ -179,12 +194,11 @@ const AnfNodePtr BatchMatMulFusion::Process(const FuncGraphPtr &func_graph, cons MS_LOG(ERROR) << "GetRightMatmulInputParamter failed"; return node; } - auto prim = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(matmul_value_node); - if (prim->primitiveT()->value.AsMatMul() == nullptr) { - MS_LOG(ERROR) << "prim->primitiveT()->value.AsMatMul() is nullptr."; - return nullptr; - } - prim->primitiveT()->value.AsMatMul()->transposeB = true; + auto prim = GetValueNode<PrimitiveCPtr>(matmul_value_node); + MS_ASSERT(prim != nullptr); + auto prim_matmul = prim->cast<std::shared_ptr<mindspore::ops::MatMul>>(); + MS_ASSERT(prim_matmul != nullptr); + prim_matmul->set_transpose_b(true); matmul_inputs.push_back(rmatmul_paramter); } else { auto right_reshape_cnode = right_reshape_node->cast<CNodePtr>(); diff --git a/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.h b/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.h index 9dff451261..fef190673d 100644 --- a/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/batchmatmul_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
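The BatchMatMulFusion change above also relocates quant parameters: with lite::PrimitiveC gone, they travel in a lite::QuantParamHolder stored under the "quant_params" attribute of the new primitive objects. A minimal sketch of the resulting read-modify-write convention, assuming the headers already included in the fusion file (CopyQuantParams is an illustrative name, not an API in this patch):

bool CopyQuantParams(const PrimitivePtr &src, const PrimitivePtr &dst) {
  auto value = src->GetAttr("quant_params");
  if (value == nullptr) {
    return false;  // fp32 node: nothing to carry over
  }
  auto holder = value->cast<lite::QuantParamHolderPtr>();
  if (holder == nullptr) {
    return false;  // attr exists but is not a QuantParamHolder
  }
  auto new_holder = std::make_shared<lite::QuantParamHolder>();
  new_holder->set_input_quant_params(holder->input_quant_params());
  new_holder->set_output_quant_params(holder->output_quant_params());
  dst->AddAttr("quant_params", new_holder);
  return true;
}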
diff --git a/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.cc b/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.cc index 5c09318901..399ec240e0 100644 --- a/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.cc @@ -16,7 +16,12 @@ #include "tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h" #include <memory> #include <functional> -#include "src/ops/primitive_c.h" +#include "ops/concat.h" +#include "ops/gru.h" +#include "ops/split.h" +#include "ops/squeeze.h" +#include "ops/stack.h" +#include "ops/transpose.h" #include "src/common/utils.h" #include "utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" @@ -34,9 +39,10 @@ const auto &p1 = std::placeholders::_1; bool IsParameterNode(const BaseRef &n) { return utils::isa<ParameterPtr>(n); } -bool IsOpType(const BaseRef &n, const schema::PrimitiveType &type) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - return opt::GetCNodeType(n) == type; +bool IsOpType(const BaseRef &n, const PrimitivePtr &prim) { + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim); } return false; } @@ -65,75 +71,66 @@ BiDirectionTfGruCellFusion::BiDirectionTfGruCellFusion(const std::string &name, const BaseRef BiDirectionTfGruCellFusion::DefinePattern() const { // forward auto fw_max1 = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Reduce)), input_length_}); - auto fw_max2 = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Maximum)), + VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimReduceFusion)), input_length_}); + auto fw_max2 = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimMaximum)), std::make_shared<CondVar>(IsParameterNode), fw_max1}); - auto fw_shape = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Shape)), transpose_input_}); - auto fw_stride = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_StridedSlice)), - fw_shape, std::make_shared<SeqVar>()}); - auto fw_min = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Minimum)), fw_stride, fw_max2}); - - auto fw_reserve = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListReserve)), - std::make_shared<CondVar>(IsParameterNode), fw_stride}); - auto fw_from_tensor = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListFromTensor)), - transpose_input_, std::make_shared<CondVar>(IsParameterNode)}); - auto is_fw_while = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_While)); + auto fw_shape = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimShape)), transpose_input_}); + auto fw_stride = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimStridedSlice)), fw_shape, + std::make_shared<SeqVar>()}); + auto fw_min = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimMinimum)), fw_stride, fw_max2}); + + auto fw_reserve = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListReserve)), + std::make_shared<CondVar>(IsParameterNode), fw_stride}); + auto fw_from_tensor = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListFromTensor)), + 
transpose_input_, std::make_shared<CondVar>(IsParameterNode)}); + auto is_fw_while = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimWhile)); auto fw_while = VectorRef({is_fw_while, fw_vars_[0], fw_vars_[1], std::make_shared<CondVar>(IsParameterNode), fw_stride, std::make_shared<CondVar>(IsParameterNode), fw_reserve, fw_init_state_, fw_min, fw_from_tensor, input_length_}); fw_while.insert(fw_while.end(), fw_vars_.begin() + 2, fw_vars_.end()); fw_while.emplace_back(std::make_shared<Var>()); - auto fw_get_item = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TupleGetItem)), - fw_while, std::make_shared<Var>()}); - auto fw_stack = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListStack)), + auto fw_get_item = VectorRef( + {std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTupleGetItem)), fw_while, std::make_shared<Var>()}); + auto fw_stack = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListStack)), fw_get_item, std::make_shared<CondVar>(IsParameterNode)}); - auto fw_out_trans = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Transpose)), - fw_stack, std::make_shared<Var>()}); + auto fw_out_trans = VectorRef( + {std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTranspose)), fw_stack, std::make_shared<Var>()}); // backward - auto bw_reverse_seq = VectorRef( - {std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_ReverseSequence)), input_, input_length_}); + auto bw_reverse_seq = + VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimReverseSequence)), input_, input_length_}); auto bw_max1 = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Reduce)), input_length_}); - auto bw_max2 = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Maximum)), + VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimReduceFusion)), input_length_}); + auto bw_max2 = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimMaximum)), std::make_shared<CondVar>(IsParameterNode), bw_max1}); - auto bw_trans = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Transpose)), - bw_reverse_seq, std::make_shared<Var>()}); - auto bw_shape = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Shape)), bw_trans}); - auto bw_stride = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_StridedSlice)), - bw_shape, std::make_shared<SeqVar>()}); - auto bw_min = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Minimum)), bw_stride, bw_max2}); - auto bw_reserve = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListReserve)), - std::make_shared<CondVar>(IsParameterNode), bw_stride}); - auto bw_from_tensor = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListFromTensor)), bw_trans, - std::make_shared<CondVar>(IsParameterNode)}); - auto is_bw_while = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_While)); + auto bw_trans = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTranspose)), bw_reverse_seq, + std::make_shared<Var>()}); + auto bw_shape = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimShape)), bw_trans}); + auto bw_stride = 
VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimStridedSlice)), bw_shape, + std::make_shared<SeqVar>()}); + auto bw_min = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimMinimum)), bw_stride, bw_max2}); + auto bw_reserve = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListReserve)), + std::make_shared<CondVar>(IsParameterNode), bw_stride}); + auto bw_from_tensor = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListFromTensor)), + bw_trans, std::make_shared<CondVar>(IsParameterNode)}); + auto is_bw_while = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimWhile)); auto bw_while = VectorRef({is_bw_while, bw_vars_[0], bw_vars_[1], std::make_shared<CondVar>(IsParameterNode), bw_stride, std::make_shared<CondVar>(IsParameterNode), bw_reserve, bw_init_state_, bw_min, bw_from_tensor, input_length_}); bw_while.insert(bw_while.end(), bw_vars_.begin() + 2, bw_vars_.end()); bw_while.emplace_back(std::make_shared<Var>()); - auto bw_get_item = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TupleGetItem)), - bw_while, std::make_shared<Var>()}); - auto bw_stack = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListStack)), + auto bw_get_item = VectorRef( + {std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTupleGetItem)), bw_while, std::make_shared<Var>()}); + auto bw_stack = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListStack)), bw_get_item, std::make_shared<CondVar>(IsParameterNode)}); - auto bw_out_trans = VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Transpose)), - bw_stack, std::make_shared<Var>()}); - auto bw_reverse1 = - VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_ReverseSequence)), bw_out_trans, - input_length_}); - - auto concat = VectorRef( - {std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Concat)), fw_out_trans, bw_reverse1}); + auto bw_out_trans = VectorRef( + {std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTranspose)), bw_stack, std::make_shared<Var>()}); + auto bw_reverse1 = VectorRef( + {std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimReverseSequence)), bw_out_trans, input_length_}); + + auto concat = + VectorRef({std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimConcat)), fw_out_trans, bw_reverse1}); return concat; } @@ -142,10 +139,10 @@ AnfNodePtr BiDirectionTfGruCellFusion::GetCondGraphPattern(const PrimitiveVarMap auto is_parameter2 = std::make_shared<CondVar>(IsParameterNode); auto is_parameter3 = std::make_shared<CondVar>(IsParameterNode); auto is_parameter4 = std::make_shared<CondVar>(IsParameterNode); - auto is_less1 = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Less)); - auto is_less2 = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Less)); - auto is_logical_and = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_LogicalAnd)); - auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Return)); + auto is_less1 = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLess)); + auto is_less2 = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLess)); + auto is_logical_and = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLogicalAnd)); + auto is_return = 
std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimReturn)); VectorRef less1_ref = VectorRef({is_less1, is_parameter1, is_parameter2}); VectorRef less2_ref = VectorRef({is_less2, is_parameter3, is_parameter4}); VectorRef logicaland_ref = VectorRef({is_logical_and, less1_ref, less2_ref}); @@ -195,13 +192,13 @@ AnfNodePtr BiDirectionTfGruCellFusion::GetBodyGraphPattern(const PrimitiveVarMap VectorRef select_hidden = VectorRef({std::make_shared<Var>("Switch"), greater_equal, placeholders[4], new_hidden}); - auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_MakeTuple)); + auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimMakeTuple)); std::vector<BaseRef> outputs = {is_make_tuple, add1, placeholders[1], add, output, select_hidden, placeholders[5], placeholders[6], placeholders[7]}; outputs.insert(outputs.end(), placeholders.begin() + 8, placeholders.end()); VectorRef make_tuple_node = VectorRef(outputs); - auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Return)); + auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimReturn)); VectorRef return_node = VectorRef({is_return, make_tuple_node}); VarPtr fg = std::make_shared<Var>("RootG"); @@ -417,13 +414,9 @@ CNodePtr BiDirectionTfGruCellFusion::GetStackedHiddenState(const FuncGraphPtr &f MS_ASSERT(func_graph != nullptr); MS_ASSERT(fw_init_state != nullptr); MS_ASSERT(bw_init_state != nullptr); - auto stack_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::StackT> attr = std::make_unique<schema::StackT>(); - attr->axis = 0; - stack_primitive->value.type = schema::PrimitiveType_Stack; - stack_primitive->value.value = attr.release(); - auto stack_cvalue = lite::PrimitiveC::Create(stack_primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(stack_cvalue)); + auto stack_prim = std::make_shared<ops::Stack>(); + stack_prim->set_axis(0); + auto value_node = NewValueNode(stack_prim); std::vector<AnfNodePtr> new_node_inputs = {value_node, fw_init_state, bw_init_state}; auto new_node = func_graph->NewCNode(new_node_inputs); new_node->set_abstract(fw_init_state->abstract()->Clone()); @@ -440,13 +433,9 @@ CNodePtr BiDirectionTfGruCellFusion::CreateBiDirectionGruNode(const FuncGraphPtr MS_ASSERT(equiv != nullptr); MS_ASSERT(fw_body_equiv != nullptr); MS_ASSERT(bw_body_equiv != nullptr); - auto gru_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::GruT> attr = std::make_unique<schema::GruT>(); - attr->bidirection = true; - gru_primitive->value.type = schema::PrimitiveType_Gru; - gru_primitive->value.value = attr.release(); - auto gru_cvalue = lite::PrimitiveC::Create(gru_primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(gru_cvalue)); + auto gru_prim = std::make_shared<ops::GRU>(); + gru_prim->set_bidirectional(true); + auto value_node = NewValueNode(gru_prim); auto fw_gate_kernel = utils::cast<AnfNodePtr>((*equiv)[fw_vars_[2]]); MS_ASSERT(fw_gate_kernel != nullptr); @@ -537,14 +526,10 @@ CNodePtr BiDirectionTfGruCellFusion::GetPostProcessNode(const FuncGraphPtr &func const std::string base_name) const { MS_ASSERT(func_graph != nullptr); MS_ASSERT(gru_output != nullptr); - auto split_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::SplitT> split_attr = std::make_unique<schema::SplitT>(); - split_attr->numberSplit = 2; - split_attr->splitDim = 1; - split_primitive->value.type = 
schema::PrimitiveType_Split; - split_primitive->value.value = split_attr.release(); - auto split_cvalue = lite::PrimitiveC::Create(split_primitive.release()); - auto split_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(split_cvalue)); + auto split_prim = std::make_shared<ops::Split>(); + split_prim->set_output_num(2); + split_prim->set_axis(1); + auto split_value_node = NewValueNode(split_prim); std::vector<AnfNodePtr> new_node_inputs = {split_value_node, gru_output}; auto split_new_node = func_graph->NewCNode(new_node_inputs); split_new_node->set_fullname_with_scope("split_" + base_name); @@ -561,39 +546,25 @@ CNodePtr BiDirectionTfGruCellFusion::GetPostProcessNode(const FuncGraphPtr &func return nullptr; } - auto concat_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::ConcatT> concat_attr = std::make_unique<schema::ConcatT>(); - concat_attr->axis = 3; - concat_primitive->value.type = schema::PrimitiveType_Concat; - concat_primitive->value.value = concat_attr.release(); - auto concat_cvalue = lite::PrimitiveC::Create(concat_primitive.release()); - auto concat_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(concat_cvalue)); + auto concat_prim = std::make_shared<ops::Concat>(); + concat_prim->set_axis(3); + auto concat_value_node = NewValueNode(concat_prim); std::vector<AnfNodePtr> concat_new_node_inputs = {concat_value_node, split_out1, split_out2}; auto concat_new_node = func_graph->NewCNode(concat_new_node_inputs); concat_new_node->set_fullname_with_scope("concat_" + base_name); concat_new_node->set_abstract(gru_output->abstract()->Clone()); - auto squeeze_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::SqueezeT> squeeze_attr = std::make_unique<schema::SqueezeT>(); - squeeze_attr->axis = std::vector<int>{1}; - squeeze_primitive->value.type = schema::PrimitiveType_Squeeze; - squeeze_primitive->value.value = squeeze_attr.release(); - auto squeeze_cvalue = lite::PrimitiveC::Create(squeeze_primitive.release()); - auto squeeze_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(squeeze_cvalue)); + auto squeeze_prim = std::make_shared<ops::Squeeze>(); + squeeze_prim->set_axis(std::vector<int64_t>{1}); + auto squeeze_value_node = NewValueNode(squeeze_prim); std::vector<AnfNodePtr> squeeze_new_node_inputs = {squeeze_value_node, concat_new_node}; auto squeeze_new_node = func_graph->NewCNode(squeeze_new_node_inputs); squeeze_new_node->set_fullname_with_scope("squeeze_" + base_name); squeeze_new_node->set_abstract(gru_output->abstract()->Clone()); - auto transpose_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::TransposeT> transpose_attr = std::make_unique<schema::TransposeT>(); - transpose_attr->perm = std::vector<int>{1, 0, 2}; - transpose_primitive->value.type = schema::PrimitiveType_Transpose; - transpose_primitive->value.value = transpose_attr.release(); - auto transpose_cvalue = lite::PrimitiveC::Create(transpose_primitive.release()); - auto transpose_value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(transpose_cvalue)); - std::vector<AnfNodePtr> transpose_new_node_inputs = {transpose_value_node, squeeze_new_node}; - auto transpose_new_node = func_graph->NewCNode(transpose_new_node_inputs); + auto transpose_prim = std::make_shared<ops::Transpose>(); + auto transpose_perm = BuildIntVecParameterNode(func_graph, {1, 0, 2}, "transpose_" + base_name + "_perm"); + auto transpose_new_node = func_graph->NewCNode(transpose_prim, {squeeze_new_node, transpose_perm}); 
transpose_new_node->set_fullname_with_scope("transpose_" + base_name); transpose_new_node->set_abstract(gru_output->abstract()->Clone()); @@ -612,7 +583,7 @@ const AnfNodePtr BiDirectionTfGruCellFusion::Process(const FuncGraphPtr &func_gr auto transpose_input = utils::cast<AnfNodePtr>((*equiv)[transpose_input_]); MS_ASSERT(transpose_input != nullptr); - if (!utils::isa<CNodePtr>(transpose_input) || GetCNodeType(transpose_input) != schema::PrimitiveType_Transpose) { + if (!utils::isa<CNodePtr>(transpose_input) || !CheckPrimitiveType(transpose_input, prim::kPrimTranspose)) { return nullptr; } diff --git a/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h b/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h index 53819466bd..a4222e47d7 100644 --- a/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/bidirection_tf_gru_cell_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc index e166696b92..45e11d06e4 100644 --- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
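The GRU fusion rewrite above also shows the general pattern-building recipe after the migration: each operator slot in a VectorRef becomes a CondVar bound to the PrimitivePtr-based IsOpType helper, replacing the schema-enum comparisons. A condensed sketch of a two-op pattern in that style, reusing the IsOpType/p1 helpers defined in that file (DefineTinyPattern is illustrative only):

const BaseRef DefineTinyPattern() {
  // Matches Concat(Transpose(x, perm), y) by primitive identity.
  auto x = std::make_shared<Var>();
  auto perm = std::make_shared<Var>();
  auto y = std::make_shared<Var>();
  auto is_transpose = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTranspose));
  VectorRef transpose_ref = VectorRef({is_transpose, x, perm});
  auto is_concat = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimConcat));
  return VectorRef({is_concat, transpose_ref, y});
}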
@@ -19,19 +19,23 @@ #include <set> #include <vector> #include <algorithm> +#include "tools/converter/quant_param_holder.h" #include "tools/optimizer/common/gllo_utils.h" #include "tools/anf_exporter/anf_exporter.h" +#include "tools/common/node_util.h" +#include "src/common/common.h" +#include "src/ops/populate/populate_register.h" #include "src/kernel_registry.h" #include "src/inner_context.h" -#include "src/ops/primitive_c.h" #include "src/tensor.h" -#include "src/ops/populate/populate_register.h" +#include "src/ops/ops_utils.h" +#include "src/runtime/infer_manager.h" using mindspore::lite::KernelRegistry; -using mindspore::lite::PrimitiveC; using mindspore::lite::Tensor; namespace mindspore::opt { namespace { +constexpr size_t INITIAL_SIZE = 1024; std::vector<Tensor *> GetCNodeInputTensors(const CNodePtr &CNode) { MS_ASSERT(CNode != nullptr); auto tmp_meta_graph = std::make_unique<schema::MetaGraphT>(); @@ -111,17 +115,44 @@ ParameterPtr CreateNewParamter(const FuncGraphPtr &func_graph, Tensor *tensor) { parameter->set_default_param(param_value); return parameter; } -kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, const std::vector<Tensor *> &outputs, - OpParameter *parameter, lite::InnerContext *context, - mindspore::lite::PrimitiveC *primitive) { - MS_ASSERT(nullptr != lite_primitive); +kernel::LiteKernel *GetLiteKernel(std::vector<Tensor *> inputs, std::vector<Tensor *> *outputs, const CNodePtr &cnode, + lite::InnerContext *context) { + MS_ASSERT(cnode != nullptr && context != nullptr); + auto prim_t = lite::GetPrimitiveT(cnode->input(0)); + flatbuffers::FlatBufferBuilder fbb(INITIAL_SIZE); + auto prim = lite::ConvertToPrimitive(prim_t, &fbb); + if (prim == nullptr) { + fbb.Clear(); + MS_LOG(ERROR) << "get primitive failed."; + return nullptr; + } + auto parameter_gen = lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim->value_type(), lite::SCHEMA_CUR); + if (parameter_gen == nullptr) { + fbb.Clear(); + MS_LOG(ERROR) << "PopulateParameter returned nullptr, type: " << schema::EnumNamePrimitiveType(prim->value_type()); + return nullptr; + } + auto parameter = parameter_gen(prim); + fbb.Clear(); + if (parameter == nullptr) { + MS_LOG(ERROR) << "parameter is nullptr."; + return nullptr; + } + parameter->infer_flag_ = true; + auto ret = KernelInferShape(inputs, outputs, parameter); + if (ret != lite::RET_OK) { + free(parameter); + MS_LOG(ERROR) << "infershape failed."; + return nullptr; + } auto data_type = inputs.front()->data_type(); - kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, (schema::PrimitiveType)primitive->Type()}; + kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, static_cast<schema::PrimitiveType>(parameter->type_)}; auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc); if (creator != nullptr) { - auto lite_kernel = creator(inputs, outputs, parameter, context, desc, primitive); + auto lite_kernel = creator(inputs, *outputs, parameter, context, desc); return lite_kernel; } + free(parameter); return nullptr; } @@ -142,7 +173,7 @@ lite::STATUS ReplaceCNode(const FuncGraphPtr &func_graph, const CNodePtr &any_no return lite::RET_ERROR; } auto tuple_node = used_node_list->at(0).first; - if (GetCNodeType(tuple_node) == schema::PrimitiveType_TupleGetItem) { + if (CheckPrimitiveType(tuple_node, prim::kPrimTupleGetItem)) { auto new_parameter = CreateNewParamter(func_graph, output_tensors.at(k)); if (new_parameter == nullptr) { MS_LOG(ERROR) << "CreateNewParamter failed, name: " << input_node->fullname_with_scope(); @@
-166,7 +197,45 @@ lite::STATUS ReplaceCNode(const FuncGraphPtr &func_graph, const CNodePtr &any_no } return lite::RET_OK; } -} // namespace + +lite::STATUS CopyQuantParams(const CNodePtr &cnode, const std::vector<Tensor *> &inputs, + const std::vector<Tensor *> &outputs) { + MS_ASSERT(cnode != nullptr); + auto prim = GetValueNode<PrimitivePtr>(cnode->input(0)); + auto quant_param_valueptr = prim->GetAttr("quant_params"); + if (quant_param_valueptr == nullptr) { + return lite::RET_OK; + } + auto quant_param_holder = quant_param_valueptr->cast<lite::QuantParamHolderPtr>(); + if (quant_param_holder == nullptr) { + MS_LOG(ERROR) << "quant param is invalid."; + return lite::RET_ERROR; + } + auto input_quant_params = quant_param_holder->input_quant_params(); + for (size_t m = 0; m < input_quant_params.size(); m++) { + for (auto inputQuantParam : input_quant_params[m]) { + lite::QuantArg quant_arg{}; + quant_arg.scale = inputQuantParam.scale; + quant_arg.zeroPoint = inputQuantParam.zeroPoint; + quant_arg.roundType = inputQuantParam.roundType; + quant_arg.multiplier = inputQuantParam.multiplier; + inputs[m]->AddQuantParam(quant_arg); + } + } + auto output_quant_params = quant_param_holder->output_quant_params(); + for (size_t m = 0; m < output_quant_params.size(); m++) { + for (auto outputQuantParam : output_quant_params[m]) { + lite::QuantArg quant_arg{}; + quant_arg.scale = outputQuantParam.scale; + quant_arg.zeroPoint = outputQuantParam.zeroPoint; + quant_arg.roundType = outputQuantParam.roundType; + quant_arg.multiplier = outputQuantParam.multiplier; + outputs[m]->AddQuantParam(quant_arg); + } + } + return lite::RET_OK; +} + void FreeTensors(std::vector<Tensor *> *input_tensor, std::vector<Tensor *> *output_tensor) { if (input_tensor != nullptr) { for (auto &i : *input_tensor) { @@ -181,6 +250,7 @@ void FreeTensors(std::vector<Tensor *> *input_tensor, std::vector<Tensor *> *out } } } +} // namespace const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const AnfNodePtr &node, const EquivPtr &) const { @@ -210,68 +280,30 @@ const AnfNodePtr ConstFoldPass::Process(const FuncGraphPtr &func_graph, const An for (size_t j = 0; j < output_nums; j++) { output_tensors.push_back(new (std::nothrow) Tensor()); } - auto lite_primitive = GetValueNode<std::shared_ptr<PrimitiveC>>(input_cnode->input(0)); - if (lite_primitive == nullptr) { - MS_LOG(ERROR) << "lite_primitive is nullptr"; + if (CopyQuantParams(input_cnode, input_tensors, output_tensors) != lite::RET_OK) { + MS_LOG(ERROR) << "copy quant params failed."; FreeTensors(&input_tensors, &output_tensors); return nullptr; } - auto inputQuantParams = lite_primitive->input_quant_params(); - for (size_t m = 0; m < inputQuantParams.size(); m++) { - for (auto inputQuantParam : inputQuantParams[m]) { - lite::QuantArg quant_arg{}; - quant_arg.scale = inputQuantParam.scale; - quant_arg.zeroPoint = inputQuantParam.zeroPoint; - input_tensors[m]->AddQuantParam(quant_arg); - } - } - auto outputQuantParams = lite_primitive->output_quant_params(); - for (size_t m = 0; m < outputQuantParams.size(); m++) { - for (auto outputQuantParam : outputQuantParams[m]) { - lite::QuantArg quant_arg{}; - quant_arg.scale = outputQuantParam.scale; - quant_arg.zeroPoint = outputQuantParam.zeroPoint; - output_tensors[m]->AddQuantParam(quant_arg); - } - } - lite_primitive->InferShape(input_tensors, output_tensors); - auto primitive = lite_primitive.get(); - if (primitive->Type() == schema::PrimitiveType_RandomStandardNormal) { - return nullptr; - } - MS_ASSERT(primitive 
!= nullptr); - MS_ASSERT(primitive->Type() != nullptr); - auto func_pointer = - lite::PopulateRegistry::GetInstance()->GetParameterCreator(schema::PrimitiveType(primitive->Type())); - if (func_pointer == nullptr) { - MS_LOG(ERROR) << "ParameterCreator function pointer is nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)primitive->Type()); - return nullptr; - } - auto parameter = func_pointer(primitive); - - if (parameter == nullptr) { - MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " - << schema::EnumNamePrimitiveType((schema::PrimitiveType)(lite_primitive->Type())); - return nullptr; - } - auto lite_kernel = GetLiteKernel(input_tensors, output_tensors, parameter, context.get(), lite_primitive.get()); + auto lite_kernel = GetLiteKernel(input_tensors, &output_tensors, input_cnode, context.get()); if (lite_kernel == nullptr) { - MS_LOG(ERROR) << "constant_folding schedule node lite kernel nullptr"; FreeTensors(&input_tensors, &output_tensors); + MS_LOG(ERROR) << "constant_folding schedule node lite kernel nullptr"; return nullptr; } for (auto output_tensor : output_tensors) { - auto ret = output_tensor->MallocData(); - if (RET_OK != ret) { + auto status = output_tensor->MallocData(); + if (status != lite::RET_OK) { MS_LOG(ERROR) << "MallocData failed"; FreeTensors(&input_tensors, &output_tensors); + delete (lite_kernel); return nullptr; } } - auto ret = lite_kernel->Run(); - if (0 != ret) { + auto status = lite_kernel->Run(); + if (status != lite::RET_OK) { FreeTensors(&input_tensors, &output_tensors); + delete (lite_kernel); MS_LOG(ERROR) << "run kernel failed, name: " << lite_kernel->name(); return nullptr; } diff --git a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h index 9de1eb2d03..7429100f3f 100644 --- a/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/constant_folding_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc index 41d5b41e56..fda4f685b5 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
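The reworked ConstFoldPass above now populates an OpParameter straight from the unified-IR primitive, runs KernelInferShape, executes the kernel once at convert time, and replaces the folded node with a Parameter. A minimal standalone sketch of that fold-at-convert-time flow (mock types, not the lite kernel API):

// Minimal sketch of constant folding: gather constant inputs, run the op once
// offline, and keep only the computed output as a new graph constant.
#include <functional>
#include <iostream>
#include <vector>

using Tensor = std::vector<float>;
using Kernel = std::function<Tensor(const std::vector<Tensor> &)>;

// Stand-in for "schedule kernel -> MallocData -> Run -> replace with Parameter".
Tensor FoldConstant(const Kernel &kernel, const std::vector<Tensor> &const_inputs) {
  return kernel(const_inputs);  // the folded value becomes a graph Parameter
}

int main() {
  Kernel add = [](const std::vector<Tensor> &in) {
    Tensor out(in[0].size());
    for (size_t i = 0; i < out.size(); ++i) out[i] = in[0][i] + in[1][i];
    return out;
  };
  Tensor folded = FoldConstant(add, {{1, 2, 3}, {10, 20, 30}});
  for (float v : folded) std::cout << v << " ";  // 11 22 33
  std::cout << "\n";
  return 0;
}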
@@ -16,12 +16,8 @@ #include "tools/optimizer/fusion/conv_activation_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" -#include "src/ops/conv2d.h" -#include "src/ops/deconv2d.h" -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/activation.h" -#include "schema/inner/model_generated.h" +#include "ops/fusion/activation.h" +#include "ops/op_utils.h" #include "tools/optimizer/common/gllo_utils.h" namespace mindspore::opt { @@ -47,14 +43,16 @@ const AnfNodePtr ConvActivationFusion::Process(const FuncGraphPtr &func_graph, c CheckInputSize(act_node, kActivationInputsLength) != lite::RET_OK) { return nullptr; } - auto primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(act_node->input(0)); - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Activation>>(primitivec)); - auto act_primitivec = utils::cast<std::shared_ptr<mindspore::lite::Activation>>(primitivec); - MS_ASSERT(act_primitivec != nullptr); - if (act_primitivec->GetType() != schema::ActivationType_RELU && - act_primitivec->GetType() != schema::ActivationType_RELU6) { + if (!CheckPrimitiveType(act_node, prim::kPrimActivation)) { return nullptr; } + auto act_prim = GetValueNode<std::shared_ptr<mindspore::ops::Activation>>(act_node->input(0)); + if (act_prim == nullptr || + (act_prim->GetAttr(ops::kActivationType) != nullptr && act_prim->get_activation_type() != mindspore::RELU && + act_prim->get_activation_type() != mindspore::RELU6)) { + return nullptr; + } + AnfNodePtr pre_node = act_node->input(1); if (CheckIfAnfNodeIsNull(pre_node) != lite::RET_OK) { return nullptr; @@ -64,31 +62,19 @@ const AnfNodePtr ConvActivationFusion::Process(const FuncGraphPtr &func_graph, c return nullptr; } auto conv_node = pre_node->cast<CNodePtr>(); - auto node_type = GetCNodeType(conv_node); - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(conv_node->input(0)); - MS_ASSERT(primitive_c); - if (node_type == schema::PrimitiveType_Conv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c); - MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) { - primc->SetActivationType(act_primitivec->GetType()); - return pre_node; - } - } else if (node_type == schema::PrimitiveType_DepthwiseConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c); - MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) { - primc->SetActivationType(act_primitivec->GetType()); - return pre_node; - } - } else if (node_type == schema::PrimitiveType_DeConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive_c); - MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) { - primc->SetActivationType(act_primitivec->GetType()); + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion) || + CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion)) { + auto prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + MS_ASSERT(prim != nullptr); + if (prim->GetAttr(ops::kActivationType) == nullptr || + static_cast<mindspore::ActivationType>(GetValue<int64_t>(prim->GetAttr(ops::kActivationType))) == + mindspore::NO_ACTIVATION) { + if
(act_prim->get_activation_type() == mindspore::RELU) { + prim->AddAttr(ops::kActivationType, MakeValue<int64_t>(mindspore::RELU)); + } else { + prim->AddAttr(ops::kActivationType, MakeValue<int64_t>(mindspore::RELU6)); + } return pre_node; } } else { diff --git a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h index 39077fe9a9..c017a9ef54 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_activation_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ #include <string> #include "backend/optimizer/common/optimizer.h" -#include "schema/inner/model_generated.h" namespace mindspore { namespace opt { diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc index 89d0e2b536..7baaaaf26a 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,16 +15,13 @@ */ #include "tools/optimizer/fusion/conv_biasadd_fusion.h" #include <memory> -#include "src/ops/conv2d.h" -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/deconv2d.h" -#include "src/ops/primitive_c.h" +#include "ops/fusion/add_fusion.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" -#include "src/ops/add.h" namespace mindspore::opt { namespace { @@ -35,17 +32,17 @@ constexpr size_t kConvBiasIndex = 3; constexpr size_t kConvNoBiasLen = 3; constexpr size_t kConvWithBiasLen = 4; bool IsConvExtendNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Conv2D || type == schema::PrimitiveType_DepthwiseConv2D || - type == schema::PrimitiveType_DeConv2D; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimConv2DFusion) || + CheckPrimitiveType(anf_node, prim::kPrimConv2dTransposeFusion); } return false; } bool IsAddNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Add || type == schema::PrimitiveType_BiasAdd; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimAddFusion) || CheckPrimitiveType(anf_node, prim::kPrimBiasAdd); } return false; } @@ -59,24 +56,18 @@ int Get_Kenrnel_nums(const CNodePtr &conv_node) { MS_ASSERT(value != nullptr); auto primitive = value->cast<PrimitiveCPtr>(); MS_ASSERT(primitive != nullptr); - auto type = (schema::PrimitiveType)primitive->Type(); - if (type == schema::PrimitiveType_Conv2D) { - 
MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive); + if (primitive->isa<mindspore::ops::Conv2DFusion>()) { + MS_ASSERT(utils::isa<std::shared_ptr<mindspore::ops::Conv2DFusion>>(primitive)); + auto primc = utils::cast<std::shared_ptr<mindspore::ops::Conv2DFusion>>(primitive); MS_ASSERT(primc != nullptr); - return primc->GetChannelOut(); - } else if (type == schema::PrimitiveType_DepthwiseConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive); + return primc->get_out_channel(); + } else if (primitive->isa<mindspore::ops::Conv2dTransposeFusion>()) { + MS_ASSERT(utils::isa<std::shared_ptr<mindspore::ops::Conv2dTransposeFusion>>(primitive)); + auto primc = utils::cast<std::shared_ptr<mindspore::ops::Conv2dTransposeFusion>>(primitive); MS_ASSERT(primc != nullptr); - return primc->GetChannelMultiplier() * primc->GetChannelIn(); - } else if (type == schema::PrimitiveType_DeConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive); - MS_ASSERT(primc != nullptr); - return primc->GetChannelOut(); + return primc->get_out_channel(); } else { - MS_LOG(ERROR) << "Unsupported opType, " << type; + MS_LOG(ERROR) << "Unsupported opType, " << primitive->name(); return 0; } } @@ -171,12 +162,12 @@ const AnfNodePtr ConvBiasaddFusion::Process(const FuncGraphPtr &func_graph, cons if (CheckIfCNodeIsNull(add_node) != lite::RET_OK || CheckInputSize(add_node, kAddInputsLength) != lite::RET_OK) { return nullptr; } - if (GetCNodeType(add_node) == schema::PrimitiveType_Add) { - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(add_node->input(0)); - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Add>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::Add>>(primitive_c); + if (CheckPrimitiveType(add_node, prim::kPrimAddFusion)) { + auto primitive_c = GetValueNode<PrimitiveCPtr>(add_node->input(0)); + MS_ASSERT(utils::isa<std::shared_ptr<mindspore::ops::AddFusion>>(primitive_c)); + auto primc = utils::cast<std::shared_ptr<mindspore::ops::AddFusion>>(primitive_c); MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() != schema::ActivationType_NO_ACTIVATION) { + if (primc->GetAttr(ops::kActivationType) != nullptr && primc->get_activation_type() != mindspore::NO_ACTIVATION) { return add_node; } } diff --git a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h index f3f1b01a21..3e38cf3ead 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_biasadd_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
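ConvBiasaddFusion above folds a following Add/BiasAdd into the convolution's bias, with Get_Kenrnel_nums() supplying the per-channel length from get_out_channel(). A hedged standalone sketch of the accumulation (names and sizes are illustrative, not the pass itself):

// Illustrative bias fold: one value per output channel, conv bias may be absent.
#include <iostream>
#include <vector>

std::vector<float> FuseBias(const std::vector<float> &conv_bias,   // empty if conv had no bias
                            const std::vector<float> &add_bias) {  // length == out_channel
  std::vector<float> fused = add_bias;
  for (size_t c = 0; c < conv_bias.size() && c < fused.size(); ++c) fused[c] += conv_bias[c];
  return fused;
}

int main() {
  auto fused = FuseBias({0.5f, -1.0f}, {1.0f, 1.0f});  // out_channel == 2
  std::cout << fused[0] << " " << fused[1] << "\n";    // 1.5 0
  return 0;
}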
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc index a44352ba42..42ed168e0f 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,14 +16,12 @@ #include "tools/optimizer/fusion/conv_bn_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" +#include "ops/batch_norm.h" +#include "ops/fused_batch_norm.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" -#include "src/ops/batch_norm.h" -#include "src/ops/fused_batchnorm.h" namespace mindspore::opt { namespace { @@ -36,10 +34,12 @@ constexpr size_t kTFBNMeanIndex = 4; constexpr size_t kTFBNVarIndex = 5; constexpr const float EPS = 1e-8; constexpr const float POW_NUM = 0.5; +constexpr const float DEFAULT_EPS = 1e-5; bool IsBatchNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_BatchNorm || type == schema::PrimitiveType_FusedBatchNorm; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimBatchNorm) || + CheckPrimitiveType(anf_node, prim::kPrimFusedBatchNorm); } return false; } @@ -153,8 +153,8 @@ void ConvBatchNormFusion::InitTransParam(const CNodePtr &bn_node, int kernel_num AnfNodePtr bn_scale_node = nullptr; AnfNodePtr bn_bias_node = nullptr; float eps = 0; - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(bn_node->input(0)); - if (GetCNodeType(bn_node) == schema::PrimitiveType_BatchNorm) { + auto primitive_c = GetValueNode<PrimitiveCPtr>(bn_node->input(0)); + if (CheckPrimitiveType(bn_node, prim::kPrimBatchNorm)) { bn_mean_node = bn_node->input(kCaffeBNMeanIndex); bn_variance_node = bn_node->input(kCaffeBNVarIndex); AnfNodePtr bn_scale_factor_node = bn_node->input(kCaffeBNScaleFactorIndex); @@ -162,21 +162,29 @@ void ConvBatchNormFusion::InitTransParam(const CNodePtr &bn_node, int kernel_num CheckIfNodeIsParam(bn_scale_factor_node) != lite::RET_OK) { return; } - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::BatchNorm>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::BatchNorm>>(primitive_c); + MS_ASSERT(utils::isa<std::shared_ptr<mindspore::ops::BatchNorm>>(primitive_c)); + auto primc = utils::cast<std::shared_ptr<mindspore::ops::BatchNorm>>(primitive_c); MS_ASSERT(primc != nullptr); - eps = primc->GetEpsilon(); + if (primc->GetAttr("epsilon") != nullptr) { + eps = primc->get_epsilon(); + } else { + eps = DEFAULT_EPS; + } CalEstimatedData(bn_mean_node, bn_scale_factor_node); CalEstimatedData(bn_variance_node, bn_scale_factor_node); - } else if (GetCNodeType(bn_node) == schema::PrimitiveType_FusedBatchNorm) { + } else if (CheckPrimitiveType(bn_node, prim::kPrimFusedBatchNorm)) { bn_scale_node = bn_node->input(kTFBNScaleIndex); bn_bias_node = bn_node->input(kTFBNBiasIndex); bn_mean_node = bn_node->input(kTFBNMeanIndex); bn_variance_node = bn_node->input(kTFBNVarIndex); - 
MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::FusedBatchNorm>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::FusedBatchNorm>>(primitive_c); + MS_ASSERT(utils::isa<std::shared_ptr<mindspore::ops::FusedBatchNorm>>(primitive_c)); + auto primc = utils::cast<std::shared_ptr<mindspore::ops::FusedBatchNorm>>(primitive_c); MS_ASSERT(primc != nullptr); - eps = primc->GetEpsilon(); + if (primc->GetAttr("epsilon") != nullptr) { + eps = primc->get_epsilon(); + } else { + eps = DEFAULT_EPS; + } } else { MS_LOG(ERROR) << "not caffe or tf batchnorm op."; lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_INVALID_OP_ATTR); diff --git a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.h index 3646927c2d..5d8f96aa2c 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_bn_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.cc index a312d8cd9d..169ff0bf35 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,9 +17,8 @@ #include "tools/optimizer/fusion/conv_conv_fusion.h" #include <functional> #include <memory> -#include "schema/inner/model_generated.h" -#include "src/ops/conv2d.h" -#include "src/ops/primitive_c.h" +#include <vector> +#include "ops/fusion/conv2d_fusion.h" #include "tools/optimizer/common/gllo_utils.h" namespace mindspore::opt { @@ -35,9 +34,22 @@ constexpr size_t kNHWC_WDim = 2; constexpr size_t kNHWC_CDim = 3; bool IsCommonConvNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Conv2D; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + if (!CheckPrimitiveType(anf_node, prim::kPrimConv2DFusion)) { + return false; + } + std::shared_ptr<ops::Conv2DFusion> conv = nullptr; + if (utils::isa<CNodePtr>(anf_node)) { + auto c_node = anf_node->cast<CNodePtr>(); + conv = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(c_node->input(0)); + } else if (utils::isa<ValueNodePtr>(anf_node)) { + conv = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(anf_node); + } + if (conv == nullptr) { + return false; + } + return conv->GetAttr(ops::kIsDepthWise) == nullptr || !GetValue<bool>(conv->GetAttr(ops::kIsDepthWise)); } return false; } @@ -147,14 +159,6 @@ STATUS GenNewConvWeight(const ParameterPtr &down_weight_node, const ParameterPtr new_weight_node->set_abstract(down_weight_node->abstract()); return RET_OK; } -} // namespace -const BaseRef ConvConvFusion::DefinePattern() const { - auto up_conv_var = std::make_shared<CondVar>(IsCommonConvNode); - auto down_conv_var = std::make_shared<CondVar>(IsCommonConvNode); - auto down_weight_var = std::make_shared<CondVar>(IsParamNode); - auto down_bias_var = 
std::make_shared<SeqVar>(); - return VectorRef({down_conv_var, up_conv_var, down_weight_var, down_bias_var}); -} void ReplaceParametersAndNodes(const FuncGraphPtr &func_graph, const CNodePtr &up_conv_cnode, const CNodePtr &down_conv_cnode) { @@ -185,25 +189,33 @@ void ReplaceParametersAndNodes(const FuncGraphPtr &func_graph, const CNodePtr &u down_conv_cnode->add_input(new_bias_parameter); } } else { - MS_LOG(INFO) << "up conv node has no bias,no need replace bias."; + MS_LOG(INFO) << "up conv node has no bias, no need to replace bias."; } MS_LOG(INFO) << "fusion node success:" << down_conv_cnode->fullname_with_scope(); // delete up conv node manager->Replace(up_conv_cnode, up_conv_cnode->input(1)); - return; } bool IsPrimitiveProper(const CNodePtr &up_conv_cnode, const CNodePtr &down_conv_cnode) { - auto down_primitive = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(down_conv_cnode->input(0)); - auto down_conv_primitive = utils::cast<std::shared_ptr<lite::Conv2D>>(down_primitive); - auto up_primitive = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(up_conv_cnode->input(0)); - auto up_conv_primitive = utils::cast<std::shared_ptr<lite::Conv2D>>(up_primitive); - return up_conv_primitive != nullptr && - up_conv_primitive->GetActivationType() == schema::ActivationType_NO_ACTIVATION && - up_conv_primitive->GetGroup() == 1 && down_conv_primitive->GetGroup() == 1 && - up_conv_primitive->GetKernelW() == down_conv_primitive->GetKernelW() && - up_conv_primitive->GetKernelH() == down_conv_primitive->GetKernelH() && - up_conv_primitive->GetPadMode() == down_conv_primitive->GetPadMode(); + auto down_conv_primitive = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(down_conv_cnode->input(0)); + MS_ASSERT(down_conv_primitive != nullptr); + auto up_conv_primitive = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(up_conv_cnode->input(0)); + MS_ASSERT(up_conv_primitive != nullptr); + int64_t up_pad_mode = up_conv_primitive->GetAttr(ops::kPadMode) == nullptr ? 0 : up_conv_primitive->get_pad_mode(); + int64_t down_pad_mode = + down_conv_primitive->GetAttr(ops::kPadMode) == nullptr ? 0 : down_conv_primitive->get_pad_mode(); + return (up_conv_primitive->GetAttr(ops::kActivationType) == nullptr || + up_conv_primitive->get_activation_type() == mindspore::NO_ACTIVATION) && + up_conv_primitive->get_group() == 1 && down_conv_primitive->get_group() == 1 && up_pad_mode == down_pad_mode; +} +} // namespace + +const BaseRef ConvConvFusion::DefinePattern() const { + auto up_conv_var = std::make_shared<CondVar>(IsCommonConvNode); + auto down_conv_var = std::make_shared<CondVar>(IsCommonConvNode); + auto down_weight_var = std::make_shared<CondVar>(IsParamNode); + auto down_bias_var = std::make_shared<SeqVar>(); + return VectorRef({down_conv_var, up_conv_var, down_weight_var, down_bias_var}); } // conv->conv1x1 fusion conv (w1x+b)w2+c = (w1*w2)*x+(w2*b+c) diff --git a/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.h index efd375d656..e75353f09f 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_conv_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
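The identity in the comment above, (w1*x + b)*w2 + c = (w1*w2)*x + (w2*b + c), is what licenses the conv->conv1x1 fusion. A small numeric check, treating the 1x1 convolutions as plain matrices (test values are arbitrary):

// Verifies W2*(W1*x + b1) + b2 == (W2*W1)*x + (W2*b1 + b2) on toy data.
#include <iostream>
#include <vector>

using Mat = std::vector<std::vector<float>>;
using Vec = std::vector<float>;

Vec Apply(const Mat &w, const Vec &x, const Vec &b) {
  Vec y(w.size(), 0.f);
  for (size_t i = 0; i < w.size(); ++i) {
    for (size_t j = 0; j < x.size(); ++j) y[i] += w[i][j] * x[j];
    y[i] += b[i];
  }
  return y;
}

Mat MatMul(const Mat &a, const Mat &b) {
  Mat c(a.size(), Vec(b[0].size(), 0.f));
  for (size_t i = 0; i < a.size(); ++i)
    for (size_t k = 0; k < b.size(); ++k)
      for (size_t j = 0; j < b[0].size(); ++j) c[i][j] += a[i][k] * b[k][j];
  return c;
}

int main() {
  Mat w1 = {{1, 2}, {3, 4}}, w2 = {{0.5, -1}, {2, 1}};
  Vec b1 = {1, -1}, b2 = {0.25, 0.75}, x = {2, 3};
  Vec two_step = Apply(w2, Apply(w1, x, b1), b2);
  Vec fused = Apply(MatMul(w2, w1), x, Apply(w2, b1, b2));  // W2*b1 + b2 via Apply
  for (size_t i = 0; i < two_step.size(); ++i)
    std::cout << two_step[i] << " vs " << fused[i] << "\n";  // identical values
  return 0;
}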
@@ -19,7 +19,6 @@ #include <string> #include "backend/optimizer/common/optimizer.h" -#include "schema/inner/model_generated.h" namespace mindspore { namespace opt { diff --git a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc index af52cb2818..ac29c9f736 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,9 +16,7 @@ #include "tools/optimizer/fusion/conv_scale_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" @@ -29,9 +27,9 @@ constexpr size_t kScaleBiasIndex = 3; constexpr size_t kScaleNoBiasLen = 3; constexpr size_t kScaleWithBiasLen = 4; bool IsScaleNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Scale; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimScaleFusion); } return false; } @@ -62,12 +60,12 @@ void ConvScaleFusion::InitTransParam(const CNodePtr &scale_node, int kernel_num, return; } if (!scale_weight_node->isa<Parameter>()) { - MS_LOG(ERROR) << "scale weight node not paramter node"; + MS_LOG(ERROR) << "scale weight node not parameter node"; lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_INVALID_OP_ATTR); return; } if (scale_bias_node != nullptr && !scale_bias_node->isa<Parameter>()) { - MS_LOG(ERROR) << "scale bias node not paramter node"; + MS_LOG(ERROR) << "scale bias node not parameter node"; lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_INVALID_OP_ATTR); return; } diff --git a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.h index ac58a6db0f..ff31a3a892 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_scale_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
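ConvScaleFusion and ConvBatchNormFusion above both reduce to a per-channel scale and offset that ConvTransformFusion later folds into the weights; for batchnorm the scale is gamma/sqrt(var + eps), with DEFAULT_EPS used when the op carries no epsilon attribute. A short derivation check (values are made up):

// BN(x) = gamma*(x - mean)/sqrt(var + eps) + beta
//       = scale*x + offset, with scale = gamma/sqrt(var + eps), offset = beta - mean*scale.
#include <cmath>
#include <iostream>

int main() {
  const float gamma = 0.8f, beta = 0.1f, mean = 0.5f, var = 2.0f;
  const float eps = 1e-5f;  // DEFAULT_EPS fallback from the hunk above
  const float scale = gamma / std::sqrt(var + eps);  // folded into the conv weights
  const float offset = beta - mean * scale;          // folded into the conv bias
  const float x = 3.0f;
  const float bn = gamma * (x - mean) / std::sqrt(var + eps) + beta;
  std::cout << bn << " == " << scale * x + offset << "\n";  // same value
  return 0;
}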
diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc index 16465a2bbe..872052d0d9 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.cc @@ -16,12 +16,9 @@ #include "tools/optimizer/fusion/conv_transform_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" -#include "src/ops/conv2d.h" -#include "src/ops/deconv2d.h" -#include "src/ops/depthwise_conv2d.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" @@ -33,33 +30,75 @@ constexpr size_t kConvNoBiasLen = 3; constexpr size_t kConvWithBiasLen = 4; int GetOutChannels(const CNodePtr &conv_node) { MS_ASSERT(conv_node != nullptr); - auto value_primitive = conv_node->input(0); - auto value_node = value_primitive->cast<ValueNodePtr>(); + auto value_node = conv_node->input(0); MS_ASSERT(value_node != nullptr); - auto value = value_node->value(); - MS_ASSERT(value != nullptr); - auto primitive = value->cast<PrimitiveCPtr>(); - MS_ASSERT(primitive != nullptr); - auto type = (schema::PrimitiveType)primitive->Type(); + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + auto conv_prim = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(value_node); + MS_ASSERT(conv_prim != nullptr); + if (conv_prim->GetAttr(ops::kOutChannel) == nullptr) { + return 0; + } + return conv_prim->get_out_channel(); + } else if (CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion)) { + auto conv_prim = GetValueNode<std::shared_ptr<ops::Conv2dTransposeFusion>>(value_node); + MS_ASSERT(conv_prim != nullptr); + if (conv_prim->GetAttr(ops::kOutChannel) == nullptr) { + return 0; + } + return conv_prim->get_out_channel(); + } + return 0; +} - if (type == schema::PrimitiveType_Conv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive); - MS_ASSERT(primc != nullptr); - return primc->GetChannelOut(); - } else if (type == schema::PrimitiveType_DeConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive); - MS_ASSERT(primc != nullptr); - return primc->GetChannelOut(); - } else if (type == schema::PrimitiveType_DepthwiseConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive); - MS_ASSERT(primc != nullptr); - return primc->GetChannelMultiplier() * primc->GetChannelIn(); +void GenerateNewWeightConv2D(float *dst_weight, const float *conv_weight, const float *scale_weight, FmkType fmk, + int weight_shape_size, int kernel_num) { + if (dst_weight == nullptr || conv_weight == nullptr || scale_weight == nullptr) { + return; + } + if (fmk == lite::converter::FmkType_TF) { + for (int i = 0; i < weight_shape_size; i++) { + dst_weight[i] = conv_weight[i] * scale_weight[i % kernel_num]; + } } else { - MS_LOG(ERROR) << "Unsupported opType, " << type; - return 0; + auto kernel_size = weight_shape_size / kernel_num; + for (int i = 0; i < kernel_num; i++) { + for (int j = 0; j < kernel_size; j++) { + dst_weight[i * kernel_size + j] = conv_weight[i * 
kernel_size + j] * scale_weight[i]; + } + } + } +} + +void GenerateNewWeightConv2DTranspose(float *dst_weight, const float *scale_weight, + const ParamValueLitePtr &weight_tensor, FmkType fmk, int group, int kernel_num) { + if (dst_weight == nullptr || scale_weight == nullptr || weight_tensor == nullptr) { + return; + } + auto weight_data = reinterpret_cast<float *>(weight_tensor->tensor_addr()); + if (fmk == lite::converter::FmkType_TF) { + auto cin_group = weight_tensor->tensor_shape()[3] / group; + int area_size = weight_tensor->tensor_shape()[0] * weight_tensor->tensor_shape()[1]; + for (int j = 0; j < area_size; j++) { + for (int i = 0; i < kernel_num; ++i) { + for (int k = 0; k < cin_group; ++k) { + dst_weight[k + i * cin_group + j * kernel_num * cin_group] = + weight_data[k + i * cin_group + j * kernel_num * cin_group] * scale_weight[i]; + } + } + } + } else { + auto cin_group = weight_tensor->tensor_shape()[0] / group; + int area_size = weight_tensor->tensor_shape()[2] * weight_tensor->tensor_shape()[3]; + int cout_size = kernel_num * area_size; + for (int k = 0; k < cin_group; ++k) { + for (int i = 0; i < kernel_num; ++i) { + auto row_addr = weight_data + k * cout_size + i * area_size; + auto new_row_addr = dst_weight + k * cout_size + i * area_size; + for (int j = 0; j < area_size; j++) { + new_row_addr[j] = row_addr[j] * scale_weight[i]; + } + } + } } } } // namespace @@ -222,10 +261,15 @@ void ConvTransformFusion::GenNewConvTensor(const FuncGraphPtr &func_graph, const new_weight_paramter->set_name(conv_node->fullname_with_scope() + conv_weight_node->fullname_with_scope()); conv_node->set_input(kConvWeightIndex, new_weight_paramter); } + void ConvTransformFusion::CalNewWeightTensor(const CNodePtr &conv_node, const ParamValueLitePtr &weight_tensor, int kernel_num, const float *trans_scale) const { MS_ASSERT(weight_data != nullptr); MS_ASSERT(trans_scale != nullptr); + if (weight_tensor->tensor_shape().size() != 4) { + MS_LOG(ERROR) << "weight tensor shape error"; + return; + } auto weight_shape_size = weight_tensor->tensor_shape_size(); auto tmp_weight_data = new (std::nothrow) float[weight_shape_size]; if (tmp_weight_data == nullptr) { @@ -241,63 +285,18 @@ void ConvTransformFusion::CalNewWeightTensor(const CNodePtr &conv_node, const Pa return; } auto weight_data = reinterpret_cast<float *>(weight_tensor->tensor_addr()); - auto conv_type = GetCNodeType(conv_node); - if (conv_type == schema::PrimitiveType_DeConv2D) { - auto value_node = conv_node->input(0)->cast<ValueNodePtr>(); - MS_ASSERT(value_node != nullptr); - auto value = value_node->value(); - MS_ASSERT(value != nullptr); - auto primitive = value->cast<PrimitivePtr>(); - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DeConv2D>>(primitive); - MS_ASSERT(primc != nullptr); - if (weight_tensor->tensor_shape().size() != 4) { - MS_LOG(ERROR) << "deconv2d weight tensor shape error"; - delete[] tmp_weight_data; - return; - } - if (this->fmk_type_ == lite::converter::FmkType_TF) { - auto group = primc->GetGroup(); - auto cin_group = weight_tensor->tensor_shape()[3] / group; - int area_size = weight_tensor->tensor_shape()[0] * weight_tensor->tensor_shape()[1]; - for (int j = 0; j < area_size; j++) { - for (int i = 0; i < kernel_num; ++i) { - for (int k = 0; k < cin_group; ++k) { - tmp_weight_data[k + i * cin_group + j * kernel_num * cin_group] = - weight_data[k + i * cin_group + j * kernel_num * cin_group] * trans_scale[i]; - } - } - } - } 
else { - auto group = primc->GetGroup(); - auto cin_group = weight_tensor->tensor_shape()[0] / group; - int area_size = weight_tensor->tensor_shape()[2] * weight_tensor->tensor_shape()[3]; - int cout_size = kernel_num * area_size; - for (int k = 0; k < cin_group; ++k) { - for (int i = 0; i < kernel_num; ++i) { - auto row_addr = weight_data + k * cout_size + i * area_size; - auto new_row_addr = tmp_weight_data + k * cout_size + i * area_size; - for (int j = 0; j < area_size; j++) { - new_row_addr[j] = row_addr[j] * trans_scale[i]; - } - } - } - } - } else { - if (this->fmk_type_ == lite::converter::FmkType_TF) { - for (int i = 0; i < weight_shape_size; i++) { - tmp_weight_data[i] = weight_data[i] * trans_scale[i % kernel_num]; - } - } else { - auto kernel_size = weight_shape_size / kernel_num; - for (int i = 0; i < kernel_num; i++) { - for (int j = 0; j < kernel_size; j++) { - tmp_weight_data[i * kernel_size + j] = weight_data[i * kernel_size + j] * trans_scale[i]; - } - } - } + auto conv_prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + MS_ASSERT(conv_prim != nullptr); + bool is_depth_wise = + conv_prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(conv_prim->GetAttr(ops::kIsDepthWise)); + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + GenerateNewWeightConv2D(tmp_weight_data, weight_data, trans_scale, fmk_type_, weight_shape_size, kernel_num); + } else if (CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion) && !is_depth_wise) { + auto conv_primc = conv_prim->cast<std::shared_ptr<ops::Conv2dTransposeFusion>>(); + MS_ASSERT(conv_primc != nullptr); + auto group = conv_primc->GetAttr(ops::kGroup) == nullptr ? 1 : conv_primc->get_group(); + GenerateNewWeightConv2DTranspose(tmp_weight_data, trans_scale, weight_tensor, fmk_type_, group, kernel_num); } - auto ret = memcpy_s(weight_data, data_size, tmp_weight_data, data_size); if (ret != EOK) { MS_LOG(ERROR) << "memcpy error: " << ret; diff --git a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h index c518f30d5b..4c8dc9707d 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_transform_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc index 796a928f72..3cd97e9ec2 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
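The GenerateNewWeightConv2D helper factored out above applies the same per-output-channel scale under two layouts: TF-style weights keep the output channel innermost (index % kernel_num), while the other layout stores one contiguous block of kernel_size values per channel. A standalone demonstration with toy data:

// Shows which weight positions each channel's scale touches in the two layouts.
#include <iostream>
#include <vector>

int main() {
  const int kernel_num = 2, kernel_size = 3;
  std::vector<float> w(kernel_num * kernel_size, 1.0f);
  std::vector<float> scale = {10.f, 100.f};

  std::vector<float> tf = w, blocked = w;
  for (int i = 0; i < kernel_num * kernel_size; ++i) tf[i] = w[i] * scale[i % kernel_num];
  for (int i = 0; i < kernel_num; ++i)
    for (int j = 0; j < kernel_size; ++j)
      blocked[i * kernel_size + j] = w[i * kernel_size + j] * scale[i];

  for (float v : tf) std::cout << v << " ";       // 10 100 10 100 10 100
  std::cout << "\n";
  for (float v : blocked) std::cout << v << " ";  // 10 10 10 100 100 100
  std::cout << "\n";
  return 0;
}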
@@ -16,20 +16,17 @@ #include "tools/optimizer/fusion/conv_tuple_activation_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" -#include "src/ops/conv2d.h" -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/activation.h" -#include "schema/inner/model_generated.h" +#include "ops/fusion/activation.h" +#include "ops/fusion/conv2d_fusion.h" #include "tools/optimizer/common/gllo_utils.h" namespace mindspore::opt { namespace { constexpr size_t kActivationInputsLength = 2; bool IsTupleGetItemNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_TupleGetItem; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimTupleGetItem); } return false; } @@ -56,12 +53,13 @@ const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_gra return nullptr; } - auto primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(act_node->input(0)); - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Activation>>(primitivec)); - auto act_primitivec = utils::cast<std::shared_ptr<mindspore::lite::Activation>>(primitivec); - MS_ASSERT(act_primitivec != nullptr); - if (act_primitivec->GetType() != schema::ActivationType_RELU && - act_primitivec->GetType() != schema::ActivationType_RELU6) { + if (!CheckPrimitiveType(act_node, prim::kPrimActivation)) { + return nullptr; + } + auto act_prim = GetValueNode<std::shared_ptr<mindspore::ops::Activation>>(act_node->input(0)); + MS_ASSERT(act_prim != nullptr); + if (act_prim->GetAttr(ops::kActivationType) == nullptr || + (act_prim->get_activation_type() != mindspore::RELU && act_prim->get_activation_type() != mindspore::RELU6)) { return nullptr; } AnfNodePtr tuple_node = act_node->input(1); @@ -76,24 +74,11 @@ const AnfNodePtr ConvTupleActivationFusion::Process(const FuncGraphPtr &func_gra return nullptr; } auto conv_cnode = conv_node->cast<CNodePtr>(); - auto node_type = GetCNodeType(conv_cnode); - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(conv_cnode->input(0)); - MS_ASSERT(primitive_c); - if (node_type == schema::PrimitiveType_Conv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::Conv2D>>(primitive_c); - MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) { - primc->SetActivationType(act_primitivec->GetType()); - conv_node->set_abstract(act_node->abstract()); - return conv_node; - } - } else if (node_type == schema::PrimitiveType_DepthwiseConv2D) { - MS_ASSERT(utils::isa<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c)); - auto primc = utils::cast<std::shared_ptr<mindspore::lite::DepthwiseConv2D>>(primitive_c); + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + auto primc = GetValueNode<std::shared_ptr<mindspore::ops::Conv2DFusion>>(conv_cnode->input(0)); MS_ASSERT(primc != nullptr); - if (primc->GetActivationType() == schema::ActivationType_NO_ACTIVATION) { - primc->SetActivationType(act_primitivec->GetType()); + if (primc->GetAttr(ops::kActivationType) == nullptr || primc->get_activation_type() == mindspore::NO_ACTIVATION) { + primc->set_activation_type(act_prim->get_activation_type()); conv_node->set_abstract(act_node->abstract()); return conv_node; } diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h 
b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h index 74b499415f..5b2027f2ce 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_tuple_activation_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ #include <string> #include "backend/optimizer/common/optimizer.h" -#include "schema/inner/model_generated.h" namespace mindspore { namespace opt { diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc b/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc index 0f81b00946..3e351b90bb 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,9 +15,7 @@ */ #include "tools/optimizer/fusion/conv_tuplegetitem_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" @@ -25,9 +23,9 @@ namespace mindspore::opt { namespace { constexpr size_t kTupleGetItemLen = 3; bool IsTupleGetItemNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_TupleGetItem; + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim::kPrimTupleGetItem); } return false; } diff --git a/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h b/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h index 2d04aa90f2..b0028fa51c 100644 --- a/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/conv_tuplegetitem_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc index 69acb8dec7..1d744066a2 100644 --- a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
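The pattern LayerNormFusion matches below is the decomposed form mean -> SquaredDifference -> mean -> Add(eps) -> Rsqrt -> mul/sub, which is algebraically plain layer normalization, so the pass can collapse the whole subgraph into one LayerNormFusion node. A numeric sketch of that equivalence (input values, gamma, beta are made up):

// mean1 = mean(x); var = mean((x - mean1)^2); y = (x - mean1)*rsqrt(var + eps)*gamma + beta.
#include <cmath>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> x = {1, 2, 3, 4};
  const float eps = 1e-5f, gamma = 1.5f, beta = 0.25f;

  float mean1 = 0, var = 0;
  for (float v : x) mean1 += v;
  mean1 /= x.size();
  for (float v : x) var += (v - mean1) * (v - mean1);  // SquaredDifference + Mean
  var /= x.size();
  const float rsqrt = 1.0f / std::sqrt(var + eps);     // Add(eps) + Rsqrt

  for (float v : x) std::cout << (v - mean1) * rsqrt * gamma + beta << " ";
  std::cout << "\n";  // normalized, scaled, shifted output
  return 0;
}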
@@ -15,69 +15,103 @@ */ #include "tools/optimizer/fusion/layer_norm_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" +#include "ops/fusion/layer_norm_fusion.h" +#include "ops/fusion/reduce_fusion.h" +#include "ops/rsqrt.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" #include "securec/include/securec.h" -#include "src/ops/add.h" -#include "src/ops/mul.h" -#include "src/ops/rsqrt.h" -#include "src/ops/reduce.h" -#include "src/ops/sub.h" namespace mindspore { namespace opt { namespace { - bool IsAddNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Add; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimAddFusion); } return false; } bool IsSquaredDifferenceNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_SquaredDifference; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimSquaredDifference); } return false; } bool IsRsqrtNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Rsqrt; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimRsqrt); } return false; } bool IsMulNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Mul; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimMulFusion); } return false; } bool IsSubNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Sub; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimSubFusion); } return false; } + +lite::STATUS GetReduceAxes(const BaseRef &n, std::vector<int> *axes) { + MS_ASSERT(axes != nullptr); + if (utils::isa<ParameterPtr>(n)) { + auto axes_param = utils::cast<ParameterPtr>(n); + if (!axes_param->has_default() || axes_param->default_param() == nullptr) { + return lite::RET_NOT_SUPPORT; + } + auto axes_value = axes_param->default_param()->cast<ParamValueLitePtr>(); + if (axes_value == nullptr) { + return lite::RET_ERROR; + } + axes->resize(axes_value->tensor_shape()[0]); + if (memcpy_s(axes->data(), axes_value->tensor_size(), axes_value->tensor_addr(), axes_value->tensor_size()) == + EOK) { + return lite::RET_OK; + } + } + if (utils::isa<ValueNodePtr>(n)) { + auto axes_value_node = utils::cast<ValueNodePtr>(n); + auto axes_content = CastToInt(axes_value_node->value()); axes->resize(axes_content.size()); + if (memcpy_s(axes->data(), axes_content.size() * sizeof(int), axes_content.data(), + axes_content.size() * sizeof(int)) == EOK) { + return lite::RET_OK; + } + } + return lite::RET_ERROR; +} + +bool IsReduceNode(const EquivPtr &equiv, const VarPtr &input_prim, const VarPtr &input_axes, std::vector<int> *axes) { + MS_ASSERT(equiv != nullptr && input_prim != nullptr); + MS_ASSERT(input_axes != nullptr && axes != nullptr); + auto reduce_value = utils::cast<AnfNodePtr>((*equiv)[input_prim]); + MS_ASSERT(reduce_value != nullptr); + auto mean2_primitive =
GetValueNode<std::shared_ptr<ops::ReduceFusion>>(reduce_value); + if (mean2_primitive == nullptr || mean2_primitive->GetAttr(ops::kMode) == nullptr || + mean2_primitive->get_mode() != mindspore::Reduce_Mean) { + return false; + } + if (GetReduceAxes((*equiv)[input_axes], axes) != lite::RET_OK) { + return false; + } + return true; +} } // namespace const BaseRef LayerNormFusion::DefinePattern() const { - VectorRef mean1_ref = VectorRef({mean1_, input_}); + VectorRef mean1_ref = VectorRef({mean1_, input_, mean1_axes_}); auto squared_diffference1 = std::make_shared<CondVar>(IsSquaredDifferenceNode); VectorRef squared_diffference1_ref = VectorRef({squared_diffference1, input_, mean1_ref}); auto mul1 = std::make_shared<CondVar>(IsMulNode); - VectorRef mean2_ref = VectorRef({mean2_, squared_diffference1_ref}); + VectorRef mean2_ref = VectorRef({mean2_, squared_diffference1_ref, mean2_axes_}); auto add1 = std::make_shared<CondVar>(IsAddNode); VectorRef add1_ref = VectorRef({add1, mean2_ref, epsilon_}); auto rsqrt1 = std::make_shared<CondVar>(IsRsqrtNode); @@ -98,15 +132,9 @@ CNodePtr LayerNormFusion::CreateLayerNormNode(const FuncGraphPtr &func_graph, co int begin_norm_axis, int begin_params_axis) const { MS_ASSERT(func_graph != nullptr); MS_ASSERT(equiv != nullptr); - auto layer_norm_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::LayerNormT> attr = std::make_unique<schema::LayerNormT>(); - attr->epsilon = epsilon; - attr->begin_norm_axis = begin_norm_axis; - attr->begin_params_axis = begin_params_axis; - layer_norm_primitive->value.type = schema::PrimitiveType_LayerNorm; - layer_norm_primitive->value.value = attr.release(); - auto layer_norm_cvalue = lite::PrimitiveC::Create(layer_norm_primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(layer_norm_cvalue)); + auto layer_norm_primitive = std::make_shared<ops::LayerNormFusion>(); + layer_norm_primitive->Init(begin_norm_axis, begin_params_axis, epsilon); + auto value_node = NewValueNode(layer_norm_primitive); std::vector<AnfNodePtr> new_node_inputs = {value_node}; auto input_node = utils::cast<AnfNodePtr>((*equiv)[input_]); MS_ASSERT(input_node != nullptr); @@ -197,31 +225,15 @@ bool LayerNormFusion::CheckPattern(const EquivPtr &equiv, float *epsilon, int *b auto epsilon_data = reinterpret_cast<float *>(epsilon_tensor->tensor_addr()); auto epsilon_shape = epsilon_tensor->tensor_shape(); // mean2 - auto mean2_value = utils::cast<AnfNodePtr>((*equiv)[mean2_]); - MS_ASSERT(mean2_value != nullptr); - auto mean2_primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(mean2_value); - if (!utils::isa<std::shared_ptr<mindspore::lite::Reduce>>(mean2_primitivec)) { - return false; - } - auto mean2_op = utils::cast<std::shared_ptr<mindspore::lite::Reduce>>(mean2_primitivec); - MS_ASSERT(mean2_op != nullptr); - if (mean2_op->GetMode() != schema::ReduceMode_ReduceMean) { + std::vector<int> mean2_axes; + if (!IsReduceNode(equiv, mean2_, mean2_axes_, &mean2_axes)) { return false; } - auto mean2_axes = mean2_op->GetAxes(); // mean1 - auto mean1_value = utils::cast<AnfNodePtr>((*equiv)[mean1_]); - MS_ASSERT(mean1_value != nullptr); - auto mean1_primitivec = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(mean1_value); - if (!utils::isa<std::shared_ptr<mindspore::lite::Reduce>>(mean1_primitivec)) { - return false; - } - auto mean1_op = utils::cast<std::shared_ptr<mindspore::lite::Reduce>>(mean1_primitivec); - MS_ASSERT(mean1_op != nullptr); - if (mean1_op->GetMode() != 
schema::ReduceMode_ReduceMean) { + std::vector<int> mean1_axes; + if (!IsReduceNode(equiv, mean1_, mean1_axes_, &mean1_axes)) { return false; } - auto mean1_axes = mean1_op->GetAxes(); auto input_node = utils::cast<AnfNodePtr>((*equiv)[input_]); MS_ASSERT(input_node != nullptr); if (!utils::isa<CNodePtr>(input_node)) { diff --git a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h index bf0960d79c..75d6bb44d6 100644 --- a/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/layer_norm_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,7 +32,9 @@ class LayerNormFusion : public PatternProcessPass { : PatternProcessPass(name, multigraph) { input_ = std::make_shared<Var>(); mean1_ = std::make_shared<Var>(); + mean1_axes_ = std::make_shared<Var>(); mean2_ = std::make_shared<Var>(); + mean2_axes_ = std::make_shared<Var>(); gamma_ = std::make_shared<Var>(); beta_ = std::make_shared<Var>(); epsilon_ = std::make_shared<Var>(); @@ -50,7 +52,9 @@ class LayerNormFusion : public PatternProcessPass { int begin_norm_axis, int begin_params_axis) const; VarPtr input_ = nullptr; VarPtr mean1_ = nullptr; + VarPtr mean1_axes_ = nullptr; VarPtr mean2_ = nullptr; + VarPtr mean2_axes_ = nullptr; VarPtr gamma_ = nullptr; VarPtr beta_ = nullptr; VarPtr epsilon_ = nullptr; diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc index d8a245bbf4..e7872c89f9 100644 --- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ #include "tools/optimizer/fusion/pooling_activation_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" #include "src/ops/pooling.h" #include "src/ops/activation.h" #include "schema/inner/model_generated.h" @@ -27,7 +26,7 @@ namespace { constexpr size_t kActivationInputsLength = 2; } const BaseRef PoolingActivationFusion::DefinePattern() const { - auto pooling_var = std::make_shared<CondVar>(IsPoolingNode)(); + auto pooling_var = std::make_shared<CondVar>(IsPoolingNode); auto prim = new (std::nothrow) schema::PrimitiveT(); if (prim == nullptr) { MS_LOG(ERROR) << "new primitiveT failed"; diff --git a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h index 7633e0e5aa..c441c76cc3 100644 --- a/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/pooling_activation_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
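The predicate rewrite in layer_norm_fusion.cc above is the template every fusion pass in this patch follows: instead of comparing a schema::PrimitiveType enum obtained through GetCNodeType, a pattern CondVar is bound to a callback that asks CheckPrimitiveType about the unified-IR primitive. A minimal sketch of the idiom, using only the gllo_utils helpers the file already includes (IsExampleNode and the surrounding refs are placeholder names, not part of the patch):

    // Predicate handed to CondVar; BaseRef is whatever the pattern engine matched.
    bool IsExampleNode(const BaseRef &n) {
      if (utils::isa<AnfNodePtr>(n)) {
        // True when the node's input-0 primitive matches kPrimAddFusion.
        return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimAddFusion);
      }
      return false;
    }
    // In DefinePattern(): the CondVar matches any node the predicate accepts.
    auto add_var = std::make_shared<CondVar>(IsExampleNode);
    VectorRef add_ref = VectorRef({add_var, lhs_ref, rhs_ref});

Since AnfNodePtr subsumes both CNodePtr and ValueNodePtr, the single isa<AnfNodePtr> test should cover everything the old two-way isa check accepted.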
diff --git a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc index c6749c7968..e811f7361d 100644 --- a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,11 +15,6 @@ */ #include "tools/optimizer/fusion/quant_dtype_cast_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" -#include "src/ops/conv2d.h" -#include "src/ops/depthwise_conv2d.h" -#include "src/ops/activation.h" -#include "schema/inner/model_generated.h" #include "tools/optimizer/common/gllo_utils.h" namespace mindspore::opt { namespace { diff --git a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h index 28a6294839..b60153b99c 100644 --- a/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/quant_dtype_cast_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2019-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,7 +18,6 @@ #include <string> #include "backend/optimizer/common/optimizer.h" -#include "schema/inner/model_generated.h" namespace mindspore { namespace opt { diff --git a/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc b/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc index c61617bd18..e316f81bb2 100644 --- a/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,19 +15,17 @@ */ #include "tools/optimizer/fusion/sigmoid_mul_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" -#include "src/ops/activation.h" +#include "ops/fusion/activation.h" +#include "ops/op_utils.h" #include "src/param_value_lite.h" -#include "schema/inner/model_generated.h" #include "utils/utils.h" #include "tools/optimizer/common/gllo_utils.h" namespace mindspore::opt { namespace { bool IsMulNode(const BaseRef &n) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - auto type = opt::GetCNodeType(n); - return type == schema::PrimitiveType_Mul; + if (utils::isa<AnfNodePtr>(n)) { + return CheckPrimitiveType(utils::cast<AnfNodePtr>(n), prim::kPrimMulFusion); } return false; } @@ -50,12 +48,12 @@ const AnfNodePtr SigmoidMulFusion::Process(const FuncGraphPtr &func_graph, const auto activation_cnode = mul_cnode->input(2)->cast<CNodePtr>(); MS_ASSERT(activation_cnode != nullptr); // activation must sigmoid - auto primitive = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(activation_cnode->input(0)); - auto activation_prim = utils::cast<std::shared_ptr<mindspore::lite::Activation>>(primitive); - if (activation_prim->GetType() != schema::ActivationType_SIGMOID) { + auto activation_prim = GetValueNode<std::shared_ptr<mindspore::ops::Activation>>(activation_cnode->input(0)); + if (activation_prim == nullptr || (activation_prim->GetAttr(ops::kActivationType) != nullptr && + activation_prim->get_activation_type() != mindspore::SIGMOID)) { return nullptr; } - activation_prim->SetType(schema::ActivationType_SWISH); + activation_prim->set_activation_type(mindspore::SWISH); return activation_cnode; } } // namespace mindspore::opt diff --git a/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.h b/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.h index 2a7db93915..7fdb3cc91d 100644 --- a/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/sigmoid_mul_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc b/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc index 3972d377bd..59203b52aa 100644 --- a/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
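The sigmoid_mul hunk above also shows the safe way to read an optional attribute from a unified-IR primitive: check GetAttr for nullptr before calling the typed getter, which assumes the attribute is set. A condensed sketch of the rewrite step, built only from calls visible in the diff (note the pass itself lets an unset activation type through, while this stricter variant rejects it):

    auto act = GetValueNode<std::shared_ptr<mindspore::ops::Activation>>(cnode->input(0));
    if (act == nullptr) {
      return nullptr;  // input 0 does not carry an Activation primitive
    }
    // Guard the typed getter: get_activation_type() presumes the attr exists.
    if (act->GetAttr(ops::kActivationType) == nullptr ||
        act->get_activation_type() != mindspore::SIGMOID) {
      return nullptr;
    }
    act->set_activation_type(mindspore::SWISH);  // x * sigmoid(x) == swish(x)
    return cnode;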
@@ -15,7 +15,7 @@ */ #include "tools/optimizer/fusion/tf_lstm_cell_fusion.h" #include <memory> -#include "src/ops/primitive_c.h" +#include "ops/lstm.h" #include "src/common/utils.h" #include "src/param_value_lite.h" #include "utils/utils.h" @@ -36,9 +36,10 @@ const auto &p1 = std::placeholders::_1; bool IsParameterNode(const BaseRef &n) { return utils::isa<ParameterPtr>(n); } -bool IsOpType(const BaseRef &n, const schema::PrimitiveType &type) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - return opt::GetCNodeType(n) == type; +bool IsOpType(const BaseRef &n, const PrimitivePtr &prim) { + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim); } return false; } @@ -87,21 +88,21 @@ AnfNodePtr TfLstmCellFusion::GetBodyGraphPattern(const PrimitiveVarMapPtr &primi VectorRef to_new_hidden = VectorRef({std::make_shared<Var>("Tanh"), input_forget_cell}); VectorRef new_hidden = VectorRef({std::make_shared<Var>("Mul"), output_gate, to_new_hidden}); - VectorRef new_to_cell = VectorRef({std::make_shared<Var>("Mul"), cell_smooth_new_, input_forget_cell}); - VectorRef old_to_cell = VectorRef({std::make_shared<Var>("Mul"), cell_smooth_old_, placeholders[4]}); + VectorRef new_to_cell = VectorRef({std::make_shared<Var>("Mul"), cell_zoneout_new_, input_forget_cell}); + VectorRef old_to_cell = VectorRef({std::make_shared<Var>("Mul"), cell_zoneout_old_, placeholders[4]}); VectorRef output_cell = VectorRef({std::make_shared<Var>("Add"), new_to_cell, old_to_cell}); - VectorRef new_to_hidden = VectorRef({std::make_shared<Var>("Mul"), hidden_smooth_new_, new_hidden}); - VectorRef old_to_hidden = VectorRef({std::make_shared<Var>("Mul"), hidden_smooth_old_, placeholders[5]}); + VectorRef new_to_hidden = VectorRef({std::make_shared<Var>("Mul"), hidden_zoneout_new_, new_hidden}); + VectorRef old_to_hidden = VectorRef({std::make_shared<Var>("Mul"), hidden_zoneout_old_, placeholders[5]}); VectorRef output_hidden = VectorRef({std::make_shared<Var>("Add"), new_to_hidden, old_to_hidden}); VectorRef set_item = VectorRef({std::make_shared<Var>(""), placeholders[3], placeholders[2], new_hidden}); - auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_MakeTuple)); + auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimMakeTuple)); std::vector<BaseRef> outputs = {is_make_tuple, add3, placeholders[1], add2, set_item, output_cell, output_hidden}; outputs.insert(outputs.end(), placeholders.begin() + 6, placeholders.end()); VectorRef make_tuple_node = VectorRef(outputs); - auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Return)); + auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimReturn)); VectorRef return_node = VectorRef({is_return, make_tuple_node}); VarPtr fg = std::make_shared<Var>("RootG"); @@ -286,17 +287,14 @@ STATUS TfLstmCellFusion::PopulateBiasNode(const EquivPtr &body_equiv, const Para CNodePtr TfLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const EquivPtr &body_equiv, const std::string &base_name, - const float smooth) const { + const float zoneout_cell, const float zoneout_hidden) const { MS_ASSERT(func_graph != nullptr); MS_ASSERT(equiv != nullptr); - auto lstm_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::LstmT> attr = std::make_unique<schema::LstmT>(); - attr->bidirection = false; - attr->smooth = smooth; - 
lstm_primitive->value.type = schema::PrimitiveType_Lstm; - lstm_primitive->value.value = attr.release(); - auto lstm_cvalue = lite::PrimitiveC::Create(lstm_primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(lstm_cvalue)); + auto lstm_prim = std::make_shared<ops::LSTM>(); + lstm_prim->set_bidirectional(false); + lstm_prim->set_zoneout_cell(zoneout_cell); + lstm_prim->set_zoneout_hidden(zoneout_hidden); + auto value_node = NewValueNode(lstm_prim); auto &vars = while_input_vars_; @@ -353,7 +351,7 @@ CNodePtr TfLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const return nullptr; } - if (!utils::isa<CNodePtr>(input) || GetCNodeType(input) != schema::PrimitiveType_TensorListFromTensor) { + if (!utils::isa<CNodePtr>(input) || !CheckPrimitiveType(input, prim::kPrimTensorListFromTensor)) { MS_LOG(DEBUG) << "input is not tensorlistfromtensor op"; return nullptr; } diff --git a/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h b/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h index ce8628e3f3..712ff82546 100644 --- a/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/tf_lstm_cell_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,7 +35,8 @@ class TfLstmCellFusion : public TfliteLstmCellFusion { private: AnfNodePtr GetBodyGraphPattern(const PrimitiveVarMapPtr &primitive_vars) const override; CNodePtr CreateLSTMNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const EquivPtr &body_equiv, - const std::string &base_name, const float smooth) const override; + const std::string &base_name, const float zoneout_cell, + const float zoneout_hidden) const override; lite::STATUS SplitWeights(const AnfNodePtr &weight, const ParameterPtr &weight_i, const ParameterPtr &weight_c, int hidden_size) const; diff --git a/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc b/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc index 4812ed15d0..0b13d4bcb3 100644 --- a/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc +++ b/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,9 +14,12 @@ * limitations under the License. 
*/ #include "tools/optimizer/fusion/tflite_lstm_cell_fusion.h" +#include <algorithm> #include <memory> #include <functional> -#include "src/ops/primitive_c.h" +#include "ops/lstm.h" +#include "ops/squeeze.h" +#include "ops/tuple_get_item.h" #include "src/common/utils.h" #include "src/param_value_lite.h" #include "schema/inner/model_generated.h" @@ -39,9 +42,10 @@ constexpr float EPSILON = 1e-5; bool IsParameterNode(const BaseRef &n) { return utils::isa<ParameterPtr>(n); } -bool IsOpType(const BaseRef &n, const schema::PrimitiveType &type) { - if (utils::isa<CNodePtr>(n) || utils::isa<ValueNodePtr>(n)) { - return opt::GetCNodeType(n) == type; +bool IsOpType(const BaseRef &n, const PrimitivePtr &prim) { + if (utils::isa<AnfNodePtr>(n)) { + auto anf_node = utils::cast<AnfNodePtr>(n); + return CheckPrimitiveType(anf_node, prim); } return false; } @@ -98,10 +102,10 @@ TfliteLstmCellFusion::TfliteLstmCellFusion(const std::string &name, bool multigr for (size_t i = 0; i < this->while_input_var_num_; ++i) { while_input_vars_.emplace_back(std::make_shared<Var>()); } - cell_smooth_old_ = std::make_shared<Var>(); - cell_smooth_new_ = std::make_shared<Var>(); - hidden_smooth_old_ = std::make_shared<Var>(); - hidden_smooth_new_ = std::make_shared<Var>(); + cell_zoneout_old_ = std::make_shared<Var>(); + cell_zoneout_new_ = std::make_shared<Var>(); + hidden_zoneout_old_ = std::make_shared<Var>(); + hidden_zoneout_new_ = std::make_shared<Var>(); } AnfNodePtr TfliteLstmCellFusion::GetCondGraphPattern(const PrimitiveVarMapPtr &primitive_vars) const { @@ -109,10 +113,10 @@ AnfNodePtr TfliteLstmCellFusion::GetCondGraphPattern(const PrimitiveVarMapPtr &p auto is_parameter2 = std::make_shared<CondVar>(IsParameterNode); auto is_parameter3 = std::make_shared<CondVar>(IsParameterNode); auto is_parameter4 = std::make_shared<CondVar>(IsParameterNode); - auto is_less1 = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Less)); - auto is_less2 = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Less)); - auto is_logical_and = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_LogicalAnd)); - auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Return)); + auto is_less1 = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLess)); + auto is_less2 = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLess)); + auto is_logical_and = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimLogicalAnd)); + auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimReturn)); VectorRef less1_ref = VectorRef({is_less1, is_parameter1, is_parameter2}); VectorRef less2_ref = VectorRef({is_less2, is_parameter3, is_parameter4}); VectorRef logicaland_ref = VectorRef({is_logical_and, less1_ref, less2_ref}); @@ -156,25 +160,25 @@ AnfNodePtr TfliteLstmCellFusion::GetBodyGraphPattern(const PrimitiveVarMapPtr &p VectorRef cell_forgeted = VectorRef({std::make_shared<Var>("Mul"), forget_gate, placeholders[4]}); VectorRef cell_new = VectorRef({std::make_shared<Var>("Add"), cell_forgeted, cell_input}); - VectorRef smooth_cell_old = VectorRef({std::make_shared<Var>("Mul"), cell_smooth_old_, placeholders[4]}); - VectorRef smooth_cell_new = VectorRef({std::make_shared<Var>("Mul"), cell_smooth_new_, cell_new}); - VectorRef cell_output = VectorRef({std::make_shared<Var>("Add"), smooth_cell_new, smooth_cell_old}); + VectorRef zoneout_cell_old = VectorRef({std::make_shared<Var>("Mul"), cell_zoneout_old_, 
placeholders[4]}); + VectorRef zoneout_cell_new = VectorRef({std::make_shared<Var>("Mul"), cell_zoneout_new_, cell_new}); + VectorRef cell_output = VectorRef({std::make_shared<Var>("Add"), zoneout_cell_new, zoneout_cell_old}); VectorRef output_gate = VectorRef({std::make_shared<Var>("Sigmoid"), bias_output}); VectorRef cell_to_output = VectorRef({std::make_shared<Var>("Tanh"), cell_new}); VectorRef output = VectorRef({std::make_shared<Var>("Mul"), output_gate, cell_to_output}); - VectorRef smooth_hidden_old = VectorRef({std::make_shared<Var>("Mul"), hidden_smooth_old_, placeholders[5]}); - VectorRef smooth_hidden_new = VectorRef({std::make_shared<Var>("Mul"), hidden_smooth_new_, output}); - VectorRef hidden_output = VectorRef({std::make_shared<Var>("Add"), smooth_hidden_new, smooth_hidden_old}); + VectorRef zoneout_hidden_old = VectorRef({std::make_shared<Var>("Mul"), hidden_zoneout_old_, placeholders[5]}); + VectorRef zoneout_hidden_new = VectorRef({std::make_shared<Var>("Mul"), hidden_zoneout_new_, output}); + VectorRef hidden_output = VectorRef({std::make_shared<Var>("Add"), zoneout_hidden_new, zoneout_hidden_old}); VectorRef set_item = VectorRef({std::make_shared<Var>("SetItem"), placeholders[3], placeholders[2], output}); - auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_MakeTuple)); + auto is_make_tuple = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimMakeTuple)); std::vector<BaseRef> outputs = {is_make_tuple, add3, placeholders[1], add2, set_item, cell_output, hidden_output}; outputs.insert(outputs.end(), placeholders.begin() + 6, placeholders.end()); VectorRef make_tuple_node = VectorRef(outputs); - auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_Return)); + auto is_return = std::make_shared<CondVar>(std::bind(IsOpType, p1, kPrimReturn)); VectorRef return_node = VectorRef({is_return, make_tuple_node}); VarPtr fg = std::make_shared<Var>("RootG"); @@ -183,16 +187,16 @@ AnfNodePtr TfliteLstmCellFusion::GetBodyGraphPattern(const PrimitiveVarMapPtr &p } const BaseRef TfliteLstmCellFusion::DefinePattern() const { - auto is_while_node = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_While)); + auto is_while_node = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimWhile)); VectorRef while_node = VectorRef({is_while_node}); auto while_inputs = while_input_vars_; while_inputs.insert(while_inputs.begin() + 4, while_input_vars_[2]); while_node.insert(while_node.end(), while_inputs.begin(), while_inputs.end()); - auto is_tuple_get_item = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TupleGetItem)); + auto is_tuple_get_item = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTupleGetItem)); VectorRef while_output = VectorRef({is_tuple_get_item, while_node, std::make_shared<Var>()}); - auto is_tensor_list_stack = std::make_shared<CondVar>(std::bind(IsOpType, p1, schema::PrimitiveType_TensorListStack)); + auto is_tensor_list_stack = std::make_shared<CondVar>(std::bind(IsOpType, p1, prim::kPrimTensorListStack)); auto is_parameter = std::make_shared<CondVar>(IsParameterNode); VectorRef tensor_list_stack_node = VectorRef({is_tensor_list_stack, while_output, is_parameter}); @@ -228,7 +232,7 @@ bool TfliteLstmCellFusion::CheckReferencedOutputs(const FuncGraphPtr &func_graph return false; } auto cnode = utils::cast<CNodePtr>(node_user.first); - if (GetCNodeType(cnode) != schema::PrimitiveType_TupleGetItem) { + if 
(!CheckPrimitiveType(cnode, prim::kPrimTupleGetItem)) { return false; } auto index = GetTupleGetItemOutIndex(cnode); @@ -256,48 +260,51 @@ EquivPtr TfliteLstmCellFusion::CheckSubGraph(const FuncGraphPtr &func_graph, con } bool TfliteLstmCellFusion::CheckBodyGraph(const FuncGraphPtr &func_graph, const EquivPtr &equiv, - const CNodePtr &while_cnode, float *smooth) const { + const CNodePtr &while_cnode, float *zoneout_cell, + float *zoneout_hidden) const { MS_ASSERT(func_graph != nullptr); MS_ASSERT(equiv != nullptr); MS_ASSERT(while_cnode != nullptr); - MS_ASSERT(smooth != nullptr); - - auto cell_smooth_old_node = utils::cast<AnfNodePtr>((*equiv)[cell_smooth_old_]); - MS_ASSERT(cell_smooth_old_node != nullptr); - auto cell_smooth_new_node = utils::cast<AnfNodePtr>((*equiv)[cell_smooth_new_]); - MS_ASSERT(cell_smooth_new_node != nullptr); - auto hidden_smooth_old_node = utils::cast<AnfNodePtr>((*equiv)[hidden_smooth_old_]); - MS_ASSERT(hidden_smooth_old_node != nullptr); - auto hidden_smooth_new_node = utils::cast<AnfNodePtr>((*equiv)[hidden_smooth_new_]); - MS_ASSERT(hidden_smooth_new_node != nullptr); + MS_ASSERT(zoneout_cell != nullptr); + MS_ASSERT(zoneout_hidden != nullptr); + + auto cell_zoneout_old_node = utils::cast<AnfNodePtr>((*equiv)[cell_zoneout_old_]); + MS_ASSERT(cell_zoneout_old_node != nullptr); + auto cell_zoneout_new_node = utils::cast<AnfNodePtr>((*equiv)[cell_zoneout_new_]); + MS_ASSERT(cell_zoneout_new_node != nullptr); + auto hidden_zoneout_old_node = utils::cast<AnfNodePtr>((*equiv)[hidden_zoneout_old_]); + MS_ASSERT(hidden_zoneout_old_node != nullptr); + auto hidden_zoneout_new_node = utils::cast<AnfNodePtr>((*equiv)[hidden_zoneout_new_]); + MS_ASSERT(hidden_zoneout_new_node != nullptr); float cell_old, cell_new, hidden_old, hidden_new; - if (GetFloatScalarFromParamValueLite(cell_smooth_old_node, &cell_old) != RET_OK) { + if (GetFloatScalarFromParamValueLite(cell_zoneout_old_node, &cell_old) != RET_OK) { return false; } - if (GetFloatScalarFromParamValueLite(cell_smooth_new_node, &cell_new) != RET_OK) { + if (GetFloatScalarFromParamValueLite(cell_zoneout_new_node, &cell_new) != RET_OK) { return false; } - if (GetFloatScalarFromParamValueLite(hidden_smooth_old_node, &hidden_old) != RET_OK) { + if (GetFloatScalarFromParamValueLite(hidden_zoneout_old_node, &hidden_old) != RET_OK) { return false; } - if (GetFloatScalarFromParamValueLite(hidden_smooth_new_node, &hidden_new) != RET_OK) { + if (GetFloatScalarFromParamValueLite(hidden_zoneout_new_node, &hidden_new) != RET_OK) { return false; } if (cell_old < 0.0f || cell_old > 1.0f || cell_new < 0.0f || cell_new > 1.0f) { - MS_LOG(DEBUG) << "cell smooth value illegal"; + MS_LOG(DEBUG) << "cell zoneout value illegal"; return false; } if (hidden_old < 0.0f || hidden_old > 1.0f || hidden_new < 0.0f || hidden_new > 1.0f) { - MS_LOG(DEBUG) << "hidden smooth value illegal"; + MS_LOG(DEBUG) << "hidden zoneout value illegal"; return false; } if (std::abs(cell_old + cell_new - 1.0f) > EPSILON || std::abs(hidden_old + hidden_new - 1.0f) > EPSILON || std::abs(cell_old - hidden_old) > EPSILON) { - MS_LOG(DEBUG) << "smooth value illegal"; + MS_LOG(DEBUG) << "zoneout value illegal"; return false; } - *smooth = cell_old; + *zoneout_cell = cell_old; + *zoneout_hidden = hidden_old; return true; } @@ -402,7 +409,7 @@ STATUS TfliteLstmCellFusion::GetConcatedParam(const std::vector<AnfNodePtr> &par CNodePtr TfliteLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const EquivPtr &body_equiv, const std::string 
&base_name, - const float smooth) const { + const float zoneout_cell, const float zoneout_hidden) const { MS_ASSERT(func_graph != nullptr); MS_ASSERT(equiv != nullptr); MS_ASSERT(body_equiv != nullptr); @@ -411,14 +418,11 @@ CNodePtr TfliteLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, co * 0:cond_ 1:body_ 2:time_ 3:limit1_ 4:output_ 5:cell_ 6:hidden_ 7:limit2_ 8:input_ * 9:i2i_ 10:i2f_ 11:i2c_ 12:i2o_ 13:c2i_ 14:c2f_ 15:c2c_ 16:c2o_ 17:i_bias_ 18:f_bias_ 19:c_bias_ 20:o_bias_ */ - auto lstm_primitive = std::make_unique<schema::PrimitiveT>(); - std::unique_ptr<schema::LstmT> attr = std::make_unique<schema::LstmT>(); - attr->bidirection = false; - attr->smooth = smooth; - lstm_primitive->value.type = schema::PrimitiveType_Lstm; - lstm_primitive->value.value = attr.release(); - auto lstm_cvalue = lite::PrimitiveC::Create(lstm_primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<lite::PrimitiveC>(lstm_cvalue)); + auto lstm_prim = std::make_shared<ops::LSTM>(); + lstm_prim->set_bidirectional(false); + lstm_prim->set_zoneout_cell(zoneout_cell); + lstm_prim->set_zoneout_hidden(zoneout_hidden); + auto value_node = NewValueNode(lstm_prim); auto &vars = while_input_vars_; @@ -485,7 +489,7 @@ CNodePtr TfliteLstmCellFusion::CreateLSTMNode(const FuncGraphPtr &func_graph, co } bias->set_name(base_name + "_bias"); - if (!utils::isa<CNodePtr>(input) || GetCNodeType(input) != schema::PrimitiveType_TensorListFromTensor) { + if (!utils::isa<CNodePtr>(input) || !CheckPrimitiveType(input, prim::kPrimTensorListFromTensor)) { MS_LOG(DEBUG) << "input is not tensorlistfromtensor op"; return nullptr; } @@ -503,19 +507,13 @@ CNodePtr TfliteLstmCellFusion::CreateOutputGetItem(const FuncGraphPtr &func_grap MS_ASSERT(func_graph != nullptr); MS_ASSERT(node != nullptr); MS_ASSERT(get_items != nullptr); - auto tuple_get_item_prim_ptr = lite::GetTupleGetItemPrim(); - if (tuple_get_item_prim_ptr == nullptr) { - MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; - return nullptr; - } - auto tuple_get_item_prim = NewValueNode(tuple_get_item_prim_ptr); + auto tuple_get_item_prim = std::make_shared<ops::TupleGetItem>(); auto get_item_value = NewValueNode(MakeValue<int>(item_index)); if (tuple_get_item_prim == nullptr || get_item_value == nullptr) { MS_LOG(ERROR) << "NewValueNode is nullptr"; return nullptr; } - std::vector<AnfNodePtr> inputs{tuple_get_item_prim, node, get_item_value}; - CNodePtr get_item_cnode = func_graph->NewCNode(inputs); + CNodePtr get_item_cnode = func_graph->NewCNode(tuple_get_item_prim, {node, get_item_value}); std::vector<int64_t> shape_vector; auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(kFloat32, shape_vector); if (abstract_tensor == nullptr) { @@ -547,7 +545,7 @@ STATUS TfliteLstmCellFusion::AdjustOtherGetItems(const FuncGraphPtr &func_graph, return RET_ERROR; } auto get_item = utils::cast<CNodePtr>(node_user.first); - if (GetCNodeType(get_item) != schema::PrimitiveType_TupleGetItem) { + if (!CheckPrimitiveType(get_item, prim::kPrimTupleGetItem)) { return RET_ERROR; } auto new_inputs = get_item->inputs(); @@ -615,26 +613,12 @@ STATUS TfliteLstmCellFusion::SetAbstractTuple(const CNodePtr &cnode, const int o CNodePtr TfliteLstmCellFusion::CreateSqueezeNode(const FuncGraphPtr &func_graph, const CNodePtr &input_node, const std::vector<int> &axis) const { MS_ASSERT(func_graph != nullptr); - std::unique_ptr<schema::SqueezeT> attr = std::make_unique<schema::SqueezeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new SqueezeT failed"; - return 
nullptr; - } - attr->axis = axis; - auto new_primitive_t = std::make_unique<schema::PrimitiveT>(); - if (new_primitive_t == nullptr) { - MS_LOG(ERROR) << "primitive_t is nullptr"; - return nullptr; - } - new_primitive_t->value.type = schema::PrimitiveType_Squeeze; - new_primitive_t->value.value = attr.release(); - auto new_primtive_c = std::shared_ptr<lite::PrimitiveC>(lite::PrimitiveC::Create(new_primitive_t.release())); - if (new_primtive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; - return nullptr; - } - ValueNodePtr value_node = NewValueNode(new_primtive_c); - auto squeeze_cnode = func_graph->NewCNode({value_node, input_node}); + auto squeeze_prim = std::make_shared<ops::Squeeze>(); + std::vector<int64_t> axis_vec; + std::transform(axis.begin(), axis.end(), std::back_inserter(axis_vec), + [](int val) { return static_cast<int64_t>(val); }); + squeeze_prim->set_axis(axis_vec); + auto squeeze_cnode = func_graph->NewCNode(squeeze_prim, {input_node}); squeeze_cnode->set_abstract(input_node->abstract()->Clone()); squeeze_cnode->set_fullname_with_scope("squeeze_" + input_node->fullname_with_scope()); return squeeze_cnode; @@ -685,12 +669,13 @@ const AnfNodePtr TfliteLstmCellFusion::Process(const FuncGraphPtr &func_graph, c if (body_equiv == nullptr || body_equiv->empty()) { return nullptr; } - float smooth = 0.0f; - if (!CheckBodyGraph(func_graph, body_equiv, while_cnode, &smooth)) { + float zoneout_cell = 0.0f; + float zoneout_hidden = 0.0f; + if (!CheckBodyGraph(func_graph, body_equiv, while_cnode, &zoneout_cell, &zoneout_hidden)) { return nullptr; } const std::string lstm_name = "lstm_" + while_cnode->fullname_with_scope(); - auto lstm_node = CreateLSTMNode(func_graph, equiv, body_equiv, lstm_name, smooth); + auto lstm_node = CreateLSTMNode(func_graph, equiv, body_equiv, lstm_name, zoneout_cell, zoneout_hidden); if (lstm_node == nullptr) { return nullptr; } diff --git a/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h b/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h index 7a9ab7ed3e..f327d4dfd8 100644 --- a/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h +++ b/mindspore/lite/tools/optimizer/fusion/tflite_lstm_cell_fusion.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
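The smooth-to-zoneout rename running through both LSTM fusions is more than cosmetic: the weights the body graph encodes are the zoneout convex combination, state_out = z_old * state_prev + z_new * state_new, and CheckBodyGraph pins down exactly that shape before anything is fused. A sketch of the constraints it enforces, with EPSILON = 1e-5 as defined in this file (ZoneoutWeightsValid is a name invented here):

    #include <cmath>
    // Each weight must lie in [0, 1], each old/new pair must sum to 1, and the
    // cell and hidden paths must use the same rate.
    bool ZoneoutWeightsValid(float c_old, float c_new, float h_old, float h_new) {
      auto in_unit = [](float v) { return v >= 0.0f && v <= 1.0f; };
      return in_unit(c_old) && in_unit(c_new) && in_unit(h_old) && in_unit(h_new) &&
             std::abs(c_old + c_new - 1.0f) <= EPSILON &&
             std::abs(h_old + h_new - 1.0f) <= EPSILON &&
             std::abs(c_old - h_old) <= EPSILON;
    }

Only the old-state weights survive into the fused op, via set_zoneout_cell(c_old) and set_zoneout_hidden(h_old) on the new ops::LSTM primitive.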
@@ -44,10 +44,10 @@ class TfliteLstmCellFusion : public PatternProcessPass { static CNodePtr CreateOutputGetItem(const FuncGraphPtr &func_graph, const CNodePtr &node, const int item_index); protected: - VarPtr cell_smooth_old_ = nullptr; - VarPtr cell_smooth_new_ = nullptr; - VarPtr hidden_smooth_old_ = nullptr; - VarPtr hidden_smooth_new_ = nullptr; + VarPtr cell_zoneout_old_ = nullptr; + VarPtr cell_zoneout_new_ = nullptr; + VarPtr hidden_zoneout_old_ = nullptr; + VarPtr hidden_zoneout_new_ = nullptr; std::vector<VarPtr> while_input_vars_; lite::STATUS GetFloatScalarFromParamValueLite(const AnfNodePtr &param_value, float *v) const; @@ -58,11 +58,12 @@ class TfliteLstmCellFusion : public PatternProcessPass { AnfNodePtr GetCondGraphPattern(const PrimitiveVarMapPtr &primitive_vars) const; virtual AnfNodePtr GetBodyGraphPattern(const PrimitiveVarMapPtr &primitive_vars) const; virtual CNodePtr CreateLSTMNode(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const EquivPtr &body_equiv, - const std::string &base_name, const float smooth) const; + const std::string &base_name, const float zoneout_cell, + const float zoneout_hidden) const; private: bool CheckBodyGraph(const FuncGraphPtr &func_graph, const EquivPtr &equiv, const CNodePtr &while_cnode, - float *smooth) const; + float *zoneout_cell, float *zoneout_hidden) const; bool CheckReferencedOutputs(const FuncGraphPtr &func_graph, const CNodePtr &while_cnode) const; lite::STATUS GetConcatedParam(const std::vector<AnfNodePtr> &params, const ParameterPtr &new_param, diff --git a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc index d50523715d..4e2fb11eb1 100644 --- a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
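CreateSqueezeNode and CreateOutputGetItem above both switch to the short node-construction path this patch uses everywhere: configure a std::shared_ptr<ops::Xxx>, then pass it straight to FuncGraph::NewCNode together with the data inputs, instead of hand-assembling a schema::PrimitiveT / PrimitiveC pair. A standalone sketch with ops::Squeeze (setter names as in the diff; the int to int64_t widening is needed because unified-IR attributes are 64-bit):

    #include <algorithm>
    #include <vector>
    #include "ops/squeeze.h"

    CNodePtr NewSqueezeCNode(const FuncGraphPtr &graph, const AnfNodePtr &input,
                             const std::vector<int> &axis) {
      auto prim = std::make_shared<ops::Squeeze>();
      std::vector<int64_t> axis64;
      std::transform(axis.begin(), axis.end(), std::back_inserter(axis64),
                     [](int v) { return static_cast<int64_t>(v); });
      prim->set_axis(axis64);
      // The NewCNode(prim, inputs) overload builds the primitive's value node itself.
      return graph->NewCNode(prim, {input});
    }

The clip pass below still spells out NewValueNode(primitive_c) and pushes it as input 0; both forms yield the same CNode layout.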
@@ -16,15 +16,14 @@ #include "tools/optimizer/graph/clip_convert_activation_pass.h" #include <vector> #include <memory> +#include "ops/clip.h" +#include "ops/fusion/activation.h" +#include "ops/op_utils.h" #include "tools/optimizer/common/gllo_utils.h" -#include "src/ops/primitive_c.h" -#include "schema/inner/model_generated.h" #include "src/tensor.h" #include "tools/converter/quantizer/quant_cast.h" #include "src/common/log_adapter.h" -#include "securec/include/securec.h" -using mindspore::lite::PrimitiveC; namespace mindspore::opt { namespace { constexpr size_t kClipMinIndex = 2; @@ -38,20 +37,21 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) { if (!utils::isa<CNode>(node)) { continue; } - if (opt::GetCNodeType(node) != schema::PrimitiveType_Clip) { + if (!CheckPrimitiveType(node, prim::kPrimClip)) { continue; } auto clip_cnode = node->cast<CNodePtr>(); MS_ASSERT(clip_cnode->size() >= kClipMinIndex); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(clip_cnode->input(0)); - MS_ASSERT(primitive_c != nullptr); - auto primT = primitive_c->primitiveT(); - if (primT == nullptr || primT->value.AsClip() == nullptr) { - MS_LOG(ERROR) << "primT is null"; - return false; + auto clip_c = GetValueNode<ops::PrimClipPtr>(clip_cnode->input(0)); + MS_ASSERT(clip_c != nullptr); + float max = -1; + float min = -1; + if (clip_c->GetAttr(ops::kMax) != nullptr) { + max = clip_c->get_max(); + } + if (clip_c->GetAttr(ops::kMin) != nullptr) { + min = clip_c->get_min(); } - float max = primT->value.AsClip()->max; - float min = primT->value.AsClip()->min; if ((min == -1) && (max == -1)) { if (clip_cnode->size() > kClipMinIndex) { auto min_param_value = GetLiteParamValue(clip_cnode->input(kClipMinIndex)); @@ -77,26 +77,12 @@ bool ClipConvertActivationPass::Run(const FuncGraphPtr &graph) { } auto manager = graph->manager(); - // relu node - auto primitive = std::make_unique<schema::PrimitiveT>(); - MS_ASSERT(primitive != nullptr); - primitive->value.type = schema::PrimitiveType_Activation; - auto prim2 = new (std::nothrow) schema::ActivationT; - if (prim2 == nullptr) { - MS_LOG(ERROR) << "new ActivationT failed"; - return false; - } - if (min == 0 && max == 6) { - prim2->type = schema::ActivationType_RELU6; - } else { - prim2->type = schema::ActivationType_HARD_TANH; - prim2->min_val = min; - prim2->max_val = max; + auto primitive_c = std::make_shared<mindspore::ops::Activation>(); + primitive_c->Init(0, min, max, mindspore::RELU6); + if (min != 0 || max != 6) { + primitive_c->set_activation_type(mindspore::HARD_TANH); } - primitive->value.value = prim2; - auto primitiveCValue = PrimitiveC::Create(primitive.release()); - MS_ASSERT(primitiveCValue != nullptr); - auto value_node = NewValueNode(std::shared_ptr<PrimitiveC>(primitiveCValue)); + auto value_node = NewValueNode(primitive_c); std::vector<AnfNodePtr> op_inputs = {value_node}; op_inputs.push_back(clip_cnode->input(1)); auto new_cnode = graph->NewCNode(op_inputs); diff --git a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.h b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.h index 83bba24c20..a5b7968b70 100644 --- a/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.h +++ b/mindspore/lite/tools/optimizer/graph/clip_convert_activation_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance 
with the License. @@ -17,7 +17,6 @@ #ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_CLIP_CONVERT_ACTIVATION_PASS_H_ #define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_CLIP_CONVERT_ACTIVATION_PASS_H_ #include <string> -#include "schema/inner/model_generated.h" #include "tools/converter/converter_flags.h" #include "backend/optimizer/common/pass.h" #include "src/param_value_lite.h" diff --git a/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.cc b/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.cc index fa54db1a0e..3e5c671ffd 100644 --- a/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.cc @@ -14,12 +14,11 @@ * limitations under the License. */ +#include "tools/optimizer/graph/functionalize_control_op_pass.h" #include <algorithm> #include <deque> -#include "tools/optimizer/graph/functionalize_control_op_pass.h" #include "tools/optimizer/graph/functionalize_while.h" -#include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" +#include "include/errorcode.h" namespace mindspore::opt { diff --git a/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.h b/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.h index 09270bb4ef..3d470943e0 100644 --- a/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.h +++ b/mindspore/lite/tools/optimizer/graph/functionalize_control_op_pass.h @@ -23,6 +23,10 @@ #include <memory> #include "backend/optimizer/common/pass.h" #include "tools/converter/converter_flags.h" +#include "tools/converter/ops/enter.h" +#include "tools/converter/ops/exit.h" +#include "tools/converter/ops/loop_cond.h" +#include "tools/converter/ops/next_iteration.h" #include "tools/optimizer/common/gllo_utils.h" using mindspore::lite::converter::FmkType; @@ -34,19 +38,19 @@ class FunctionalizeControlOpPass : public Pass { ~FunctionalizeControlOpPass() override = default; bool Run(const FuncGraphPtr &graph) override; static FuncGraphPtr NewFuncGraph(const std::string &subgraph_name, const FmkType &fmk_type); - static bool IsMerge(const AnfNodePtr &node) { return opt::GetCNodeType(node) == schema::PrimitiveType_Merge; } + static bool IsMerge(const AnfNodePtr &node) { return CheckPrimitiveType(node, prim::kPrimMerge); } static bool IsLoopCond(const AnfNodePtr &node) { - return static_cast<int>(opt::GetCNodeType(node)) == static_cast<int>(lite::ConverterPrimitiveType_LoopCond); + return CheckPrimitiveType(node, std::make_shared<Primitive>(lite::kNameLoopCond)); } static bool IsEnter(const AnfNodePtr &node) { - return static_cast<int>(opt::GetCNodeType(node)) == static_cast<int>(lite::ConverterPrimitiveType_Enter); + return CheckPrimitiveType(node, std::make_shared<Primitive>(lite::kNameEnter)); } static bool IsExit(const AnfNodePtr &node) { - return static_cast<int>(opt::GetCNodeType(node)) == static_cast<int>(lite::ConverterPrimitiveType_Exit); + return CheckPrimitiveType(node, std::make_shared<Primitive>(lite::kNameExit)); } - static bool IsSwitch(const AnfNodePtr &node) { return opt::GetCNodeType(node) == schema::PrimitiveType_Switch; } + static bool IsSwitch(const AnfNodePtr &node) { return CheckPrimitiveType(node, prim::kPrimSwitch); } static bool IsNextIteration(const AnfNodePtr &node) { - return static_cast<int>(opt::GetCNodeType(node)) == static_cast<int>(lite::ConverterPrimitiveType_NextIteration); + return CheckPrimitiveType(node, std::make_shared<Primitive>(lite::kNameNextIteration)); } static bool 
IsControlFlowOp(const AnfNodePtr &node) { return IsLoopCond(node) || IsEnter(node) || IsMerge(node) || IsSwitch(node) || IsExit(node) || diff --git a/mindspore/lite/tools/optimizer/graph/functionalize_while.cc b/mindspore/lite/tools/optimizer/graph/functionalize_while.cc index 2d1d8b53d4..29c5cf39e4 100644 --- a/mindspore/lite/tools/optimizer/graph/functionalize_while.cc +++ b/mindspore/lite/tools/optimizer/graph/functionalize_while.cc @@ -18,30 +18,22 @@ #include <memory> #include <deque> #include "tools/optimizer/graph/functionalize_while.h" -#include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" -#include "src/ops/while.h" +#include "include/errorcode.h" +#include "ops/make_tuple.h" +#include "ops/return.h" +#include "ops/tuple_get_item.h" +#include "ops/while.h" namespace { mindspore::ValueNodePtr GetWhileAnfPrim() { - auto while_primitiveT = new (std::nothrow) mindspore::schema::PrimitiveT; - if (while_primitiveT == nullptr) { - MS_LOG(ERROR) << "new while_primitiveT failed"; + auto while_primc = std::make_shared<mindspore::ops::While>(); + if (while_primc == nullptr) { + MS_LOG(ERROR) << "new while_primitive failed"; return nullptr; } - while_primitiveT->value.type = mindspore::schema::PrimitiveType_While; - auto whileT = new (std::nothrow) mindspore::schema::WhileT; - whileT->condSubgraphIndex = mindspore::opt::FunctionalizeControlOpPass::GetSubgraphIndex(); - whileT->bodySubgraphIndex = mindspore::opt::FunctionalizeControlOpPass::GetSubgraphIndex(); - while_primitiveT->value.value = whileT; - if (while_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new WhileT failed"; - delete (while_primitiveT); - return nullptr; - } - - auto while_prim = std::make_shared<mindspore::lite::While>(while_primitiveT); - mindspore::ValueNodePtr partial_anf_prim = NewValueNode(while_prim); + while_primc->set_cond_subgraph_index(mindspore::opt::FunctionalizeControlOpPass::GetSubgraphIndex()); + while_primc->set_body_subgraph_index(mindspore::opt::FunctionalizeControlOpPass::GetSubgraphIndex()); + mindspore::ValueNodePtr partial_anf_prim = NewValueNode(while_primc); return partial_anf_prim; } } // namespace @@ -221,7 +213,7 @@ STATUS FunctionalizeWhile::UpdateExitNodeUser() { AbstractBasePtrList abstractList; std::vector<int64_t> shape_vector; abstractList.emplace_back(std::make_shared<abstract::AbstractTensor>(kFloat32, shape_vector)); - auto tuple_get_item_prim_ptr = lite::GetTupleGetItemPrim(); + auto tuple_get_item_prim_ptr = std::make_shared<ops::TupleGetItem>(); if (tuple_get_item_prim_ptr == nullptr) { MS_LOG(ERROR) << "GetTupleGetItemPrim return nullptr"; return RET_NULL_PTR; @@ -349,7 +341,7 @@ STATUS FunctionalizeWhile::IdentifyCondSubgraphInput() { } STATUS FunctionalizeWhile::IdentifyCondSubgraphOutput() { - auto return_prim_ptr = lite::GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { MS_LOG(ERROR) << "GetReturnPrim return nullptr"; return RET_NULL_PTR; @@ -494,7 +486,7 @@ STATUS FunctionalizeWhile::IdentifyBodySubgraphOutput() { "_cnode"); } - auto return_prim_ptr = lite::GetReturnPrim(); + auto return_prim_ptr = std::make_shared<ops::Return>(); if (return_prim_ptr == nullptr) { MS_LOG(ERROR) << "GetReturnPrim return nullptr"; return RET_NULL_PTR; @@ -509,7 +501,7 @@ STATUS FunctionalizeWhile::IdentifyBodySubgraphOutput() { return_cnode->add_input(tmp_output[0]); } else { std::vector<AnfNodePtr> make_tuple_inputs = tmp_output; - auto make_tuple_prim_ptr = lite::GetMakeTuplePrim(); + auto 
make_tuple_prim_ptr = std::make_shared<ops::MakeTuple>(); if (make_tuple_prim_ptr == nullptr) { MS_LOG(ERROR) << "GetMakeTuplePrim return nullptr"; return RET_NULL_PTR; diff --git a/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc b/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc index 4eb333659a..9d9c9d4c5b 100644 --- a/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,39 +17,44 @@ #include <vector> #include <memory> #include <algorithm> +#include "ops/fusion/conv2d_fusion.h" #include "tools/optimizer/common/gllo_utils.h" -#include "src/ops/primitive_c.h" -#include "schema/inner/model_generated.h" #include "src/tensor.h" #include "tools/converter/quantizer/quant_cast.h" #include "src/common/log_adapter.h" #include "securec/include/securec.h" -using mindspore::lite::PrimitiveC; namespace mindspore::opt { namespace { constexpr size_t kConvWeightIndex = 2; constexpr size_t kConvInputIndex = 1; } // namespace + bool GroupDepthwiseOpConvertPass::Run(const FuncGraphPtr &graph) { auto node_list = TopoSort(graph->get_return()); for (auto &node : node_list) { if (!utils::isa<CNode>(node)) { continue; } - if (opt::GetCNodeType(node) != schema::PrimitiveType_DepthwiseConv2D) { + if (!CheckPrimitiveType(node, prim::kPrimConv2DFusion)) { continue; } - auto depthwise_cnode = node->cast<CNodePtr>(); - auto depthwise_primitivec = GetValueNode<std::shared_ptr<PrimitiveC>>(depthwise_cnode->input(0)); - auto attr = depthwise_primitivec->primitiveT()->value.AsDepthwiseConv2D(); - if (attr == nullptr) { + auto conv_cnode = node->cast<CNodePtr>(); + auto prim_node = conv_cnode->input(0); + MS_ASSERT(prim_node != nullptr); + auto prim_value_node = prim_node->cast<ValueNodePtr>(); + MS_ASSERT(prim_value_node != nullptr && prim_value_node->value() != nullptr); + auto conv2d_fusion = prim_value_node->value()->cast<std::shared_ptr<mindspore::ops::Conv2DFusion>>(); + if (conv2d_fusion == nullptr) { MS_LOG(ERROR) << "the input of depthwiseConv2d is null"; return false; } - - auto data_node = depthwise_cnode->input(kConvInputIndex)->abstract(); + if (conv2d_fusion->GetAttr(ops::kIsDepthWise) == nullptr || + !GetValue<bool>(conv2d_fusion->GetAttr(ops::kIsDepthWise))) { + continue; + } + auto data_node = conv_cnode->input(kConvInputIndex)->abstract(); if (data_node == nullptr) { MS_LOG(ERROR) << "the node input is invalid."; return false; @@ -59,7 +64,7 @@ bool GroupDepthwiseOpConvertPass::Run(const FuncGraphPtr &graph) { MS_LOG(DEBUG) << "the tensor's shape is dynamic."; return true; } - auto weight_data_node = depthwise_cnode->input(kConvWeightIndex)->abstract(); + auto weight_data_node = conv_cnode->input(kConvWeightIndex)->abstract(); if (weight_data_node == nullptr) { MS_LOG(ERROR) << "the weight node input is invalid."; return false; @@ -69,36 +74,12 @@ bool GroupDepthwiseOpConvertPass::Run(const FuncGraphPtr &graph) { MS_LOG(DEBUG) << "the weight's shape is dynamic."; return true; } - if ((data_shape[3] == 1) || (data_shape[3] != weight_shape[3])) { - auto conv_attr = std::make_unique<schema::Conv2DT>(); - if (conv_attr == nullptr) { - MS_LOG(ERROR) << "conv_attr is null"; - return false; - } -
conv_attr->channelIn = data_shape[3]; - conv_attr->channelOut = weight_shape[3]; - - // update attr - conv_attr->group = data_shape[3]; - conv_attr->format = attr->format; - conv_attr->kernelH = attr->kernelH; - conv_attr->kernelW = attr->kernelW; - conv_attr->strideH = attr->strideH; - conv_attr->strideW = attr->strideW; - conv_attr->padMode = attr->padMode; - conv_attr->padUp = attr->padUp; - conv_attr->padDown = attr->padDown; - conv_attr->padLeft = attr->padLeft; - conv_attr->padRight = attr->padRight; - conv_attr->dilateH = attr->dilateH; - conv_attr->dilateW = attr->dilateW; - conv_attr->activationType = attr->activationType; - - depthwise_primitivec->primitiveT()->value.type = schema::PrimitiveType_Conv2D; - depthwise_primitivec->primitiveT()->value.value = conv_attr.release(); - - MS_ASSERT(depthwise_cnode->inputs().size() > kConvWeightIndex); - auto weight_node = depthwise_cnode->input(kConvWeightIndex); + if (data_shape[3] == 1 || data_shape[3] != weight_shape[3]) { + conv2d_fusion->EraseAttr(ops::kIsDepthWise); + conv2d_fusion->set_group(static_cast<int64_t>(data_shape[3])); + conv2d_fusion->set_in_channel(data_shape[3]); + MS_ASSERT(conv_cnode->inputs().size() > kConvWeightIndex); + auto weight_node = conv_cnode->input(kConvWeightIndex); MS_ASSERT(weight_node != nullptr); auto weight_value = GetLiteParamValue(weight_node); if (weight_value == nullptr) { diff --git a/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h b/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h index fd696c22e5..0d4faf95eb 100644 --- a/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h +++ b/mindspore/lite/tools/optimizer/graph/group_depthwise_op_convert_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2019 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
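The net effect of the group_depthwise rewrite above: after IR unification a depthwise convolution is an ordinary Conv2DFusion tagged with an is_depth_wise attribute, so converting it to a grouped convolution shrinks from rebuilding a whole Conv2DT to three attribute edits. Isolated, with the shape checks and weight handling omitted (conv is the ops::Conv2DFusion taken off the CNode, data_shape the NHWC input shape):

    conv->EraseAttr(ops::kIsDepthWise);                      // drop the depthwise tag
    conv->set_group(static_cast<int64_t>(data_shape[3]));    // one group per input channel
    conv->set_in_channel(static_cast<int64_t>(data_shape[3]));

Kernel size, stride, padding, dilation, and activation already live on the primitive, so none of the field-by-field copying the removed code did is needed any more.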
@@ -16,7 +16,6 @@ #ifndef LITE_GROUP_DEPTHWISE_OP_CONVERT_PASS_H #define LITE_GROUP_DEPTHWISE_OP_CONVERT_PASS_H #include <string> -#include "schema/inner/model_generated.h" #include "tools/converter/converter_flags.h" #include "backend/optimizer/common/pass.h" #include "src/param_value_lite.h" diff --git a/mindspore/lite/tools/optimizer/graph/if_pass.cc b/mindspore/lite/tools/optimizer/graph/if_pass.cc index 239dcb8ddd..f7681d46c0 100644 --- a/mindspore/lite/tools/optimizer/graph/if_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/if_pass.cc @@ -16,36 +16,21 @@ #include "tools/optimizer/graph/if_pass.h" #include <vector> #include <memory> -#include <algorithm> #include "mindspore/lite/include/errorcode.h" -#include "mindspore/lite/src/ops/primitive_c.h" -#include "tools/anf_importer/import_from_meta_graphT.h" #include "tools/optimizer/common/gllo_utils.h" -#include "src/ops/primitive_c.h" -#include "schema/inner/model_generated.h" -#include "src/tensor.h" #include "src/common/log_adapter.h" -#include "src/ops/switch.h" -#include "src/ops/partial.h" +#include "ops/switch.h" namespace mindspore::opt { ValueNodePtr IfPass::GetSwitchAnfPrim() { - std::unique_ptr<schema::PrimitiveT> switch_primitiveT(new (std::nothrow) schema::PrimitiveT); - if (switch_primitiveT == nullptr) { - MS_LOG(ERROR) << "new switch_primitiveT failed"; + auto switch_prim = std::make_shared<ops::Switch>(); + if (switch_prim == nullptr) { + MS_LOG(ERROR) << "new prim failed."; return nullptr; } - switch_primitiveT->value.type = schema::PrimitiveType_Switch; - switch_primitiveT->value.value = new (std::nothrow) schema::SwitchT; - if (switch_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new MakeTupleT failed"; - return nullptr; - } - - auto partial_prim = std::make_shared<lite::Partial>(switch_primitiveT.release()); - ValueNodePtr partial_anf_prim = NewValueNode(partial_prim); - return partial_anf_prim; + ValueNodePtr switch_anf_prim = NewValueNode(switch_prim); + return switch_anf_prim; } void IfPass::ReplaceInput(const std::vector<AnfNodePtr> &node_list, AnfNodePtr new_input_cnode, std::string para_name) { @@ -71,7 +56,7 @@ bool IfPass::Run(const FuncGraphPtr &graph) { if (!utils::isa<CNodePtr>(node)) { continue; } - if (opt::GetCNodeType(node) != schema::PrimitiveType_If) { + if (!CheckPrimitiveType(node, prim::kPrimIf)) { continue; } auto if_cnode = node->cast<CNodePtr>(); diff --git a/mindspore/lite/tools/optimizer/graph/infershape_pass.cc b/mindspore/lite/tools/optimizer/graph/infershape_pass.cc index e1aa855aed..673dae5389 100644 --- a/mindspore/lite/tools/optimizer/graph/infershape_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/infershape_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
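Two flavours of primitive check coexist after this change, as the functionalize and if passes above show: ops that exist in core (Merge, Switch, If, While) are matched against exported prim::kPrim* constants, while converter-only ops (Enter, Exit, LoopCond, NextIteration) are matched by building a Primitive from their registered name string. A sketch of the second flavour, assuming lite::kNameEnter is the name constant declared in tools/converter/ops/enter.h:

    #include "tools/converter/ops/enter.h"  // declares lite::kNameEnter

    bool IsEnterNode(const AnfNodePtr &node) {
      // CheckPrimitiveType effectively compares primitives by name, which is
      // why an ad-hoc Primitive built from the registered name is sufficient;
      // no schema enum value is involved any more.
      return opt::CheckPrimitiveType(node, std::make_shared<Primitive>(lite::kNameEnter));
    }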
@@ -17,14 +17,16 @@ #include <vector> #include <memory> #include <algorithm> -#include "mindspore/lite/include/errorcode.h" -#include "mindspore/lite/src/ops/primitive_c.h" -#include "tools/anf_importer/import_from_meta_graphT.h" - -using mindspore::lite::RET_INFER_INVALID; +#include "include/errorcode.h" +#include "tools/common/node_util.h" +#include "src/common/common.h" +#include "src/ops/populate/populate_register.h" +#include "src/ops/ops_utils.h" +#include "src/runtime/infer_manager.h" namespace mindspore::opt { - +namespace { +constexpr size_t INITIAL_SIZE = 1024; ParamValueLitePtr NewParamValueLitePtr(lite::Tensor *tensor) { auto para_value_lite = std::make_shared<ParamValueLite>(); if (para_value_lite == nullptr) { @@ -37,14 +39,15 @@ ParamValueLitePtr NewParamValueLitePtr(lite::Tensor *tensor) { return para_value_lite; } -bool IsSpecialType(schema::PrimitiveType type) { - if ((type == schema::PrimitiveType_TupleGetItem) || (type == schema::PrimitiveType_Depend) || - (type == schema::PrimitiveType_ControlDepend) || - (type == schema::PrimitiveType_MakeTuple || type == schema::PrimitiveType_Return)) { +bool IsSpecialType(const CNodePtr &cnode) { + if (CheckPrimitiveType(cnode, prim::kPrimTupleGetItem) || CheckPrimitiveType(cnode, prim::kPrimDepend) || + CheckPrimitiveType(cnode, prim::kPrimControlDepend) || CheckPrimitiveType(cnode, kPrimMakeTuple) || + CheckPrimitiveType(cnode, kPrimReturn)) { return true; } return false; } +} // namespace abstract::AbstractTensorPtr InferShapePass::ConvertLiteTensorToAbstractTensor(lite::Tensor *tensor) { MS_ASSERT(nullptr != tensor); @@ -160,7 +163,7 @@ STATUS InferShapePass::GetCNodeInputTensors(const CNodePtr &cnode, std::vector<l } if (utils::isa<ValueNodePtr>(cnode->input(i))) { - MS_LOG(WARNING) << cnode->fullname_with_scope() << "'s input[" << i << "] is value node"; + MS_LOG(DEBUG) << cnode->fullname_with_scope() << "'s input[" << i << "] is value node"; continue; } @@ -355,7 +358,7 @@ bool InferShapePass::Run(const FuncGraphPtr &func_graph) { continue; } auto cnode = node->cast<CNodePtr>(); - auto origin_primc = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(cnode->input(0)); + auto origin_primc = GetValueNode<PrimitiveCPtr>(cnode->input(0)); if (origin_primc == nullptr) { auto sub_func_graph = GetValueNode<FuncGraphPtr>(cnode->input(0)); if (sub_func_graph == nullptr) { @@ -363,16 +366,10 @@ bool InferShapePass::Run(const FuncGraphPtr &func_graph) { return false; } else { MS_LOG(WARNING) << "subgraph infer shape invalid."; - return RET_INFER_INVALID; + return lite::RET_INFER_INVALID; } } - auto origin_primt = origin_primc->primitiveT(); - if (origin_primt == nullptr) { - MS_LOG(ERROR) << "origin_primt is nullptr"; - return false; - } - auto type = GetCNodeType(cnode); - if (IsSpecialType(type)) { + if (IsSpecialType(cnode)) { continue; } std::vector<lite::Tensor *> input_tensors; @@ -389,22 +386,42 @@ bool InferShapePass::Run(const FuncGraphPtr &func_graph) { FreeTensors(&output_tensors); continue; } - auto primt = std::make_unique<schema::PrimitiveT>(); - if (primt == nullptr) { - MS_LOG(ERROR) << "primt is nullptr"; + auto prim_t = lite::GetPrimitiveT(cnode->input(0)); + if (prim_t == nullptr) { + MS_LOG(ERROR) << "prim_t is nullptr"; + FreeTensors(&input_tensors); + FreeTensors(&output_tensors); + return false; + } + + flatbuffers::FlatBufferBuilder fbb(INITIAL_SIZE); + auto prim = lite::ConvertToPrimitive(prim_t, &fbb); + if (prim == nullptr) { + MS_LOG(ERROR) << "get primitive failed."; + FreeTensors(&input_tensors); + 
FreeTensors(&output_tensors); + fbb.Clear(); + return false; + } + auto parameter_gen = + lite::PopulateRegistry::GetInstance()->GetParameterCreator(prim->value_type(), lite::SCHEMA_CUR); + if (parameter_gen == nullptr) { + MS_LOG(ERROR) << "PopulateParameter return nullptr, type: " << schema::EnumNamePrimitiveType(prim->value_type()); FreeTensors(&input_tensors); FreeTensors(&output_tensors); + fbb.Clear(); return false; } - *primt = *origin_primt; - auto primc = std::shared_ptr<lite::PrimitiveC>(lite::PrimitiveC::Create(primt.release())); - if (primc == nullptr) { - MS_LOG(ERROR) << "primc is nullptr"; + auto parameter = parameter_gen(prim); + if (parameter == nullptr) { + MS_LOG(ERROR) << "parameter is nullptr."; FreeTensors(&input_tensors); FreeTensors(&output_tensors); + fbb.Clear(); return false; } - status = primc->InferShape(input_tensors, output_tensors); + parameter->infer_flag_ = true; + status = KernelInferShape(input_tensors, &output_tensors, parameter); if (status == RET_OK) { status = SetCNodeAbstract(output_tensors, cnode); if (status != RET_OK) { @@ -413,6 +430,8 @@ bool InferShapePass::Run(const FuncGraphPtr &func_graph) { } FreeTensors(&input_tensors); FreeTensors(&output_tensors); + free(parameter); + fbb.Clear(); } return true; } diff --git a/mindspore/lite/tools/optimizer/graph/infershape_pass.h b/mindspore/lite/tools/optimizer/graph/infershape_pass.h index b316430f7d..c753f0cecd 100644 --- a/mindspore/lite/tools/optimizer/graph/infershape_pass.h +++ b/mindspore/lite/tools/optimizer/graph/infershape_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.cc index 0e7a867948..040a237d31 100644 --- a/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2021 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,10 +14,9 @@ * limitations under the License. 
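The InferShapePass rewrite above replaces PrimitiveC::InferShape with the runtime's own machinery, and the round trip deserves spelling out: the unified-IR primitive is serialized back to schema form, a flatbuffer view of it feeds the populate registry, and the resulting OpParameter drives the shared KernelInferShape entry point. Condensed from the diff, error paths elided:

    flatbuffers::FlatBufferBuilder fbb(INITIAL_SIZE);
    auto prim_t = lite::GetPrimitiveT(cnode->input(0));  // IR prim -> schema::PrimitiveT
    auto prim = lite::ConvertToPrimitive(prim_t, &fbb);  // PrimitiveT -> flatbuffer view
    auto creator = lite::PopulateRegistry::GetInstance()->GetParameterCreator(
        prim->value_type(), lite::SCHEMA_CUR);           // per-op OpParameter factory
    auto parameter = creator(prim);
    parameter->infer_flag_ = true;                       // request full shape inference
    auto status = KernelInferShape(input_tensors, &output_tensors, parameter);
    free(parameter);                                     // populate creators use malloc
    fbb.Clear();

The free/fbb.Clear() pair at the end of the loop body is load-bearing: the parameter creators allocate with malloc, and the builder buffer is rebuilt per node.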
*/ #include "tools/optimizer/graph/inputs_adjust_pass.h" -#include <vector> #include <string> #include <memory> -#include "src/ops/primitive_c.h" +#include "ops/primitive_c.h" namespace mindspore::opt { STATUS InputAdjustPass::AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int input_num, @@ -44,21 +43,21 @@ STATUS InputAdjustPass::AddAttrToInput(const FuncGraphPtr &func_graph, const CNo } switch (flag) { case 1: { - auto value_data = GetValue<int32_t>(value_ptr); + auto value_data = CastToInt(value_ptr).front(); auto param_node = BuildIntValueParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name); inputs.push_back(param_node); break; } case 2: { - auto value_data = GetValue<std::vector<int32_t>>(value_ptr); + auto value_data = CastToInt(value_ptr); auto param_node = BuildIntVecParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name); inputs.push_back(param_node); break; } case 3: { - auto value_data = GetValue<std::vector<std::vector<int32_t>>>(value_ptr); + auto value_data = CastToVec2DInt(value_ptr); auto param_node = BuildIntVec2DParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name); inputs.push_back(param_node); @@ -86,7 +85,7 @@ bool InputAdjustPass::Run(const FuncGraphPtr &func_graph) { auto manager = Manage(func_graph, true); if (manager == nullptr) { MS_LOG(ERROR) << "manager is nullptr."; - return lite::RET_NULL_PTR; + return lite ::RET_NULL_PTR; } auto node_list = TopoSort(func_graph->get_return()); STATUS status = lite::RET_OK; @@ -95,8 +94,34 @@ bool InputAdjustPass::Run(const FuncGraphPtr &func_graph) { if (cnode == nullptr) { continue; } - - if (GetCNodeType(node) == schema::PrimitiveType_Resize) { + if (CheckPrimitiveType(node, prim::kPrimTranspose)) { + MS_LOG(INFO) << "Adjust Transpose"; + status = AddAttrToInput(func_graph, cnode, 2, "perm", 2); + } else if (CheckPrimitiveType(node, prim::kPrimReshape)) { + MS_LOG(INFO) << "Adjust Reshape"; + status = AddAttrToInput(func_graph, cnode, 2, "shape", 2); + } else if (CheckPrimitiveType(node, prim::kPrimGather)) { + MS_LOG(INFO) << "Adjust Gather"; + status = AddAttrToInput(func_graph, cnode, 3, "axis", 1); + } else if (CheckPrimitiveType(node, prim::kPrimCast)) { + MS_LOG(INFO) << "Adjust Cast"; + status = AddAttrToInput(func_graph, cnode, 2, "to", 1); + } else if (CheckPrimitiveType(node, prim::kPrimTopKFusion)) { + MS_LOG(INFO) << "Adjust TopKFusion"; + status = AddAttrToInput(func_graph, cnode, 2, "k", 1); + } else if (CheckPrimitiveType(node, prim::kPrimTileFusion)) { + MS_LOG(INFO) << "Adjust TileFusion"; + status = AddAttrToInput(func_graph, cnode, 2, "multiples", 2); + } else if (CheckPrimitiveType(node, prim::kPrimReduceFusion)) { + MS_LOG(INFO) << "Adjust ReduceFusion"; + status = AddAttrToInput(func_graph, cnode, 2, "axes", 2); + } else if (CheckPrimitiveType(node, prim::kPrimPadFusion)) { + MS_LOG(INFO) << "Adjust PadFusion"; + status = AddAttrToInput(func_graph, cnode, 2, "paddings", 3); + } else if (CheckPrimitiveType(node, prim::kPrimPowFusion)) { + MS_LOG(INFO) << "Adjust PowFuison"; + status = AddAttrToInput(func_graph, cnode, 2, "power", 4); + } else if (CheckPrimitiveType(node, prim::kPrimResize)) { status = AddAttrToInput(func_graph, cnode, 2, "zoom_factor", 1); } if (status != lite::RET_OK && status != lite::RET_NO_CHANGE) { diff --git a/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.h index 368174c94e..8d3a927478 100644 --- 
+++ b/mindspore/lite/tools/optimizer/graph/inputs_adjust_pass.h
@@ -1,5 +1,5 @@
 /**
- * Copyright 2021 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
 #include "tools/optimizer/common/gllo_utils.h"
 #include "backend/optimizer/common/pass.h"
 #include "src/param_value_lite.h"
-#include "mindspore/lite/include/errorcode.h"
+#include "include/errorcode.h"
 
 using mindspore::lite::STATUS;
 namespace mindspore::opt {
diff --git a/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.cc
index e713634e63..7eb9fac3ec 100644
--- a/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.cc
+++ b/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.cc
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020 Huawei Technologies Co., Ltd
+ * Copyright 2020-2021 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,15 +18,212 @@
 #include <vector>
 #include <memory>
-#include "src/ops/primitive_c.h"
 #include "tools/converter/converter_context.h"
-#include "tools/converter/quantizer/quant_cast.h"
+#include "tools/converter/quant_param_holder.h"
+#include "tools/converter/quantizer/quantize_util.h"
 #include "src/common/log_adapter.h"
 #include "src/tensor.h"
 
-using mindspore::lite::PrimitiveC;
 namespace mindspore {
 namespace opt {
+namespace {
+constexpr size_t kDoubleNum = 2;
+void FillDefaultInputQuantParamIfNeed(const PrimitivePtr &prim, const size_t &input_size) {
+  auto quant_param_valueptr = prim->GetAttr("quant_params");
+  if (quant_param_valueptr == nullptr) {
+    prim->AddAttr("quant_params", std::make_shared<lite::QuantParamHolder>());
+  }
+  auto quant_param_holder = prim->GetAttr("quant_params")->cast<lite::QuantParamHolderPtr>();
+  std::vector<schema::QuantParamT> quants;
+  schema::QuantParamT quant_param;
+  auto input_quant_params = quant_param_holder->input_quant_params();
+  if (input_quant_params.size() == kDoubleNum) {
+    quants.clear();
+    quant_param.min = 0.0;
+    quant_param.max = 0.0;
+    quant_param.dstDtype = kNumberTypeInt32;
+    quant_param.inited = input_quant_params.at(0).at(0).inited && input_quant_params.at(1).at(0).inited;
+    quant_param.zeroPoint = 0;
+    if (quant_param.inited) {
+      quant_param.scale = input_quant_params.at(0).at(0).scale * input_quant_params.at(1).at(0).scale;
+    }
+    quant_param.roundType = 1;
+    quant_param.multiplier = 1;
+    quants.emplace_back(quant_param);
+    input_quant_params.emplace_back(quants);
+  }
+  // pad input_quant_params with not-yet-inited placeholder quant params up to input_size
+  if (input_quant_params.size() < input_size) {
+    schema::QuantParamT tmpQuantParam;
+    quants.emplace_back(tmpQuantParam);
+    input_quant_params.insert(input_quant_params.end(), input_size - input_quant_params.size(), quants);
+  }
+  quant_param_holder->set_input_quant_params(input_quant_params);
+}
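// Why FillDefaultInputQuantParamIfNeed gives the third (bias) input a scale
// equal to the product of the first two: a quantized conv/matmul accumulates
// sum(x_int * w_int), whose real value carries the factor s_x * s_w, so a bias
// folded into that int32 accumulator must be stored as round(b / (s_x * s_w)).
// A minimal standalone check of that identity with made-up values; this is
// illustrative, not converter code:
#include <cassert>
#include <cmath>
int main() {
  const double s_x = 0.02, s_w = 0.005, bias = 0.37;
  const double s_b = s_x * s_w;                    // the rule applied above
  const long b_q = std::lround(bias / s_b);        // int32 bias the runtime expects
  assert(std::fabs(b_q * s_b - bias) <= s_b / 2);  // dequantizes within half a quantization step
  return 0;
}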
+
+int ConvertInputQuantParam(const PrimitivePtr &prim, bool narrow_range, int32_t numbits) {
+  auto quant_param_valueptr = prim->GetAttr("quant_params");
+  if (quant_param_valueptr == nullptr) {
+    prim->AddAttr("quant_params", std::make_shared<lite::QuantParamHolder>());
+  }
+  auto quant_param_holder = prim->GetAttr("quant_params")->cast<lite::QuantParamHolderPtr>();
+  std::vector<schema::QuantParamT> quants;
+  schema::QuantParamT quant_param;
+  auto inputMin = prim->GetAttr("input_minq");
+  auto inputMax = prim->GetAttr("input_maxq");
+  if (inputMin != nullptr && inputMax != nullptr) {
+    auto inputMinPtr = inputMin->cast<tensor::TensorPtr>();
+    auto inputMaxPtr = inputMax->cast<tensor::TensorPtr>();
+    auto *minBuf = static_cast<float *>(inputMinPtr->data_c());
+    auto *maxBuf = static_cast<float *>(inputMaxPtr->data_c());
+    quant_param.min = *minBuf;
+    quant_param.max = *maxBuf;
+    auto ret =
+      lite::quant::CalQuantizationParams(&quant_param, quant_param.min, quant_param.max, narrow_range, numbits);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Can't calculate quant parameters";
+      return ret;
+    }
+    quants.emplace_back(quant_param);
+    quant_param_holder->AddInputQuantParam(quants);
+  } else {
+    std::vector<schema::QuantParamT> notinited_quant_params(1);
+    quant_param_holder->AddInputQuantParam(notinited_quant_params);
+  }
+
+  quants.clear();
+  auto filterMin = prim->GetAttr("filter_minq");
+  auto filterMax = prim->GetAttr("filter_maxq");
+  if (filterMin != nullptr && filterMax != nullptr) {
+    auto filterMinPtr = filterMin->cast<tensor::TensorPtr>();
+    auto filterMaxPtr = filterMax->cast<tensor::TensorPtr>();
+    auto *minBuf = static_cast<float *>(filterMinPtr->data_c());
+    auto *maxBuf = static_cast<float *>(filterMaxPtr->data_c());
+    quant_param.min = FLT_MAX;
+    quant_param.max = -FLT_MAX;  // most negative float; FLT_MIN is the smallest positive value
+    for (int i = 0; i < filterMinPtr->ElementsNum(); ++i) {
+      quant_param.min = (*(minBuf) < quant_param.min) ? (*minBuf) : quant_param.min;
+      quant_param.max = (*(maxBuf) > quant_param.max) ? (*maxBuf) : quant_param.max;
+      minBuf++;
+      maxBuf++;
+    }
+    auto ret = lite::quant::CalQuantizationParams(&quant_param, quant_param.min, quant_param.max, true, numbits);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Can't calculate quant parameters";
+      return ret;
+    }
+    quants.emplace_back(quant_param);
+    quant_param_holder->AddInputQuantParam(quants);
+  } else {
+    std::vector<schema::QuantParamT> notinited_quant_params(1);
+    quant_param_holder->AddInputQuantParam(notinited_quant_params);
+  }
+  return lite::RET_OK;
+}
+
+int ConvertOutputQuantParam(const PrimitivePtr &prim, bool narrow_range, int32_t numbits) {
+  auto quant_param_valueptr = prim->GetAttr("quant_params");
+  if (quant_param_valueptr == nullptr) {
+    prim->AddAttr("quant_params", std::make_shared<lite::QuantParamHolder>());
+  }
+  auto quant_param_holder = prim->GetAttr("quant_params")->cast<lite::QuantParamHolderPtr>();
+  std::vector<schema::QuantParamT> quants;
+  schema::QuantParamT quant_param;
+  auto outputMin = prim->GetAttr("output_minq");
+  auto outputMax = prim->GetAttr("output_maxq");
+  if (outputMin != nullptr && outputMax != nullptr) {
+    auto outputMinPtr = outputMin->cast<tensor::TensorPtr>();
+    auto outputMaxPtr = outputMax->cast<tensor::TensorPtr>();
+    auto *minBuf = static_cast<float *>(outputMinPtr->data_c());
+    auto *maxBuf = static_cast<float *>(outputMaxPtr->data_c());
+    quant_param.min = *minBuf;
+    quant_param.max = *maxBuf;
+    auto ret =
+      lite::quant::CalQuantizationParams(&quant_param, quant_param.min, quant_param.max, narrow_range, numbits);
+    if (ret != RET_OK) {
+      MS_LOG(ERROR) << "Can't calculate quant parameters";
+      return ret;
+    }
+    quants.emplace_back(quant_param);
+    quant_param_holder->AddOutputQuantParam(quants);
+  } else {
+    schema::QuantParamT tmpQuantParam;
+    quants.emplace_back(tmpQuantParam);
+    quant_param_holder->AddOutputQuantParam(quants);
+  }
+  return lite::RET_OK;
+}
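// lite::quant::CalQuantizationParams itself is not shown in this patch. The
// sketch below is the usual asymmetric min/max -> (scale, zeroPoint)
// derivation such a helper is assumed to perform; the names, clamping, and
// rounding details are illustrative, not the converter's actual implementation.
#include <algorithm>
#include <cmath>
#include <cstdint>
struct SimpleQuantParam {
  double scale;
  int32_t zero_point;
};
SimpleQuantParam DeriveQuantParam(float min, float max, bool narrow_range, int num_bits) {
  min = std::min(min, 0.0f);  // the range must contain 0 so that 0.0f maps to an exact integer
  max = std::max(max, 0.0f);
  const int32_t qmin = narrow_range ? -(1 << (num_bits - 1)) + 1 : -(1 << (num_bits - 1));
  const int32_t qmax = (1 << (num_bits - 1)) - 1;
  if (min == max) {
    return {1.0, 0};  // degenerate range; any scale represents the single value 0
  }
  const double scale = (static_cast<double>(max) - min) / (qmax - qmin);
  const auto zero_point = static_cast<int32_t>(std::round(qmin - min / scale));
  return {scale, zero_point};  // e.g. min = -1, max = 3, 8 bits -> scale ~0.0157, zero_point -64
}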
+
+void CheckQuantParams(const PrimitivePtr &prim) {
+  auto quant_param_valueptr = prim->GetAttr("quant_params");
+  if (quant_param_valueptr == nullptr) {
+    prim->AddAttr("quant_params", std::make_shared<lite::QuantParamHolder>());
+  }
+  auto quant_param_holder = prim->GetAttr("quant_params")->cast<lite::QuantParamHolderPtr>();
+  auto input_quant_params = quant_param_holder->input_quant_params();
+  bool is_quant = false;
+  for (size_t i = 0; i < input_quant_params.size(); ++i) {
+    if (!input_quant_params.at(i).empty() && input_quant_params.at(i).at(0).inited) {
+      is_quant = true;
+      break;
+    }
+  }
+  auto output_quant_params = quant_param_holder->output_quant_params();
+  for (size_t i = 0; i < output_quant_params.size(); ++i) {
+    if (!output_quant_params.at(i).empty() && output_quant_params.at(i).at(0).inited) {
+      is_quant = true;
+    }
+  }
+  if (!is_quant) {
+    prim->EraseAttr("quant_params");
+  }
+}
+
+int ConvertQuantParam(const PrimitivePtr &prim, const std::vector<AnfNodePtr> &inputs) {
+  auto quant_param_holder = std::make_shared<lite::QuantParamHolder>();
+  prim->AddAttr("quant_params", quant_param_holder);
+  auto narrow_range = prim->GetAttr("narrow_range");
+  bool narrow_range_param = false;
+  if (narrow_range != nullptr) {
+    if (utils::isa<tensor::TensorPtr>(narrow_range)) {
+      auto narrow_range_tensor = narrow_range->cast<tensor::TensorPtr>();
+      narrow_range_param = *reinterpret_cast<bool *>(narrow_range_tensor->data_c());
+    } else if (utils::isa<ImmTraits<bool>::type>(narrow_range)) {
+      narrow_range_param = GetValue<bool>(narrow_range);
+    } else {
+      MS_LOG(ERROR) << "value ptr is invalid.";
+      return lite::RET_ERROR;
+    }
+  }
+  auto num_bits = prim->GetAttr("num_bits");
+  int32_t num_bits_param = 8;
+  if (num_bits != nullptr) {
+    if (utils::isa<tensor::TensorPtr>(num_bits)) {
+      auto num_bits_tensor = num_bits->cast<tensor::TensorPtr>();
+      num_bits_param = *reinterpret_cast<int64_t *>(num_bits_tensor->data_c());
+    } else if (utils::isa<ImmTraits<int64_t>::type>(num_bits)) {
+      num_bits_param = GetValue<int64_t>(num_bits);
+    } else {
+      MS_LOG(ERROR) << "value ptr is invalid.";
+      return lite::RET_ERROR;
+    }
+  }
+  auto status = ConvertInputQuantParam(prim, narrow_range_param, num_bits_param);
+  if (status != lite::RET_OK) {
+    MS_LOG(ERROR) << "compute input quant param failed.";
+    return status;
+  }
+  FillDefaultInputQuantParamIfNeed(prim, inputs.size());
+  status = ConvertOutputQuantParam(prim, narrow_range_param, num_bits_param);
+  if (status != lite::RET_OK) {
+    MS_LOG(ERROR) << "compute output quant param failed.";
+    return status;
+  }
+  CheckQuantParams(prim);
+  return lite::RET_OK;
+}
+}  // namespace
+
 int MindirAdjustPass::ValueNodeInt64Convert(AnfNodePtr anf_node) {
   if (!utils::isa<ValueNodePtr>(anf_node)) {
     return lite::RET_NO_CHANGE;
@@ -120,7 +318,7 @@ int MindirAdjustPass::ParameterNodeConvert(AnfNodePtr anf_node) {
   return lite::RET_OK;
 }
 
-int MindirAdjustPass::PrimitiveConvert(std::shared_ptr<AnfNode> anf_node) {
+int MindirAdjustPass::ComputeQuantParams(std::shared_ptr<AnfNode> anf_node) {
   if (!utils::isa<CNodePtr>(anf_node)) {
     MS_LOG(INFO) << "only cnode need to convert primitive.";
     return lite::RET_NO_CHANGE;
@@ -135,10 +333,6 @@ int MindirAdjustPass::ComputeQuantParams(std::shared_ptr<AnfNode> anf_node) {
     MS_LOG(ERROR) << "value node is invalid.";
     return lite::RET_NULL_PTR;
   }
-  if (utils::isa<PrimitiveCPtr>(value_node->value())) {
-    MS_LOG(INFO) << "the value has been primitiveC.";
-    return lite::RET_NO_CHANGE;
-  }
   auto primitive = value_node->value()->cast<PrimitivePtr>();
   if (primitive
== nullptr) { MS_LOG(ERROR) << "the value is not primitive."; @@ -146,19 +340,9 @@ int MindirAdjustPass::PrimitiveConvert(std::shared_ptr<AnfNode> anf_node) { } auto inputs = cnode->inputs(); inputs.erase(inputs.begin()); - if (!CheckPrimitiveType(anf_node, prim::kPrimReturn) && !CheckPrimitiveType(anf_node, prim::kPrimMakeTuple)) { - auto primitive_c = PrimitiveC::Create(*primitive, inputs, quant_type_, train_flag_); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "fail to create a primitive_c: " << cnode->fullname_with_scope(); - lite::NoSupportOp::GetInstance()->InsertOp(primitive->name()); - return lite::RET_NOT_FIND_OP; - } - value_node->set_value(primitive_c); - } else { - auto primitiveT = std::make_unique<schema::PrimitiveT>(); - primitiveT->value.type = (CheckPrimitiveType(anf_node, prim::kPrimReturn) ? schema::PrimitiveType_Return - : schema::PrimitiveType_MakeTuple); - value_node->set_value(std::make_shared<PrimitiveC>(primitiveT.release())); + if (ConvertQuantParam(primitive, inputs) != lite::RET_OK) { + MS_LOG(ERROR) << "compute quant param failed."; + return lite::RET_ERROR; } return lite::RET_OK; } @@ -176,11 +360,10 @@ bool MindirAdjustPass::Run(const FuncGraphPtr &graph) { if (utils::isa<ParameterPtr>(node)) { status = ParameterNodeConvert(node); } else if (utils::isa<CNodePtr>(node)) { - status = PrimitiveConvert(node); + status = ComputeQuantParams(node); } else if (utils::isa<ValueNodePtr>(node)) { status = ValueNodeInt64Convert(node); } - if (status != lite::RET_OK && status != lite::RET_NO_CHANGE) { lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(status); success_flag = false; diff --git a/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.h index dbc47652c7..ab04f430b2 100644 --- a/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.h +++ b/mindspore/lite/tools/optimizer/graph/mindir_adjust_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ class MindirAdjustPass : public Pass { int ValueNodeInt64Convert(AnfNodePtr anf_node); void SetTrainFlag(bool train_flag) { train_flag_ = train_flag; } int ParameterNodeConvert(AnfNodePtr anf_node); - int PrimitiveConvert(AnfNodePtr anf_node); + int ComputeQuantParams(AnfNodePtr anf_node); bool Run(const FuncGraphPtr &graph) override; protected: diff --git a/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.cc deleted file mode 100644 index 490cec179f..0000000000 --- a/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.cc +++ /dev/null @@ -1,236 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "tools/optimizer/graph/mindir_inputs_adjust_pass.h" -#include <vector> -#include <memory> -#include "src/common/log_adapter.h" -#include "src/ops/primitive_c.h" -#include "src/tensor.h" - -using mindspore::lite::PrimitiveC; -namespace mindspore { -namespace opt { -namespace { -template <typename T> -void CopyAttrForArgMinMax(T *left, T *right) { - MS_ASSERT(left != null && right != nullptr); - left->axis = right->axis; - left->outMaxValue = right->outMaxValue; - left->axisType = right->axisType; - left->keepDims = right->keepDims; - left->topK = right->topK; -} -} // namespace - -bool MindirInputAdjustOpPass::CheckCNodeIsArgMinMax(const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - auto prim_node = cnode->inputs().at(0); - MS_ASSERT(prim_node != nullptr); - auto prim_value_node = prim_node->cast<ValueNodePtr>(); - if (prim_value_node == nullptr) { - MS_LOG(DEBUG) << "cnode first input is not valueNode."; - return false; - } - auto value = prim_value_node->value(); - MS_ASSERT(value != nullptr); - auto prim_c = value->cast<PrimitiveCPtr>(); - if (prim_c == nullptr) { - MS_LOG(DEBUG) << "prim is not primitiveC."; - return false; - } - auto prim = prim_c->primitiveT(); - MS_ASSERT(prim != nullptr); - return prim->value.type == schema::PrimitiveType_ArgMax || prim->value.type == schema::PrimitiveType_ArgMin; -} - -int MindirInputAdjustOpPass::AdjustArgMinMaxInputs(std::vector<AnfNodePtr> *inputs, bool index_or_value) { - MS_ASSERT(inputs != nullptr); - auto prim_node = inputs->at(0); - MS_ASSERT(prim_node != nullptr); - auto prim_value_node = prim_node->cast<ValueNodePtr>(); - if (prim_value_node == nullptr) { - MS_LOG(ERROR) << "cnode first input is not valueNode."; - return lite::RET_ERROR; - } - auto prim_value = prim_value_node->value(); - if (prim_value == nullptr) { - MS_LOG(ERROR) << "valueNode value is nullptr."; - return lite::RET_ERROR; - } - auto prim_c = prim_value->cast<PrimitiveCPtr>(); - if (prim_c == nullptr) { - MS_LOG(ERROR) << "value is not primitiveC."; - return lite::RET_ERROR; - } - auto prim = prim_c->primitiveT(); - MS_ASSERT(prim != nullptr && prim->value.value != nullptr); - auto attr = prim->value.value; - if (prim->value.type == schema::PrimitiveType_ArgMax) { - reinterpret_cast<schema::ArgMaxT *>(attr)->outMaxValue = index_or_value; - } else if (prim->value.type == schema::PrimitiveType_ArgMin) { - reinterpret_cast<schema::ArgMinT *>(attr)->outMaxValue = index_or_value; - } - return lite::RET_OK; -} - -int MindirInputAdjustOpPass::CopyPrimitiveCForArgMinMax(std::vector<AnfNodePtr> *inputs) { - MS_ASSERT(inputs != nullptr); - auto prim_node = inputs->at(0); - MS_ASSERT(prim_node != nullptr); - auto prim_value_node = prim_node->cast<ValueNodePtr>(); - if (prim_value_node == nullptr) { - MS_LOG(ERROR) << "cnode first input is not valueNode."; - return lite::RET_ERROR; - } - auto prim_value = prim_value_node->value(); - if (prim_value == nullptr) { - MS_LOG(ERROR) << "valueNode value is nullptr."; - return lite::RET_ERROR; - } - auto prim_c = prim_value->cast<PrimitiveCPtr>(); - if (prim_c == nullptr) { - MS_LOG(ERROR) << "value is not primitiveC."; - return lite::RET_ERROR; - } - auto prim = prim_c->primitiveT(); - MS_ASSERT(prim != nullptr && prim->value.value != nullptr); - auto primitive = std::make_unique<schema::PrimitiveT>(); - if (prim->value.type == schema::PrimitiveType_ArgMax) { - primitive->value.type = schema::PrimitiveType_ArgMax; - auto attr = std::make_unique<schema::ArgMaxT>(); - CopyAttrForArgMinMax<schema::ArgMaxT>(attr.get(), 
reinterpret_cast<schema::ArgMaxT *>(prim->value.value)); - primitive->value.value = attr.release(); - } else { - primitive->value.type = schema::PrimitiveType_ArgMin; - auto attr = std::make_unique<schema::ArgMinT>(); - CopyAttrForArgMinMax<schema::ArgMinT>(attr.get(), reinterpret_cast<schema::ArgMinT *>(prim->value.value)); - primitive->value.value = attr.release(); - } - auto primitive_c = PrimitiveC::Create(primitive.release()); - auto value_node = NewValueNode(std::shared_ptr<PrimitiveC>(primitive_c)); - inputs->erase(inputs->begin()); - inputs->insert(inputs->begin(), value_node); - return lite::RET_OK; -} - -int MindirInputAdjustOpPass::BuildCNodeForArgMinMax(const FuncGraphPtr &graph, const CNodePtr &tuple_get_item, - const CNodePtr &argmin_max) { - MS_ASSERT(graph != nullptr && tuple_get_item != nullptr && argmin_max != nullptr); - auto inputs = argmin_max->inputs(); - if (CopyPrimitiveCForArgMinMax(&inputs) != lite::RET_OK) { - MS_LOG(ERROR) << "copy argmin or argmax failed."; - return lite::RET_ERROR; - } - if (AdjustArgMinMaxInputs(&inputs, false) != lite::RET_OK) { - MS_LOG(ERROR) << "adjust argmin or argmax attr failed."; - return lite::RET_ERROR; - } - auto new_cnode = graph->NewCNode(inputs); - new_cnode->set_fullname_with_scope(argmin_max->fullname_with_scope() + "_index"); - auto type_ptr = TypeIdToType(kTypeUnknown); - std::vector<int64_t> shape_vector; - new_cnode->set_abstract(std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector)); - auto manager = graph->manager(); - MS_ASSERT(manager != nullptr); - manager->Replace(tuple_get_item, new_cnode); - return lite::RET_OK; -} - -int MindirInputAdjustOpPass::AdjustArgMinMax(const FuncGraphPtr &graph, const CNodePtr &tuple_get_item, - const CNodePtr &argmin_max) { - MS_ASSERT(graph != nullptr && tuple_get_item != nullptr && argmin_max != nullptr); - auto inputs = argmin_max->inputs(); - if (AdjustArgMinMaxInputs(&inputs, true) != lite::RET_OK) { - MS_LOG(ERROR) << "adjust argmin or argmax attr failed."; - return lite::RET_ERROR; - } - auto type_ptr = TypeIdToType(kTypeUnknown); - std::vector<int64_t> shape_vector; - auto abtract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - argmin_max->set_abstract(abtract_tensor); - auto manager = graph->manager(); - MS_ASSERT(manager != nullptr); - manager->Replace(tuple_get_item, argmin_max); - return lite::RET_OK; -} - -int MindirInputAdjustOpPass::AdjustTupleGetItemWithArgMinMax(const FuncGraphPtr &graph, const CNodePtr &cnode) { - MS_ASSERT(graph != nullptr && cnode != nullptr); - auto inputs = cnode->inputs(); - if (inputs.size() != 3) { - MS_LOG(ERROR) << "tupleGetItem inputs size is invalid: " << inputs.size(); - return lite::RET_ERROR; - } - auto argmin_max = inputs.at(1); - MS_ASSERT(argmin_max != nullptr); - auto argmin_max_cnode = argmin_max->cast<CNodePtr>(); - if (argmin_max_cnode == nullptr) { - MS_LOG(ERROR) << "the second input is not a cnode."; - return lite::RET_ERROR; - } - if (!CheckCNodeIsArgMinMax(argmin_max_cnode)) { - MS_LOG(DEBUG) << "tuple_get_item first input is not argmin and argmax."; - return lite::RET_OK; - } - auto index_vnode = inputs.at(2); - auto value_node = index_vnode->cast<ValueNodePtr>(); - if (value_node == nullptr) { - MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode"; - return lite::RET_ERROR; - } - int index = lite::CastToInt(value_node->value()).front(); - if (index == 0) { - if (BuildCNodeForArgMinMax(graph, cnode, argmin_max_cnode) != lite::RET_OK) { - MS_LOG(ERROR) << "build new cnode 
failed."; - return lite::RET_ERROR; - } - } else if (index == 1) { - if (AdjustArgMinMax(graph, cnode, argmin_max_cnode) != lite::RET_OK) { - MS_LOG(ERROR) << "adjust argmin_max failed."; - return lite::RET_ERROR; - } - } - return lite::RET_OK; -} - -bool MindirInputAdjustOpPass::Run(const FuncGraphPtr &graph) { - MS_ASSERT(graph != nullptr); - auto manager = Manage(graph, true); - if (manager == nullptr) { - MS_LOG(ERROR) << "manager is nullptr."; - return lite::RET_NULL_PTR; - } - auto node_list = TopoSort(graph->get_return()); - int status = lite::RET_OK; - for (auto &node : node_list) { - auto cnode = node->cast<CNodePtr>(); - if (cnode == nullptr) { - MS_LOG(DEBUG) << "node is not cnode."; - continue; - } - auto type = opt::GetCNodeType(node); - if (type == schema::PrimitiveType_TupleGetItem) { - status = AdjustTupleGetItemWithArgMinMax(graph, cnode); - } - if (status != lite::RET_OK && status != lite::RET_NO_CHANGE) { - MS_LOG(ERROR) << "adjust input pass is failed."; - return false; - } - } - return true; -} -} // namespace opt -} // namespace mindspore diff --git a/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.h deleted file mode 100644 index 7040f81253..0000000000 --- a/mindspore/lite/tools/optimizer/graph/mindir_inputs_adjust_pass.h +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MINDIR_INPUTS_ADJUST_PASS_H_ -#define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MINDIR_INPUTS_ADJUST_PASS_H_ - -#include <string> -#include <vector> -#include "backend/optimizer/common/pass.h" -#include "tools/converter/converter_flags.h" -#include "tools/optimizer/common/gllo_utils.h" -#include "src/param_value_lite.h" - -namespace mindspore::opt { -class MindirInputAdjustOpPass : public Pass { - public: - MindirInputAdjustOpPass() : Pass("mindir_inputs_adjust_pass") {} - ~MindirInputAdjustOpPass() override = default; - bool CheckCNodeIsArgMinMax(const CNodePtr &cnode); - int AdjustArgMinMaxInputs(std::vector<AnfNodePtr> *inputs, bool index_or_value); - int CopyPrimitiveCForArgMinMax(std::vector<AnfNodePtr> *inputs); - int BuildCNodeForArgMinMax(const FuncGraphPtr &graph, const CNodePtr &tuple_get_item, const CNodePtr &argmin_max); - int AdjustArgMinMax(const FuncGraphPtr &graph, const CNodePtr &tuple_get_item, const CNodePtr &argmin_max); - int AdjustTupleGetItemWithArgMinMax(const FuncGraphPtr &graph, const CNodePtr &cnode); - bool Run(const FuncGraphPtr &graph) override; -}; -} // namespace mindspore::opt -#endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_MINDIR_INPUTS_ADJUST_PASS_H_ diff --git a/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.cc index e0ca9b82ba..ea9680119c 100644 --- a/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,132 +15,48 @@ */ #include "tools/optimizer/graph/onnx_inputs_adjust_pass.h" #include <algorithm> +#include <vector> +#include <string> #include <functional> #include <memory> -#include <string> -#include <vector> -#include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_transpose_fusion.h" +#include "ops/resize.h" +#include "include/errorcode.h" namespace mindspore::opt { -bool OnnxInputAdjustOpPass::CheckInputs(const CNodePtr &cnode) { - if (cnode == nullptr) { - MS_LOG(ERROR) << "cnode is nullptr."; - return false; - } - if (std::any_of(cnode->inputs().begin(), cnode->inputs().end(), - [](const AnfNodePtr &anf_node) { return anf_node == nullptr; })) { - MS_LOG(ERROR) << "input is nullptr."; - return false; - } - return true; -} - -ParameterPtr OnnxInputAdjustOpPass::BuildParameterNode(const FuncGraphPtr &func_graph, const std::vector<int> &data, - const std::string &node_name) { - MS_ASSERT(func_graph != nullptr); - MS_ASSERT(data.size() != 0); - auto param_node = func_graph->add_parameter(); - auto type_ptr = TypeIdToType(kNumberTypeInt32); - std::vector<int64_t> shape_vector{static_cast<int64_t>(data.size())}; - auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); - param_node->set_abstract(abstract_tensor); - param_node->set_name(node_name); - ParamValueLitePtr param_value = std::make_shared<ParamValueLite>(); - MS_ASSERT(param_value != nullptr); - std::vector<int> shape{static_cast<int>(data.size())}; - param_value->set_tensor_shape(shape); - param_value->set_tensor_type(kNumberTypeInt32); - param_value->set_format(schema::Format::Format_NCHW); - char *default_data = new char[data.size() * sizeof(int)]; - if (memcpy_s(default_data, data.size() * sizeof(int), data.data(), data.size() * sizeof(int)) != EOK) { - MS_LOG(ERROR) << "memcpy data failed."; - delete[] default_data; - return nullptr; - } - param_value->SetTensorData(default_data, data.size() * sizeof(int)); - param_node->set_default_param(param_value); - return param_node; -} - -ParameterPtr OnnxInputAdjustOpPass::BuildParameterNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, - const ParamValueLitePtr &param_value) { - MS_ASSERT(func_graph != nullptr); +STATUS OnnxInputAdjustOpPass::AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int input_num, + const std::string &attr_name) { MS_ASSERT(cnode != nullptr); - MS_ASSERT(param_value != nullptr); - auto param_node = func_graph->add_parameter(); - auto shape = param_value->tensor_shape(); - std::vector<int64_t> shape_vector; - std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), - [](const int &val) { return static_cast<int64_t>(val); }); - auto data_type = param_value->tensor_type() == kNumberTypeInt64 ? 
kNumberTypeInt32 : param_value->tensor_type();
-  auto abstract_tensor = std::make_shared<abstract::AbstractTensor>(TypeIdToType(data_type), shape_vector);
-  param_node->set_abstract(abstract_tensor);
-  if (utils::isa<CNodePtr>(node)) {
-    param_node->set_name(node->cast<CNodePtr>()->fullname_with_scope());
-  } else if (utils::isa<ParameterPtr>(node)) {
-    param_node->set_name(node->cast<ParameterPtr>()->name());
-  }
-  ParamValueLitePtr param_value_new = std::make_shared<ParamValueLite>();
-  param_value_new->set_format(param_value->format());
-  param_value_new->set_tensor_shape(shape);
-  size_t data_count = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
-  if (param_value->tensor_size() == 0) {
-    if (param_value->tensor_type() == kNumberTypeInt64) {
-      param_value_new->set_tensor_type(kNumberTypeInt32);
-    }
-    param_node->set_default_param(param_value_new);
-    return param_node;
-  }
-  if (param_value->tensor_type() == kNumberTypeInt64) {
-    param_value_new->set_tensor_type(kNumberTypeInt32);
-    auto *tensor_data = new (std::nothrow) int[data_count];
-    if (tensor_data == nullptr) {
-      MS_LOG(ERROR) << "new data failed";
-      return nullptr;
-    }
-    auto *origin_data = reinterpret_cast<int64_t *>(param_value->tensor_addr());
-    for (size_t i = 0; i < data_count; ++i) {
-      if (origin_data[i] > static_cast<int64_t>(INT32_MAX) || origin_data[i] < static_cast<int64_t>(INT32_MIN)) {
-        MS_LOG(WARNING) << "int64 data " << origin_data[i] << "too big to fit into int32";
-        tensor_data[i] = origin_data[i] > 0 ? INT32_MAX : INT32_MIN;
-      } else {
-        tensor_data[i] = static_cast<int>(origin_data[i]);
-      }
+  if (!CheckInputs(cnode)) {
+    MS_LOG(ERROR) << "input is invalid.";
+    return lite::RET_INPUT_TENSOR_ERROR;
+  }
+  auto primitive_c = GetValueNode<PrimitiveCPtr>(cnode->input(0));
+  MS_LOG(INFO) << "supplement " << attr_name << " attr to input";
+  auto value_ptr = primitive_c->GetAttr(attr_name);
+  auto inputs = cnode->inputs();
+  if (static_cast<int>(inputs.size()) > input_num) {
+    if (value_ptr != nullptr) {
+      primitive_c->EraseAttr(attr_name);
     }
-    param_value_new->SetTensorData(tensor_data, data_count * sizeof(int32_t));
+    MS_LOG(DEBUG) << "input num has been met, which is " << inputs.size();
+    return lite::RET_OK;
+  } else if (static_cast<int>(inputs.size()) < input_num) {
+    MS_LOG(ERROR) << "input num is invalid.";
+    return lite::RET_ERROR;
+  }
+  if (value_ptr != nullptr) {
+    auto value_data = GetValue<std::vector<int32_t>>(value_ptr);
+    auto param_node = BuildIntVecParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name);
+    inputs.push_back(param_node);
+    cnode->set_inputs(inputs);
+    primitive_c->EraseAttr(attr_name);
   } else {
-    param_value_new->set_tensor_type(param_value->tensor_type());
-    char *tensor_data = new (std::nothrow) char[param_value->tensor_size()];
-    if (tensor_data == nullptr) {
-      MS_LOG(ERROR) << "new data failed";
-      return nullptr;
-    }
-    if (memcpy_s(tensor_data, param_value->tensor_size(), param_value->tensor_addr(), param_value->tensor_size()) !=
-        RET_OK) {
-      MS_LOG(ERROR) << "memcpy data failed.";
-      delete[] tensor_data;
-      return nullptr;
-    }
-    param_value_new->SetTensorData(tensor_data, param_value->tensor_size());
+    MS_LOG(ERROR) << "there is no attr: " << attr_name;
+    return lite::RET_ERROR;
   }
-  param_node->set_default_param(param_value_new);
-  return param_node;
-}
-STATUS OnnxInputAdjustOpPass::StridedSliceAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode,
-                                                      const std::string &attr_name) {
-  MS_ASSERT(func_graph !=
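// The new AddAttrToInput keeps ONNX attributes and real operands in sync;
// input indices count the primitive at position 0, so input_num names the
// slot the materialized attribute should occupy. A standalone sketch of the
// three-way contract implemented above (hypothetical names, for illustration):
#include <cstddef>
#include <cstdio>
enum class AttrAction {
  kEraseOnly,    // operand already present as an input: just drop the stale attr
  kMaterialize,  // attr becomes a parameter node appended at index input_num
  kMalformed,    // fewer inputs than input_num: reject the node
};
AttrAction Classify(size_t current_inputs, size_t input_num) {
  if (current_inputs > input_num) return AttrAction::kEraseOnly;
  if (current_inputs < input_num) return AttrAction::kMalformed;
  return AttrAction::kMaterialize;
}
int main() {
  // An ONNX Slice arriving as Slice(x) has inputs {prim, x}, so "starts"
  // (input_num == 2) is materialized as input 2, then "ends" as input 3, etc.
  std::printf("%d\n", static_cast<int>(Classify(2, 2)));
  return 0;
}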
nullptr); - MS_ASSERT(cnode != nullptr); - auto inputs = cnode->inputs(); - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(cnode->input(0)); - auto value_ptr = primitive_c->GetAttr(attr_name); - MS_ASSERT(value_ptr != nullptr); - std::vector<int> value_data = GetValue<std::vector<int>>(value_ptr); - auto param_node = BuildParameterNode(func_graph, value_data, cnode->fullname_with_scope() + "_" + attr_name); - inputs.push_back(param_node); - cnode->set_inputs(inputs); - primitive_c->EraseAttr(attr_name); return lite::RET_OK; } @@ -187,279 +103,28 @@ STATUS OnnxInputAdjustOpPass::ReplaceInt64ParameterNode(const FuncGraphPtr &func return lite::RET_OK; } -STATUS OnnxInputAdjustOpPass::AdjustPower(const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - if (!CheckInputs(cnode)) { - MS_LOG(ERROR) << "input is invalid."; - return lite::RET_INPUT_TENSOR_ERROR; - } - if (cnode->inputs().size() != 3) { - MS_LOG(ERROR) << "onnx power inputs is 2, but now is " << cnode->inputs().size() - 1; - return lite::RET_ERROR; - } - auto pow_param = cnode->input(2)->cast<ParameterPtr>(); - if (pow_param == nullptr || !pow_param->has_default()) { - MS_LOG(ERROR) << "pow is from other node, which hasn't been supported."; - return lite::RET_NOT_SUPPORT; - } - auto pow_default = pow_param->default_param()->cast<ParamValueLitePtr>(); - if (pow_default == nullptr) { - MS_LOG(ERROR) << "pow is not a paramValueLite."; - return lite::RET_NULL_PTR; - } - if (std::accumulate(pow_default->tensor_shape().begin(), pow_default->tensor_shape().end(), 1, - std::multiplies<int>()) != 1) { - MS_LOG(ERROR) << "the pow element num is bigger than 1, which don't support now."; - return lite::RET_NOT_SUPPORT; - } - if (pow_default->tensor_addr() == nullptr) { - MS_LOG(ERROR) << "power's attr pow can't be obtained."; - return lite::RET_INVALID_OP_ATTR; - } - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr || primitive_c->primitiveT() == nullptr || - primitive_c->primitiveT()->value.value == nullptr) { - MS_LOG(ERROR) << "get primitive_c failed."; - return lite::RET_NULL_PTR; - } - reinterpret_cast<schema::PowerT *>(primitive_c->primitiveT()->value.value)->power = - *reinterpret_cast<float *>(pow_default->tensor_addr()); - auto inputs = cnode->inputs(); - inputs.pop_back(); - cnode->set_inputs(inputs); - return lite::RET_OK; -} - -STATUS OnnxInputAdjustOpPass::AdjustStridedSlice(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - if (!CheckInputs(cnode)) { - MS_LOG(ERROR) << "input is invalid."; - return lite::RET_INPUT_TENSOR_ERROR; - } - if (cnode->inputs().size() == 2) { - if (StridedSliceAttrToInput(func_graph, cnode, "starts") != lite::RET_OK || - StridedSliceAttrToInput(func_graph, cnode, "ends") != lite::RET_OK || - StridedSliceAttrToInput(func_graph, cnode, "axes") != lite::RET_OK || - StridedSliceAttrToInput(func_graph, cnode, "steps") != lite::RET_OK) { - MS_LOG(ERROR) << "attr to input failed."; - return lite::RET_ERROR; - } - } else if (cnode->inputs().size() < 4) { - MS_LOG(ERROR) << "onnx slice's input size need to be larger than 2, now is " << cnode->inputs().size() - 1; - return lite::RET_INPUT_TENSOR_ERROR; - } - int size = 0; - for (size_t i = 2; i < cnode->inputs().size(); ++i) { - const auto &param_node = cnode->input(2)->cast<ParameterPtr>(); - if (param_node == nullptr || !param_node->has_default()) { - continue; - } - const auto &default_data = 
param_node->default_param()->cast<ParamValueLitePtr>(); - if (default_data == nullptr) { - MS_LOG(ERROR) << "this input is not a paramValueLite."; - return lite::RET_ERROR; - } - auto shape = default_data->tensor_shape(); - size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()); - break; - } - auto inputs = cnode->inputs(); - switch (cnode->inputs().size()) { - case 4: { - std::vector<int> axes; - for (int i = 0; i < size; ++i) { - axes.push_back(i); - } - auto new_param_node = BuildParameterNode(func_graph, axes, cnode->fullname_with_scope() + "_axes"); - if (new_param_node == nullptr) { - MS_LOG(ERROR) << "new a parameter node failed."; - } - inputs.push_back(new_param_node); - } - case 5: { - std::vector<int> steps; - for (int i = 0; i < size; ++i) { - steps.push_back(1); - } - auto new_param_node = BuildParameterNode(func_graph, steps, cnode->fullname_with_scope() + "_steps"); - if (new_param_node == nullptr) { - MS_LOG(ERROR) << "new a parameter node failed."; - } - inputs.push_back(new_param_node); - break; - } - default: - MS_LOG(DEBUG) << "no need to adjust."; - return lite::RET_NO_CHANGE; - } - cnode->set_inputs(inputs); - return lite::RET_OK; -} - STATUS OnnxInputAdjustOpPass::AdjustResize(const CNodePtr &cnode) { MS_ASSERT(cnode != nullptr); auto node = cnode->input(0); - MS_ASSERT(value_node != nullptr); - auto value_node = node->cast<ValueNodePtr>(); - if (value_node == nullptr) { - MS_LOG(ERROR) << "cnode input0 is not a valuenode."; - return lite::RET_ERROR; - } - MS_ASSERT(value_node->value() != nullptr); - auto primitive_c = value_node->value()->cast<PrimitiveCPtr>(); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "cnode has no primitive_c."; - return lite::RET_ERROR; - } - auto primitive = primitive_c->primitiveT(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "cnode has no schema::primitive."; - return lite::RET_ERROR; - } - if (primitive->value.type != schema::PrimitiveType_Resize) { - MS_LOG(DEBUG) << "cnode is not cast node."; - return RET_OK; - } - auto value = primitive->value.value; - if (value == nullptr) { - MS_LOG(ERROR) << "value is nullptr."; + MS_ASSERT(node != nullptr); + auto resize_prim = GetValueNode<std::shared_ptr<ops::Resize>>(node); + if (resize_prim == nullptr) { + MS_LOG(ERROR) << "cnode is invalid."; return lite::RET_ERROR; } - auto attr = reinterpret_cast<schema::ResizeT *>(value); - if (cnode->inputs().size() > 3 && - attr->coordinateTransformMode != schema::CoordinateTransformMode_TF_CROP_AND_RESIZE) { - auto new_resize_inputs = cnode->inputs(); - new_resize_inputs.erase(new_resize_inputs.begin() + 2); - cnode->set_inputs(new_resize_inputs); + if (resize_prim->GetAttr(ops::kCoordinateTransformMode) == nullptr) { + return lite::RET_OK; } - if (cnode->inputs().size() > 3 && attr->coordinateTransformMode == schema::CoordinateTransformMode_HALF_PIXEL) { + if (cnode->inputs().size() > 4 && resize_prim->get_coordinate_transform_mode() == mindspore::HALF_PIXEL) { std::vector<AnfNodePtr> new_resize_inputs; new_resize_inputs.push_back(cnode->inputs()[0]); new_resize_inputs.push_back(cnode->inputs()[1]); new_resize_inputs.push_back(cnode->inputs()[4]); cnode->set_inputs(new_resize_inputs); - } - return lite::RET_OK; -} - -STATUS OnnxInputAdjustOpPass::AdjustConvOrDeConv(const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - if (!CheckInputs(cnode)) { - MS_LOG(ERROR) << "input is invalid."; - return lite::RET_INPUT_TENSOR_ERROR; - } - auto type = opt::GetCNodeType(cnode); - if (type != schema::PrimitiveType_Conv2D && type 
!= schema::PrimitiveType_DeConv2D) { - MS_LOG(DEBUG) << "node is not conv2d and deconv2d."; - return lite::RET_NO_CHANGE; - } - if (cnode->inputs().size() < 3) { - MS_LOG(ERROR) << "conv2d or deconv2d's input size is error, which is " << cnode->inputs().size() - 1; - return lite::RET_ERROR; - } - auto weight_param_node = cnode->input(2)->cast<ParameterPtr>(); - if (weight_param_node == nullptr || !weight_param_node->has_default()) { - MS_LOG(INFO) << "weight tensor is not const tensor, which hasn't been supported."; - return lite::RET_NOT_SUPPORT; - } - auto weight_param_value = weight_param_node->default_param()->cast<ParamValueLitePtr>(); - if (weight_param_value == nullptr) { - MS_LOG(ERROR) << "weight is not a paramValueLite."; - return lite::RET_ERROR; - } - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr || primitive_c->primitiveT() == nullptr || - primitive_c->primitiveT()->value.value == nullptr) { - MS_LOG(ERROR) << "get primitive_c failed."; - return lite::RET_NULL_PTR; - } - if (type == schema::PrimitiveType_Conv2D) { - weight_param_value->set_format(reinterpret_cast<schema::Conv2DT *>(primitive_c->primitiveT()->value.value)->format); - } else { - weight_param_value->set_format( - reinterpret_cast<schema::DeConv2DT *>(primitive_c->primitiveT()->value.value)->format); - } - return lite::RET_OK; -} - -STATUS OnnxInputAdjustOpPass::AdjustTile(const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - if (!CheckInputs(cnode)) { - MS_LOG(ERROR) << "input is invalid."; - return lite::RET_INPUT_TENSOR_ERROR; - } - if (cnode->inputs().size() != 3) { - MS_LOG(ERROR) << "x tile input size should be 2, now is " << cnode->inputs().size() - 1; - return lite::RET_INPUT_TENSOR_ERROR; - } - auto multiples_node = cnode->input(2)->cast<ParameterPtr>(); - if (multiples_node == nullptr || !multiples_node->has_default()) { - MS_LOG(INFO) << "multiples tensor is not const tensor, which hasn't been supported."; - return lite::RET_NOT_SUPPORT; - } - auto multiples_param_value = multiples_node->cast<ParamValueLitePtr>(); - if (multiples_param_value == nullptr) { - MS_LOG(ERROR) << "weight is not a paramValueLite."; - return lite::RET_ERROR; - } - size_t dims_size = multiples_param_value->tensor_size() / sizeof(int); - if (dims_size == 0) { - MS_LOG(INFO) << "multiples tensor is not const tensor, which hasn't been supported."; - return lite::RET_NOT_SUPPORT; - } - std::vector<int> multiples(dims_size, 0); - if (memcpy_s(multiples.data(), dims_size * sizeof(int), multiples_param_value->tensor_addr(), - dims_size * sizeof(int)) != EOK) { - MS_LOG(ERROR) << "memcpy_s failed."; - return lite::RET_ERROR; - } - std::vector<int> dims; - for (size_t i = 0; i < dims_size; ++i) { - dims.push_back(i); - } - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(cnode->input(0)); - if (primitive_c == nullptr || primitive_c->primitiveT() == nullptr || - primitive_c->primitiveT()->value.value == nullptr) { - MS_LOG(ERROR) << "get primitive_c failed."; - return lite::RET_NULL_PTR; - } - reinterpret_cast<schema::TileT *>(primitive_c->primitiveT()->value.value)->multiples = multiples; - reinterpret_cast<schema::TileT *>(primitive_c->primitiveT()->value.value)->dims = dims; - return lite::RET_OK; -} - -STATUS OnnxInputAdjustOpPass::AdjustCast(const CNodePtr &cnode) { - MS_ASSERT(cnode != nullptr); - auto node = cnode->input(0); - MS_ASSERT(value_node != nullptr); - auto value_node = node->cast<ValueNodePtr>(); - if (value_node == nullptr) { - 
MS_LOG(ERROR) << "cnode input0 is not a valuenode."; - return lite::RET_ERROR; - } - MS_ASSERT(value_node->value() != nullptr); - auto primitive_c = value_node->value()->cast<PrimitiveCPtr>(); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "cnode has no primitive_c."; - return lite::RET_ERROR; - } - auto primitive = primitive_c->primitiveT(); - if (primitive == nullptr) { - MS_LOG(ERROR) << "cnode has no schema::primitive."; - return lite::RET_ERROR; - } - if (primitive->value.type != schema::PrimitiveType_Cast) { - MS_LOG(DEBUG) << "cnode is not cast node."; - return RET_OK; - } - auto value = primitive->value.value; - if (value == nullptr) { - MS_LOG(ERROR) << "value is nullptr."; - return lite::RET_ERROR; - } - auto attr = reinterpret_cast<schema::CastT *>(value); - if (attr->dstT == kNumberTypeInt64) { - attr->dstT = kNumberTypeInt32; + } else if (cnode->inputs().size() == 4) { + auto new_input = cnode->inputs(); + new_input.erase(new_input.begin() + 2); + cnode->set_inputs(new_input); } return lite::RET_OK; } @@ -467,7 +132,7 @@ STATUS OnnxInputAdjustOpPass::AdjustCast(const CNodePtr &cnode) { STATUS OnnxInputAdjustOpPass::ReplaceConstant(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { MS_ASSERT(func_graph != nullptr); MS_ASSERT(cnode != nullptr); - if (cnode->inputs().size() < 1 || cnode->input(0) == nullptr) { + if (cnode->inputs().empty() || cnode->input(0) == nullptr) { MS_LOG(ERROR) << "constant cnode has no primitive."; return lite::RET_ERROR; } @@ -510,8 +175,8 @@ STATUS OnnxInputAdjustOpPass::ReplaceConstant(const FuncGraphPtr &func_graph, co STATUS OnnxInputAdjustOpPass::ReplaceTransposeWithGraphInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode) { MS_ASSERT(func_graph != nullptr); MS_ASSERT(cnode != nullptr); - if (cnode->inputs().size() != 2) { - MS_LOG(ERROR) << "onnx transpose input size is 1, now is " << cnode->inputs().size() - 1; + if (cnode->inputs().size() != 3) { + MS_LOG(ERROR) << "onnx transpose input size is 2, now is " << cnode->inputs().size() - 1; return lite::RET_ERROR; } auto anf_node = cnode->input(1); @@ -531,21 +196,27 @@ STATUS OnnxInputAdjustOpPass::ReplaceTransposeWithGraphInput(const FuncGraphPtr MS_LOG(DEBUG) << "only adjust 4 dims graph input."; return lite::RET_OK; } - auto prim_anf = cnode->input(0); - if (prim_anf == nullptr || !utils::isa<ValueNodePtr>(prim_anf)) { - MS_LOG(ERROR) << "cnode input0 is invalid."; + auto perm_anf = cnode->input(2); + auto perm_param = perm_anf->cast<ParameterPtr>(); + if (perm_param == nullptr || !perm_param->has_default() || + !utils::isa<ParamValueLitePtr>(perm_param->default_param())) { + MS_LOG(DEBUG) << "transpose second input is not parameter node."; + return lite::RET_OK; + } + auto perm_value = perm_param->default_param()->cast<ParamValueLitePtr>(); + if (perm_value->tensor_shape().empty()) { + MS_LOG(ERROR) << "transpose second input is invalid."; return lite::RET_ERROR; } - auto value_node = prim_anf->cast<ValueNodePtr>(); - MS_ASSERT(value_node->value() != nullptr); - auto prim = value_node->value()->cast<PrimitiveCPtr>(); - MS_ASSERT(prim != nullptr && prim->primitiveT() != nullptr && prim->primitiveT()->value.value != nullptr); - auto attr = reinterpret_cast<schema::TransposeT *>(prim->primitiveT()->value.value); - auto perm = attr->perm; - std::vector<int> transpose_attr; - std::transform(perm.begin(), perm.end(), std::back_inserter(transpose_attr), + std::vector<int> perm(perm_value->tensor_shape()[0]); + if (memcpy_s(perm.data(), perm_value->tensor_size(), 
perm_value->tensor_addr(), perm_value->tensor_size()) != EOK) {
+    MS_LOG(ERROR) << "memcpy data failed.";
+    return lite::RET_ERROR;
+  }
+  std::vector<int> transpose_perm;
+  std::transform(perm.begin(), perm.end(), std::back_inserter(transpose_perm),
                  [](const int &val) { return val < 0 ? val + 4 : val; });
-  if (transpose_attr[0] == 0 && transpose_attr[1] == 3 && transpose_attr[2] == 1) {
+  if (transpose_perm[0] == 0 && transpose_perm[1] == 3 && transpose_perm[2] == 1) {
     auto channel = shape_vector[3];
     shape_vector.pop_back();
     shape_vector.insert(shape_vector.begin() + 1, channel);
@@ -557,6 +228,74 @@ STATUS OnnxInputAdjustOpPass::ReplaceTransposeWithGraphInput(const FuncGraphPtr
   return lite::RET_OK;
 }
 
+STATUS OnnxInputAdjustOpPass::AdjustStridedSlice(const FuncGraphPtr &func_graph, const CNodePtr &cnode) {
+  MS_ASSERT(cnode != nullptr);
+  if (!CheckInputs(cnode)) {
+    MS_LOG(ERROR) << "input is invalid.";
+    return lite::RET_INPUT_TENSOR_ERROR;
+  }
+  if (cnode->inputs().size() == 2) {
+    if (AddAttrToInput(func_graph, cnode, 2, "starts") != lite::RET_OK ||
+        AddAttrToInput(func_graph, cnode, 3, "ends") != lite::RET_OK ||
+        AddAttrToInput(func_graph, cnode, 4, "axes") != lite::RET_OK ||
+        AddAttrToInput(func_graph, cnode, 5, "steps") != lite::RET_OK) {
+      MS_LOG(ERROR) << "attr to input failed.";
+      return lite::RET_ERROR;
+    }
+  } else if (cnode->inputs().size() <= 3) {
+    MS_LOG(ERROR) << "onnx slice's input size needs to be larger than 2, now is " << cnode->inputs().size() - 1;
+    return lite::RET_INPUT_TENSOR_ERROR;
+  }
+  int size = 0;
+  for (size_t i = 2; i < cnode->inputs().size(); ++i) {
+    const auto &param_node = cnode->input(2)->cast<ParameterPtr>();
+    if (param_node == nullptr || !param_node->has_default()) {
+      continue;
+    }
+    const auto &default_data = param_node->default_param()->cast<ParamValueLitePtr>();
+    if (default_data == nullptr) {
+      MS_LOG(ERROR) << "this input is not a paramValueLite.";
+      return lite::RET_ERROR;
+    }
+    auto shape = default_data->tensor_shape();
+    size = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
+    break;
+  }
+  auto inputs = cnode->inputs();
+  switch (cnode->inputs().size()) {
+    case 4: {
+      std::vector<int32_t> axes;
+      for (int i = 0; i < size; ++i) {
+        axes.push_back(i);
+      }
+      auto new_param_node = BuildIntVecParameterNode(func_graph, axes, cnode->fullname_with_scope() + "_axes");
+      if (new_param_node == nullptr) {
+        MS_LOG(ERROR) << "new a parameter node failed.";
+        return lite::RET_NULL_PTR;
+      }
+      inputs.push_back(new_param_node);
+    }  // no break: a 4-input slice is also missing "steps"
+    case 5: {
+      std::vector<int32_t> steps;
+      for (int i = 0; i < size; ++i) {
+        steps.push_back(1);
+      }
+      auto new_param_node = BuildIntVecParameterNode(func_graph, steps, cnode->fullname_with_scope() + "_steps");
+      if (new_param_node == nullptr) {
+        MS_LOG(ERROR) << "new a parameter node failed.";
+        return lite::RET_NULL_PTR;
+      }
+      inputs.push_back(new_param_node);
+      break;
+    }
+    default:
+      MS_LOG(DEBUG) << "no need to adjust.";
+      return lite::RET_NO_CHANGE;
+  }
+  cnode->set_inputs(inputs);
+  return lite::RET_OK;
+}
+
 bool OnnxInputAdjustOpPass::Run(const FuncGraphPtr &func_graph) {
   MS_ASSERT(func_graph != nullptr);
   auto manager = Manage(func_graph, true);
@@ -580,22 +317,13 @@ bool OnnxInputAdjustOpPass::Run(const FuncGraphPtr &func_graph) {
       MS_LOG(DEBUG) << "node is not cnode.";
       continue;
     }
-    auto type = opt::GetCNodeType(node);
-    if (type == schema::PrimitiveType_Power) {
-      status = AdjustPower(cnode);
-    } else if (type == schema::PrimitiveType_StridedSlice) {
-      status = AdjustStridedSlice(func_graph, cnode);
-    } else if (type == schema::PrimitiveType_Conv2D ||
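// AdjustStridedSlice above relies on an intentional switch fallthrough: a
// 4-input slice (prim, x, starts, ends) first gains default axes and then
// falls into the 5-input case to also gain default steps. A standalone sketch
// of just that default-filling decision; the real pass wraps each vector in a
// parameter node, and the names here are illustrative:
#include <cstdint>
#include <numeric>
#include <vector>
std::vector<std::vector<int32_t>> DefaultSliceTails(size_t input_count, int rank) {
  std::vector<std::vector<int32_t>> tails;
  switch (input_count) {
    case 4: {  // axes missing: default to {0, 1, ..., rank - 1}
      std::vector<int32_t> axes(rank);
      std::iota(axes.begin(), axes.end(), 0);
      tails.push_back(axes);
    }  // no break: a 4-input slice is also missing steps
    case 5: {  // steps missing: default to stride 1 on every axis
      tails.emplace_back(rank, 1);
      break;
    }
    default:
      break;  // all of starts/ends/axes/steps already present
  }
  return tails;
}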
type == schema::PrimitiveType_DeConv2D) { - status = AdjustConvOrDeConv(cnode); - } else if (type == schema::PrimitiveType_Tile) { - status = AdjustConvOrDeConv(cnode); - } else if (type == schema::PrimitiveType_Constant) { + if (CheckPrimitiveType(node, prim::kPrimConstant)) { status = ReplaceConstant(func_graph, cnode); - } else if (type == schema::PrimitiveType_Cast) { - status = AdjustCast(cnode); - } else if (type == schema::PrimitiveType_Transpose) { + } else if (CheckPrimitiveType(node, prim::kPrimTranspose)) { status = ReplaceTransposeWithGraphInput(func_graph, cnode); - } else if (type == schema::PrimitiveType_Resize) { + } else if (CheckPrimitiveType(node, prim::kPrimStridedSlice)) { + status = AdjustStridedSlice(func_graph, cnode); + } else if (CheckPrimitiveType(node, prim::kPrimResize)) { status = AdjustResize(cnode); } else { continue; diff --git a/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.h index 24909d372d..f7ad803b07 100644 --- a/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.h +++ b/mindspore/lite/tools/optimizer/graph/onnx_inputs_adjust_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,21 +28,13 @@ class OnnxInputAdjustOpPass : public Pass { public: OnnxInputAdjustOpPass() : Pass("onnx_input_adjust") {} ~OnnxInputAdjustOpPass() override = default; - bool CheckInputs(const CNodePtr &cnode); - ParameterPtr BuildParameterNode(const FuncGraphPtr &func_graph, const std::vector<int> &data, - const std::string &node_name); - ParameterPtr BuildParameterNode(const FuncGraphPtr &func_graph, const AnfNodePtr &node, - const ParamValueLitePtr &param_value); - STATUS StridedSliceAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, const std::string &attr_name); - STATUS ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph, const ParameterPtr &param_node); - STATUS AdjustPower(const CNodePtr &cnode); - STATUS AdjustStridedSlice(const FuncGraphPtr &func_graph, const CNodePtr &cnode); - STATUS AdjustConvOrDeConv(const CNodePtr &cnode); - STATUS AdjustTile(const CNodePtr &cnode); - STATUS AdjustCast(const CNodePtr &cnode); + static STATUS ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph, const ParameterPtr &param_node); + static STATUS ReplaceConstant(const FuncGraphPtr &func_graph, const CNodePtr &cnode); + static STATUS ReplaceTransposeWithGraphInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode); + static STATUS AddAttrToInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode, int input_num, + const std::string &attr_name); + static STATUS AdjustStridedSlice(const FuncGraphPtr &func_graph, const CNodePtr &cnode); STATUS AdjustResize(const CNodePtr &cnode); - STATUS ReplaceConstant(const FuncGraphPtr &func_graph, const CNodePtr &cnode); - STATUS ReplaceTransposeWithGraphInput(const FuncGraphPtr &func_graph, const CNodePtr &cnode); bool Run(const FuncGraphPtr &func_graph) override; }; } // namespace mindspore::opt diff --git a/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.cc index 71f89a3e2e..ef604e1fa6 100644 --- a/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.cc @@ -19,7 +19,6 @@ 
#include <memory> #include <set> #include <string> -#include "ops/abs.h" #include "ops/batch_norm.h" #include "ops/elu.h" #include "ops/depthwise_conv2d.h" @@ -41,13 +40,18 @@ #include "ops/fusion/max_pool_fusion.h" #include "ops/fusion/mul_fusion.h" #include "ops/fusion/pad_fusion.h" +#include "ops/fusion/pow_fusion.h" #include "ops/fusion/prelu_fusion.h" #include "ops/fusion/reduce_fusion.h" #include "ops/fusion/scale_fusion.h" +#include "ops/fusion/slice_fusion.h" #include "ops/fusion/sub_fusion.h" #include "ops/fusion/tile_fusion.h" #include "ops/fusion/topk_fusion.h" -#include "ops/gather.h" +#include "ops/grad/activation_grad.h" +#include "ops/grad/avg_pool_grad.h" +#include "ops/grad/batch_norm_grad.h" +#include "ops/grad/max_pool_grad.h" #include "ops/gelu.h" #include "ops/leaky_relu.h" #include "ops/mat_mul.h" @@ -63,15 +67,17 @@ #include "ops/relu6.h" #include "ops/resize.h" #include "ops/resize_bilinear.h" +#include "ops/resize_nearest_neighbor.h" #include "ops/sigmoid.h" +#include "ops/stack.h" #include "ops/tanh.h" -using mindspore::ops::kNameAbs; using mindspore::ops::kNameAdd; using mindspore::ops::kNameAdder; using mindspore::ops::kNameArgMax; using mindspore::ops::kNameArgMin; using mindspore::ops::kNameAvgPool; +using mindspore::ops::kNameAvgPoolGrad; using mindspore::ops::kNameBatchNorm; using mindspore::ops::kNameConv2D; using mindspore::ops::kNameConv2DBackpropFilter; @@ -86,8 +92,10 @@ using mindspore::ops::kNameL2Normalize; using mindspore::ops::kNameLayerNorm; using mindspore::ops::kNameLeakyRelu; using mindspore::ops::kNameMaxPool; +using mindspore::ops::kNameMaxPoolGrad; using mindspore::ops::kNameMul; using mindspore::ops::kNamePad; +using mindspore::ops::kNamePow; using mindspore::ops::kNamePReLU; using mindspore::ops::kNameReduceAll; using mindspore::ops::kNameReduceASum; @@ -100,6 +108,7 @@ using mindspore::ops::kNameReduceSumSquare; using mindspore::ops::kNameReLU; using mindspore::ops::kNameReLU6; using mindspore::ops::kNameResizeBilinear; +using mindspore::ops::kNameResizeNearestNeighbor; using mindspore::ops::kNameScale; using mindspore::ops::kNameSigmoid; using mindspore::ops::kNameSub; @@ -113,13 +122,30 @@ namespace { constexpr auto kNameArgMaxWithValue = "ArgMaxWithValue"; constexpr auto kNameArgMinWithValue = "ArgMinWithValue"; constexpr auto kNameBatchMatMul = "BatchMatMul"; -constexpr auto kNameGatherV2 = "GatherV2"; -constexpr auto kNameTensorAdd = "TensorAdd"; -std::map<std::string, mindspore::ActivationType> activation_map = { - {ops::kNameAbs, mindspore::ABS}, {ops::kNameElu, mindspore::ELU}, - {ops::kNameGeLU, mindspore::GELU}, {ops::kNameLeakyRelu, mindspore::LEAKY_RELU}, - {ops::kNameReLU, mindspore::RELU}, {ops::kNameReLU6, mindspore::RELU6}, - {ops::kNameSigmoid, mindspore::SIGMOID}, {ops::kNameTanh, mindspore::TANH}}; +constexpr auto kNameFusedBatchNormEx = "FusedBatchNormEx"; +constexpr auto kNameFusedBatchNormGradEx = "FusedBatchNormGradEx"; +constexpr auto kNameHSigmoid = "HSigmoid"; +constexpr auto kNameHSigmoidGrad = "HSigmoidGrad"; +constexpr auto kNameHSwish = "HSwish"; +constexpr auto kNameHSwishGrad = "HSwishGrad"; +constexpr auto kNameReluGrad = "ReluGrad"; +constexpr auto kNameReLU6Grad = "ReLU6Grad"; +constexpr auto kNameSigmoidGrad = "SigmoidGrad"; +constexpr auto kNameSlice = "Slice"; +std::map<std::string, mindspore::ActivationType> activation_map = {{ops::kNameElu, mindspore::ELU}, + {ops::kNameGeLU, mindspore::GELU}, + {ops::kNameLeakyRelu, mindspore::LEAKY_RELU}, + {ops::kNameReLU, mindspore::RELU}, + {ops::kNameReLU6, 
mindspore::RELU6}, + {ops::kNameSigmoid, mindspore::SIGMOID}, + {ops::kNameTanh, mindspore::TANH}, + {kNameHSigmoid, mindspore::HSIGMOID}, + {kNameHSigmoidGrad, mindspore::HSIGMOID}, + {kNameHSwish, mindspore::HSWISH}, + {kNameHSwishGrad, mindspore::HSWISH}, + {kNameReluGrad, mindspore::RELU}, + {kNameReLU6Grad, mindspore::RELU6}, + {kNameSigmoidGrad, mindspore::SIGMOID}}; std::map<std::string, mindspore::ReduceMode> reduce_map = { {ops::kNameReduceAll, mindspore::Reduce_All}, {ops::kNameReduceASum, mindspore::Reduce_ASum}, @@ -160,7 +186,9 @@ int AttrAdjust(const PrimitivePtr &prim, const std::string &name, const std::vec } template <typename T> -int MoveAttrMapCommon(const ValueNodePtr &value_node) { +int MoveAttrMapCommon(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -174,7 +202,9 @@ int MoveAttrMapCommon(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrMapActivation(const ValueNodePtr &value_node) { +int MoveAttrMapActivation(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -186,7 +216,7 @@ int MoveAttrMapActivation(const ValueNodePtr &value_node) { dst_prim->SetAttrs(src_prim->attrs()); auto iter = activation_map.find(src_prim->name()); if (iter == activation_map.end()) { - MS_LOG(ERROR) << "activation mode is unsupport."; + MS_LOG(ERROR) << "activation mode is unsupported."; return lite::RET_ERROR; } dst_prim->set_activation_type(iter->second); @@ -194,7 +224,31 @@ int MoveAttrMapActivation(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrMapReduce(const ValueNodePtr &value_node) { +int MoveAttrMapActivationGrad(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); + MS_ASSERT(value_node != nullptr); + auto src_prim = GetValueNode<PrimitivePtr>(value_node); + if (src_prim == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return lite::RET_ERROR; + } + auto dst_prim = std::make_shared<ops::ActivationGrad>(); + MS_ASSERT(dst_prim != nullptr); + dst_prim->SetAttrs(src_prim->attrs()); + auto iter = activation_map.find(src_prim->name()); + if (iter == activation_map.end()) { + MS_LOG(ERROR) << "activation mode is unsupported."; + return lite::RET_ERROR; + } + dst_prim->set_activation_type(iter->second); + value_node->set_value(dst_prim); + return lite::RET_OK; +} + +int MoveAttrMapReduce(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -206,7 +260,7 @@ int MoveAttrMapReduce(const ValueNodePtr &value_node) { dst_prim->SetAttrs(src_prim->attrs()); auto iter = reduce_map.find(src_prim->name()); if (iter == reduce_map.end()) { - MS_LOG(ERROR) << "reduce mode is unsupport."; + MS_LOG(ERROR) << "reduce mode is unsupported."; return lite::RET_ERROR; } dst_prim->set_mode(iter->second); @@ -215,7 +269,9 @@ int MoveAttrMapReduce(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrMapConv2D(const ValueNodePtr &value_node) { +int MoveAttrMapConv2D(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = 
cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -252,7 +308,9 @@ int MoveAttrMapConv2D(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrPool(const ValueNodePtr &value_node) { +int MoveAttrPool(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -265,7 +323,7 @@ int MoveAttrPool(const ValueNodePtr &value_node) { } else if (src_prim->name() == kNameMaxPool) { dst_prim = std::make_shared<ops::MaxPoolFusion>(); } else { - MS_LOG(ERROR) << "unsupport pooling type."; + MS_LOG(ERROR) << "unsupported pooling type."; return lite::RET_ERROR; } MS_ASSERT(dst_prim != nullptr); @@ -280,14 +338,35 @@ int MoveAttrPool(const ValueNodePtr &value_node) { MS_LOG(ERROR) << "adjust strides failed."; return status; } - if (dst_prim->GetAttr(ops::kPadding) != nullptr) { - dst_prim->AddAttr(ops::kPadMode, dst_prim->GetAttr(ops::kPadding)); - } value_node->set_value(dst_prim); return lite::RET_OK; } -int MoveAttrMapAdder(const ValueNodePtr &value_node) { +int MoveAttrPoolGrad(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); + MS_ASSERT(value_node != nullptr); + auto src_prim = GetValueNode<PrimitivePtr>(value_node); + if (src_prim == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return lite::RET_ERROR; + } + auto status = AttrAdjust(src_prim, ops::kKernelSize, {2, 3}); + if (status != lite::RET_OK) { + MS_LOG(ERROR) << "adjust ksize failed."; + return status; + } + status = AttrAdjust(src_prim, ops::kStrides, {2, 3}); + if (status != lite::RET_OK) { + MS_LOG(ERROR) << "adjust strides failed."; + return status; + } + return lite::RET_OK; +} + +int MoveAttrMapAdder(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -316,7 +395,9 @@ int MoveAttrMapAdder(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrMapLayerNorm(const ValueNodePtr &value_node) { +int MoveAttrMapLayerNorm(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -334,7 +415,9 @@ int MoveAttrMapLayerNorm(const ValueNodePtr &value_node) { return lite::RET_OK; } -int MoveAttrMapResize(const ValueNodePtr &value_node) { +int MoveAttrMapResize(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); MS_ASSERT(value_node != nullptr); auto src_prim = GetValueNode<PrimitivePtr>(value_node); if (src_prim == nullptr) { @@ -342,20 +425,45 @@ int MoveAttrMapResize(const ValueNodePtr &value_node) { return lite::RET_ERROR; } auto dst_prim = std::make_shared<ops::Resize>(); + MS_ASSERT(dst_prim != nullptr); auto size = GetValue<std::vector<int64_t>>(src_prim->GetAttr(ops::kSize)); dst_prim->set_new_height(size[0]); dst_prim->set_new_width(size[1]); - if (dst_prim->GetAttr(ops::kAlignCorners) != nullptr && GetValue<bool>(dst_prim->GetAttr(ops::kAlignCorners))) { + if (src_prim->GetAttr(ops::kAlignCorners) != nullptr && 
GetValue<bool>(src_prim->GetAttr(ops::kAlignCorners))) { dst_prim->set_coordinate_transform_mode(mindspore::ALIGN_CORNERS); } if (src_prim->name() == kNameResizeBilinear) { dst_prim->set_method(ResizeMethod::LINEAR); - } else if (src_prim->name() == "ResizeNearestNeighbor") { + } else if (src_prim->name() == kNameResizeNearestNeighbor) { dst_prim->set_method(ResizeMethod::NEAREST); } value_node->set_value(dst_prim); return lite::RET_OK; } + +int MoveAttrSlice(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto value_node = cnode->input(0)->cast<ValueNodePtr>(); + MS_ASSERT(value_node != nullptr); + auto src_prim = GetValueNode<PrimitivePtr>(value_node); + if (src_prim == nullptr) { + MS_LOG(ERROR) << "value node is invalid."; + return lite::RET_ERROR; + } + auto dst_prim = std::make_shared<ops::SliceFusion>(); + MS_ASSERT(dst_prim != nullptr); + auto begin = GetValueNode<ValuePtr>(cnode->input(2)); + auto begin_value = GetValue<std::vector<int64_t>>(begin); + + std::vector<int64_t> axes(begin_value.size()); + for (size_t i = 0; i < begin_value.size(); i++) { + axes[i] = i; + } + dst_prim->set_axes(axes); + dst_prim->SetAttrs(src_prim->attrs()); + value_node->set_value(dst_prim); + return lite::RET_OK; +} } // namespace bool PrimitiveAdjustPass::Run(const FuncGraphPtr &func_graph) { @@ -382,10 +490,10 @@ bool PrimitiveAdjustPass::Run(const FuncGraphPtr &func_graph) { auto name = prim->name(); auto adjust_func = PrimitiveAdjustRegistry::GetInstance()->GetPrimitiveCreator(name); if (adjust_func == nullptr) { - MS_LOG(DEBUG) << "dont't need to adjust."; + MS_LOG(DEBUG) << "don't need to adjust."; continue; } - status = adjust_func(value_node); + status = adjust_func(cnode); if (status != lite::RET_OK) { MS_LOG(ERROR) << "convert primitive failed."; return false; @@ -394,7 +502,6 @@ bool PrimitiveAdjustPass::Run(const FuncGraphPtr &func_graph) { return true; } -REGIST_PRIMITIVE_ADJUST(kNameAbs, MoveAttrMapActivation) REGIST_PRIMITIVE_ADJUST(kNameAdd, MoveAttrMapCommon<ops::AddFusion>) REGIST_PRIMITIVE_ADJUST(kNameAdder, MoveAttrMapAdder) REGIST_PRIMITIVE_ADJUST(kNameArgMax, MoveAttrMapCommon<ops::ArgMaxFusion>) @@ -402,6 +509,7 @@ REGIST_PRIMITIVE_ADJUST(kNameArgMaxWithValue, MoveAttrMapCommon<ops::ArgMaxFusio REGIST_PRIMITIVE_ADJUST(kNameArgMin, MoveAttrMapCommon<ops::ArgMinFusion>) REGIST_PRIMITIVE_ADJUST(kNameArgMinWithValue, MoveAttrMapCommon<ops::ArgMinFusion>) REGIST_PRIMITIVE_ADJUST(kNameAvgPool, MoveAttrPool) +REGIST_PRIMITIVE_ADJUST(kNameAvgPoolGrad, MoveAttrPoolGrad) REGIST_PRIMITIVE_ADJUST(kNameBatchMatMul, MoveAttrMapCommon<ops::MatMul>) REGIST_PRIMITIVE_ADJUST(kNameBatchNorm, MoveAttrMapCommon<ops::FusedBatchNorm>) REGIST_PRIMITIVE_ADJUST(kNameConv2DBackpropFilter, MoveAttrMapCommon<ops::Conv2DBackpropFilterFusion>) @@ -412,14 +520,21 @@ REGIST_PRIMITIVE_ADJUST(kNameConv2dTranspose, MoveAttrMapCommon<ops::Conv2dTrans REGIST_PRIMITIVE_ADJUST(kNameDiv, MoveAttrMapCommon<ops::DivFusion>) REGIST_PRIMITIVE_ADJUST(kNameElu, MoveAttrMapActivation) REGIST_PRIMITIVE_ADJUST(kNameExp, MoveAttrMapCommon<ops::ExpFusion>) -REGIST_PRIMITIVE_ADJUST(kNameGatherV2, MoveAttrMapCommon<ops::Gather>) +REGIST_PRIMITIVE_ADJUST(kNameFusedBatchNormEx, MoveAttrMapCommon<ops::FusedBatchNorm>) +REGIST_PRIMITIVE_ADJUST(kNameFusedBatchNormGradEx, MoveAttrMapCommon<ops::BatchNormGrad>) REGIST_PRIMITIVE_ADJUST(kNameGeLU, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameHSigmoid, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameHSigmoidGrad, MoveAttrMapActivationGrad) 
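Note: each REGIST_PRIMITIVE_ADJUST entry above and below expands to a file-scope RegistryPrimitiveAdjust object whose constructor records the creator function under the primitive's name; Run() then looks the name up and applies the creator to the cnode. The registry class itself is added in primitive_adjust_pass.h later in this patch. A self-contained miniature of the self-registration pattern, with simplified stand-in types rather than the real MindSpore ones:

    #include <iostream>
    #include <map>
    #include <string>

    // Name-keyed table of adjust functions; construct-on-first-use so the
    // global registrar objects below can run safely before main().
    using AdjustFn = int (*)(const std::string &node_name);
    static std::map<std::string, AdjustFn> &Registry() {
      static std::map<std::string, AdjustFn> table;
      return table;
    }

    struct Registrar {
      Registrar(const std::string &name, AdjustFn fn) { Registry()[name] = fn; }
    };

    static int AdjustRelu(const std::string &node_name) {
      std::cout << "adjust " << node_name << " as RELU\n";
      return 0;
    }

    // In the patch, grad ops register a separate creator
    // (MoveAttrMapActivationGrad) but resolve to the same ActivationType
    // through activation_map; one function stands in for both here.
    static Registrar g_relu("ReLU", AdjustRelu);
    static Registrar g_relu_grad("ReluGrad", AdjustRelu);

    int main() {
      auto it = Registry().find("ReLU");
      return it == Registry().end() ? 1 : it->second("conv1_relu");
    }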
+REGIST_PRIMITIVE_ADJUST(kNameHSwish, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameHSwishGrad, MoveAttrMapActivationGrad) REGIST_PRIMITIVE_ADJUST(kNameL2Normalize, MoveAttrMapCommon<ops::L2NormalizeFusion>) REGIST_PRIMITIVE_ADJUST(kNameLayerNorm, MoveAttrMapLayerNorm) REGIST_PRIMITIVE_ADJUST(kNameLeakyRelu, MoveAttrMapActivation) REGIST_PRIMITIVE_ADJUST(kNameMaxPool, MoveAttrPool) +REGIST_PRIMITIVE_ADJUST(kNameMaxPoolGrad, MoveAttrPoolGrad) REGIST_PRIMITIVE_ADJUST(kNameMul, MoveAttrMapCommon<ops::MulFusion>) REGIST_PRIMITIVE_ADJUST(kNamePad, MoveAttrMapCommon<ops::PadFusion>) +REGIST_PRIMITIVE_ADJUST(kNamePow, MoveAttrMapCommon<ops::PowFusion>) REGIST_PRIMITIVE_ADJUST(kNamePReLU, MoveAttrMapCommon<ops::PReLUFusion>) REGIST_PRIMITIVE_ADJUST(kNameReduceAll, MoveAttrMapReduce) REGIST_PRIMITIVE_ADJUST(kNameReduceASum, MoveAttrMapReduce) @@ -430,15 +545,18 @@ REGIST_PRIMITIVE_ADJUST(kNameReduceProd, MoveAttrMapReduce) REGIST_PRIMITIVE_ADJUST(kNameReduceSum, MoveAttrMapReduce) REGIST_PRIMITIVE_ADJUST(kNameReduceSumSquare, MoveAttrMapReduce) REGIST_PRIMITIVE_ADJUST(kNameReLU, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameReluGrad, MoveAttrMapActivationGrad) REGIST_PRIMITIVE_ADJUST(kNameReLU6, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameReLU6Grad, MoveAttrMapActivationGrad) REGIST_PRIMITIVE_ADJUST(kNameResizeBilinear, MoveAttrMapResize) +REGIST_PRIMITIVE_ADJUST(kNameResizeNearestNeighbor, MoveAttrMapResize) REGIST_PRIMITIVE_ADJUST(kNameScale, MoveAttrMapCommon<ops::ScaleFusion>) REGIST_PRIMITIVE_ADJUST(kNameSigmoid, MoveAttrMapActivation) +REGIST_PRIMITIVE_ADJUST(kNameSigmoidGrad, MoveAttrMapActivationGrad) +REGIST_PRIMITIVE_ADJUST(kNameSlice, MoveAttrSlice) REGIST_PRIMITIVE_ADJUST(kNameSub, MoveAttrMapCommon<ops::SubFusion>) REGIST_PRIMITIVE_ADJUST(kNameTanh, MoveAttrMapActivation) -REGIST_PRIMITIVE_ADJUST(kNameTensorAdd, MoveAttrMapCommon<ops::AddFusion>) REGIST_PRIMITIVE_ADJUST(kNameTile, MoveAttrMapCommon<ops::TileFusion>) REGIST_PRIMITIVE_ADJUST(kNameTopK, MoveAttrMapCommon<ops::TopKFusion>) - } // namespace opt } // namespace mindspore diff --git a/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.h new file mode 100644 index 0000000000..729757d12f --- /dev/null +++ b/mindspore/lite/tools/optimizer/graph/primitive_adjust_pass.h @@ -0,0 +1,76 @@ +/** + * Copyright 2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_PRIMITIVE_ADJUST_PASS_H +#define MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_PRIMITIVE_ADJUST_PASS_H + +#include <map> +#include <string> +#include <vector> +#include "backend/optimizer/common/pass.h" +#include "tools/converter/converter_flags.h" +#include "tools/optimizer/common/gllo_utils.h" + +using mindspore::lite::converter::FmkType; +namespace mindspore { +namespace opt { +typedef int (*PrimitiveAdjustCreator)(const CNodePtr &value_node); +class PrimitiveAdjustRegistry { + public: + static PrimitiveAdjustRegistry *GetInstance() { + static PrimitiveAdjustRegistry registry; + return &registry; + } + + void InsertPrimitiveAdjustMap(const std::string &key, PrimitiveAdjustCreator creator) { + primitive_adjust_creators_[key] = creator; + } + + PrimitiveAdjustCreator GetPrimitiveCreator(const std::string &key) { + if (primitive_adjust_creators_.find(key) != primitive_adjust_creators_.end()) { + return primitive_adjust_creators_[key]; + } else { + MS_LOG(DEBUG) << "Unsupported primitive type : " << key; + return nullptr; + } + } + + protected: + std::map<std::string, PrimitiveAdjustCreator> primitive_adjust_creators_; +}; + +class RegistryPrimitiveAdjust { + public: + RegistryPrimitiveAdjust(const std::string &key, PrimitiveAdjustCreator creator) { + PrimitiveAdjustRegistry::GetInstance()->InsertPrimitiveAdjustMap(key, creator); + } +}; + +#define REGIST_PRIMITIVE_ADJUST(type, primitive_adjust_func) \ + RegistryPrimitiveAdjust g_##type##_primitive_adjust(type, primitive_adjust_func); + +class PrimitiveAdjustPass : public Pass { + public: + void SetFmkType(FmkType fmk_type) { fmk_type_ = fmk_type; } + bool Run(const FuncGraphPtr &func_graph) override; + + protected: + FmkType fmk_type_ = FmkType::FmkType_MS; +}; +} // namespace opt +} // namespace mindspore + +#endif // MINDSPORE_LITE_TOOLS_OPTIMIZER_GRAPH_PRIMITIVE_ADJUST_PASS_H diff --git a/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.cc b/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.cc index 54b561975b..6057a1c92c 100644 --- a/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,26 +13,22 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + #include "tools/optimizer/graph/redundant_op_remove_pass.h" -#include <memory> #include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" namespace mindspore::opt { namespace { constexpr size_t InputDoubleNum = 2; constexpr size_t InputTripleNum = 3; -constexpr auto kNameLoad = "Load"; -constexpr auto kNameUpdateState = "UpdateState"; } // namespace int RemoveRedundantOpPass::ReplaceOp(const AnfNodePtr &anf_node, const FuncGraphManagerPtr &manager) { if (!utils::isa<CNodePtr>(anf_node)) { MS_LOG(DEBUG) << "anf node is node a cnode."; return lite::RET_NO_CHANGE; } - auto type = opt::GetCNodeType(anf_node); auto cnode = anf_node->cast<CNodePtr>(); - if (type == schema::PrimitiveType_Identity) { + if (CheckPrimitiveType(anf_node, kPrimIdentity)) { if (cnode->size() != InputDoubleNum) { MS_LOG(DEBUG) << "The node inputs size is bigger than 1"; remove_cnode_.insert(anf_node); @@ -52,8 +48,7 @@ int RemoveRedundantOpPass::ReplaceTupleGetItem(const AnfNodePtr &anf_node, const MS_LOG(DEBUG) << "anf node is node a cnode."; return lite::RET_NO_CHANGE; } - auto type = opt::GetCNodeType(anf_node); - if (type != schema::PrimitiveType_TupleGetItem) { + if (!CheckPrimitiveType(anf_node, prim::kPrimTupleGetItem)) { return lite::RET_NO_CHANGE; } auto cnode = anf_node->cast<CNodePtr>(); @@ -61,8 +56,7 @@ int RemoveRedundantOpPass::ReplaceTupleGetItem(const AnfNodePtr &anf_node, const MS_LOG(ERROR) << "TupleGetItem should have 3 inputs, got " << cnode->inputs().size(); return RET_ERROR; } - type = opt::GetCNodeType(cnode->input(1)); - if (type != schema::PrimitiveType_Identity) { + if (!CheckPrimitiveType(cnode->input(1), kPrimIdentity)) { return lite::RET_NO_CHANGE; } auto get_item_input_cnode = cnode->input(1)->cast<CNodePtr>(); @@ -71,7 +65,7 @@ int RemoveRedundantOpPass::ReplaceTupleGetItem(const AnfNodePtr &anf_node, const MS_LOG(ERROR) << "TupleGetItem's input 2 is not valuenode"; return lite::RET_ERROR; } - int index = lite::CastToInt(index_vnode->cast<ValueNodePtr>()->value()).front(); + int index = CastToInt(index_vnode->cast<ValueNodePtr>()->value()).front(); int input_cnode_inputs_size = get_item_input_cnode->inputs().size(); if ((index + 1) >= input_cnode_inputs_size) { MS_LOG(ERROR) << "value node index is out of range."; @@ -95,24 +89,23 @@ bool RemoveRedundantOpPass::Run(const FuncGraphPtr &func_graph) { if (!utils::isa<CNodePtr>(node)) { continue; } - auto type = opt::GetCNodeType(node); - if (type == schema::PrimitiveType_Identity) { + if (CheckPrimitiveType(node, kPrimIdentity)) { status = ReplaceOp(node, manager); } - if (CheckPrimitiveType(node, std::make_shared<Primitive>(kNameLoad))) { + if (CheckPrimitiveType(node, prim::kPrimLoad)) { status = ReplaceOp(node, manager); } - if (CheckPrimitiveType(node, std::make_shared<Primitive>(kNameUpdateState))) { + if (CheckPrimitiveType(node, prim::kPrimUpdateState)) { status = ReplaceOp(node, manager); } - if (type == schema::PrimitiveType_Depend || - type == schema::PrimitiveType_ControlDepend) { // ControlDepend delete next version. + if (CheckPrimitiveType(node, prim::kPrimDepend) || + CheckPrimitiveType(node, prim::kPrimControlDepend)) { // ControlDepend delete next version. 
status = ReplaceOp(node, manager); } - if (type == schema::PrimitiveType_TupleGetItem) { + if (CheckPrimitiveType(node, prim::kPrimTupleGetItem)) { status = ReplaceTupleGetItem(node, manager); } - if (type == schema::PrimitiveType_If || type == schema::PrimitiveType_While) { + if (CheckPrimitiveType(node, prim::kPrimIf) || CheckPrimitiveType(node, prim::kPrimWhile)) { auto sub_func_graph = GetValueNode<FuncGraphPtr>(node->cast<CNodePtr>()->input(1)); if (sub_func_graph == nullptr) { lite::ReturnCode::GetSingleReturnCode()->UpdateReturnCode(lite::RET_NULL_PTR); diff --git a/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.h b/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.h index 8e7237bbe4..1bef786eaa 100644 --- a/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.h +++ b/mindspore/lite/tools/optimizer/graph/redundant_op_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.cc b/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.cc index 0c0329c72f..5543b7ace4 100644 --- a/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,20 +18,52 @@ #include <memory> #include <set> #include <algorithm> -#include "mindspore/lite/include/errorcode.h" +#include "ops/fusion/full_connection.h" +#include "ops/reshape.h" +#include "ops/fusion/slice_fusion.h" +#include "ops/softmax.h" +#include "ops/op_utils.h" +#include "include/errorcode.h" #include "tools/optimizer/common/gllo_utils.h" #include "backend/optimizer/common/helper.h" -#include "src/ops/primitive_c.h" -#include "schema/inner/model_generated.h" #include "src/common/log_adapter.h" -using mindspore::lite::PrimitiveC; namespace mindspore::opt { namespace { const int kArithmeticInputNum = 2; -std::vector<int32_t> GetCNodeInputShape(const CNodePtr &cnode, size_t index = 1) { +const int SliceBeginIndex = 2; +const int SliceSizeIndex = 3; +int node_name_index = 0; +std::vector<int> GetSliceBeginAndSize(const CNodePtr &cnode, const int index) { MS_ASSERT(cnode != nullptr); - std::vector<int32_t> empty_shape; + std::vector<int> content; + if (index != SliceBeginIndex && index != SliceSizeIndex && cnode->size() != 4) { + return content; + } + auto node = cnode->input(index); + if (node == nullptr) { + return content; + } + auto parameter_node = node->cast<ParameterPtr>(); + if (parameter_node == nullptr || !parameter_node->has_default() || parameter_node->default_param() == nullptr) { + return content; + } + auto parameter_value = parameter_node->default_param()->cast<ParamValueLitePtr>(); + if (parameter_value == nullptr) { + return content; + } + content.resize(parameter_value->tensor_shape_size()); + if (memcpy_s(content.data(), parameter_value->tensor_shape_size(), parameter_value->tensor_addr(), + parameter_value->tensor_shape_size()) != EOK) { + MS_LOG(ERROR) << "memcpy data failed."; + return {}; + } + return content; +} + +std::vector<int64_t> GetCNodeInputShape(const CNodePtr &cnode, size_t index = 1) { + 
MS_ASSERT(cnode != nullptr); + std::vector<int64_t> empty_shape; if (index < 1 || cnode->inputs().size() <= index) { MS_LOG(ERROR) << "out of index"; return empty_shape; @@ -46,33 +78,28 @@ std::vector<int32_t> GetCNodeInputShape(const CNodePtr &cnode, size_t index = 1) return empty_shape; } auto abstract_tensor = utils::cast<abstract::AbstractTensorPtr>(abstract); - if (!utils::isa<ParamValueLitePtr>(abstract_tensor->GetValueTrack())) { - MS_LOG(DEBUG) << "Value of abstract is not ParamValueLite, indicate that infershape has failed"; - return empty_shape; - } - auto param_value_lite = utils::cast<ParamValueLitePtr>(abstract_tensor->GetValueTrack()); - if (param_value_lite == nullptr) { - MS_LOG(ERROR) << "ParamValueLite of abstract is nullptr"; - return empty_shape; - } - return param_value_lite->tensor_shape(); + MS_ASSERT(abstract_tensor != nullptr && abstract_tensor->shape() != nullptr); + return abstract_tensor->shape()->shape(); } -std::vector<int32_t> GetDefaultParamShape(const ParameterPtr &param) { +std::vector<int64_t> GetDefaultParamShape(const ParameterPtr &param) { MS_ASSERT(param != nullptr); MS_ASSERT(param->has_default()); - std::vector<int32_t> shape; + std::vector<int64_t> shape_vector; auto default_param = param->default_param(); if (default_param == nullptr) { MS_LOG(ERROR) << "default_param is nullptr"; - return shape; + return shape_vector; } if (!utils::isa<ParamValueLitePtr>(default_param)) { MS_LOG(ERROR) << "default_param is not ParamValueLite"; - return shape; + return shape_vector; } auto param_value_lite = utils::cast<ParamValueLitePtr>(default_param); - return param_value_lite->tensor_shape(); + auto shape = param_value_lite->tensor_shape(); + std::transform(shape.begin(), shape.end(), std::back_inserter(shape_vector), + [](const int val) { return static_cast<int64_t>(val); }); + return shape_vector; } bool IsScalarNode(const AnfNodePtr &nodePtr) { @@ -86,64 +113,61 @@ bool IsScalarNode(const AnfNodePtr &nodePtr) { return false; } -schema::SliceT *GetSliceT(const CNodePtr &cnode) { +std::shared_ptr<mindspore::ops::SliceFusion> GetSlice(const CNodePtr &cnode) { if (cnode == nullptr) { return nullptr; } - auto primc = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primc == nullptr) { - return nullptr; - } - auto primt = primc->primitiveT(); - if (primt == nullptr || primt->value.AsSlice() == nullptr) { - return nullptr; - } - return primt->value.AsSlice(); + return GetValueNode<std::shared_ptr<mindspore::ops::SliceFusion>>(cnode->input(0)); } -schema::SoftMaxT *GetSoftmaxT(const CNodePtr &cnode) { +std::shared_ptr<mindspore::ops::Softmax> GetSoftmax(const CNodePtr &cnode) { if (cnode == nullptr) { return nullptr; } - auto primc = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primc == nullptr) { - return nullptr; - } - auto primt = primc->primitiveT(); - if (primt == nullptr || primt->value.AsSoftMax() == nullptr) { - return nullptr; - } - return primt->value.AsSoftMax(); + return GetValueNode<std::shared_ptr<mindspore::ops::Softmax>>(cnode->input(0)); } -schema::ReshapeT *GetReshapeT(const CNodePtr &cnode) { +std::shared_ptr<mindspore::ops::Reshape> GetReshape(const CNodePtr &cnode) { if (cnode == nullptr) { return nullptr; } - auto primc = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primc == nullptr) { - return nullptr; - } - auto primt = primc->primitiveT(); - if (primt == nullptr || primt->value.AsReshape() == nullptr) { - return nullptr; - } - return primt->value.AsReshape(); + return 
GetValueNode<std::shared_ptr<mindspore::ops::Reshape>>(cnode->input(0)); } -schema::FullConnectionT *GetFcT(const CNodePtr &cnode) { +std::shared_ptr<mindspore::ops::FullConnection> GetFc(const CNodePtr &cnode) { if (cnode == nullptr) { return nullptr; } - auto primc = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - if (primc == nullptr) { - return nullptr; + return GetValueNode<std::shared_ptr<mindspore::ops::FullConnection>>(cnode->input(0)); +} + +std::vector<int> GetTransposePerm(const CNodePtr &node) { + MS_ASSERT(node != nullptr); + std::vector<int> perm; + if (!CheckPrimitiveType(node, prim::kPrimTranspose)) { + return perm; } - auto primt = primc->primitiveT(); - if (primt == nullptr || primt->value.AsFullConnection() == nullptr) { - return nullptr; + if (node->inputs().size() != 3) { + return perm; + } + auto perm_node = node->input(2); + if (!utils::isa<ParameterPtr>(perm_node)) { + return perm; } - return primt->value.AsFullConnection(); + auto perm_param = perm_node->cast<ParameterPtr>(); + if (!perm_param->has_default() || perm_param->default_param() == nullptr) { + return perm; + } + auto perm_value = perm_param->default_param()->cast<ParamValueLitePtr>(); + if (perm_value == nullptr) { + return perm; + } + perm.resize(perm_value->tensor_shape()[0]); + if (memcpy_s(perm.data(), perm_value->tensor_size(), perm_value->tensor_addr(), perm_value->tensor_size()) != EOK) { + MS_LOG(ERROR) << "memcpy failed."; + return {}; + } + return perm; } } // namespace @@ -164,11 +188,11 @@ STATUS SlicePreposePass::SwapSliceWithPreceed(const FuncGraphPtr &graph, const C MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(preceed_cnode != nullptr); if (slice_cnode->input(1) != preceed_cnode) { - MS_LOG(ERROR) << "preceed node must be slice node's direct parent"; + MS_LOG(ERROR) << "preceding node must be slice node's direct parent"; return RET_ERROR; } if (IsMultiOutputTensors(graph, preceed_cnode)) { - MS_LOG(ERROR) << "preceed node referenced by multi nodes not support swap"; + MS_LOG(ERROR) << "preceding node referenced by multiple nodes does not support swap"; return RET_ERROR; } auto manager = graph->manager(); @@ -193,87 +217,60 @@ STATUS SlicePreposePass::SwapSliceWithPreceed(const FuncGraphPtr &graph, const C return RET_OK; } -ValueNodePtr SlicePreposePass::CreateSliceValueNode(const FuncGraphPtr &graph, const std::vector<int32_t> &axes, - const std::vector<int32_t> &begin, - const std::vector<int32_t> &size) { +ValueNodePtr SlicePreposePass::CreateSliceValueNode(const FuncGraphPtr &graph, const std::vector<int64_t> &axes) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); - std::unique_ptr<schema::SliceT> attr = std::make_unique<schema::SliceT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new SliceT failed"; - return nullptr; - } - attr->axes = axes; - attr->begin = begin; - attr->size = size; - auto new_primitive_t = std::make_unique<schema::PrimitiveT>(); - if (new_primitive_t == nullptr) { - MS_LOG(ERROR) << "primitive_t is nullptr"; - return nullptr; - } - new_primitive_t->value.type = schema::PrimitiveType_Slice; - new_primitive_t->value.value = attr.release(); - auto new_primtive_c = std::shared_ptr<PrimitiveC>(PrimitiveC::Create(new_primitive_t.release())); - if (new_primtive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; - return nullptr; - } - ValueNodePtr value_node = NewValueNode(new_primtive_c); + auto new_slice = std::make_shared<mindspore::ops::SliceFusion>(); + new_slice->set_axes(axes); + ValueNodePtr value_node = NewValueNode(new_slice); 
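// Editor's note: only `axes` remains an attribute on SliceFusion after this
// change; `begin` and `size` now travel as constant-tensor inputs at indices
// SliceBeginIndex (2) and SliceSizeIndex (3), built by each caller with
// BuildIntVecParameterNode and read back via GetSliceBeginAndSize above.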
return value_node; } ValueNodePtr SlicePreposePass::CopySliceValueNode(const FuncGraphPtr &graph, const CNodePtr &slice_cnode) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(slice_cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; - return nullptr; - } - auto primitive_t = primitive_c->primitiveT(); - auto new_primitive_t = std::make_unique<schema::PrimitiveT>(); - if (new_primitive_t == nullptr) { - MS_LOG(ERROR) << "primitive_t is nullptr"; - return nullptr; - } - *new_primitive_t = *primitive_t; - auto new_primitive_c = std::make_shared<PrimitiveC>(new_primitive_t.release()); - if (new_primitive_c == nullptr) { - MS_LOG(ERROR) << "primitive_c is nullptr"; + auto slice_c = GetValueNode<std::shared_ptr<mindspore::ops::SliceFusion>>(slice_cnode->input(0)); + if (slice_c == nullptr) { + MS_LOG(ERROR) << "slice node is nullptr"; return nullptr; } - ValueNodePtr value_node = NewValueNode(new_primitive_c); + auto new_slice_c = std::make_shared<mindspore::ops::SliceFusion>(); + new_slice_c->set_axes(slice_c->get_axes()); + ValueNodePtr value_node = NewValueNode(new_slice_c); return value_node; } -CNodePtr SlicePreposePass::InsertSlice(const FuncGraphPtr &graph, const ValueNodePtr &slice_vnode, +CNodePtr SlicePreposePass::InsertSlice(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &inputs, const CNodePtr &preceed_cnode, const int index, const TransactionPtr &tr) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(preceed_cnode != nullptr); - auto slice_cnode = graph->NewCNode({slice_vnode, preceed_cnode->input(index)}); + auto slice_cnode = graph->NewCNode(inputs); + slice_cnode->set_fullname_with_scope(preceed_cnode->fullname_with_scope() + "_slice_" + + std::to_string(node_name_index)); + node_name_index += 1; tr->SetEdge(preceed_cnode, index, slice_cnode); return slice_cnode; } STATUS SlicePreposePass::VerifySliceAttrs(const CNodePtr &slice_cnode, const int dim) { // according to ops/slice.cc, axes >= 0, begin >= 0, size >= -1 - schema::SliceT *slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "SliceT* is nullptr"; + auto slice = GetSlice(slice_cnode); + if (slice == nullptr) { + MS_LOG(ERROR) << "Slice is nullptr"; return RET_ERROR; } - auto &axes = slice_t->axes; - auto &begin = slice_t->begin; - auto &size = slice_t->size; + auto axes = slice->get_axes(); + auto begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); - std::set<int32_t> unique_axes(axes.begin(), axes.end()); + std::set<int64_t> unique_axes(axes.begin(), axes.end()); if (axes.empty() || unique_axes.size() != axes.size()) { MS_LOG(DEBUG) << "Invalid slice axe attribute"; return RET_ERROR; } for (size_t i = 0; i < axes.size(); ++i) { - int axe = axes[i]; + auto axe = axes[i]; if (dim > -1 && axe >= dim) { MS_LOG(ERROR) << "Invalid slice axe attribute"; return RET_ERROR; @@ -297,19 +294,19 @@ STATUS SlicePreposePass::VerifySliceAttrs(const CNodePtr &slice_cnode, const int /* * Adjust slice's attr when broadcast happened in Arithmetic */ -STATUS SlicePreposePass::SliceParamDeBroadcast(const CNodePtr &slice_cnode, const std::vector<int32_t> &ref_shape, - std::vector<int32_t> *axes, std::vector<int32_t> *begin, - std::vector<int32_t> *size) { +STATUS SlicePreposePass::SliceParamDeBroadcast(const CNodePtr &slice_cnode, const std::vector<int64_t> &ref_shape, + 
std::vector<int64_t> *axes, std::vector<int> *begin, + std::vector<int> *size) { MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(new_slice_cnode != nullptr); - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice = GetSlice(slice_cnode); + if (slice == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return RET_ERROR; } - auto origin_axes = slice_t->axes; - auto origin_begin = slice_t->begin; - auto origin_size = slice_t->size; + auto origin_axes = slice->get_axes(); + auto origin_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto origin_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); auto status = VerifySliceAttrs(slice_cnode, ref_shape.size()); if (status != RET_OK) { return status; @@ -348,70 +345,71 @@ STATUS SlicePreposePass::SliceParamDeBroadcast(const CNodePtr &slice_cnode, cons } } -CNodePtr SlicePreposePass::CreateReshapeCNode(const FuncGraphPtr &graph, const std::vector<int64_t> &shape, +CNodePtr SlicePreposePass::CreateReshapeCNode(const FuncGraphPtr &graph, const std::vector<int64_t> &shape_vector, const AbstractBasePtr &abstract, const CNodePtr &preceed_cnode) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); - std::unique_ptr<schema::ReshapeT> attr = std::make_unique<schema::ReshapeT>(); - if (attr == nullptr) { - MS_LOG(ERROR) << "new SliceT failed"; - return nullptr; - } - attr->shape = shape; - auto new_primitive_t = std::make_unique<schema::PrimitiveT>(); - if (new_primitive_t == nullptr) { - MS_LOG(ERROR) << "primitive_t is nullptr"; - return nullptr; - } - new_primitive_t->value.type = schema::PrimitiveType_Reshape; - new_primitive_t->value.value = attr.release(); - auto new_primtive_c = std::shared_ptr<PrimitiveC>(PrimitiveC::Create(new_primitive_t.release())); - if (new_primtive_c == nullptr) { + auto new_reshape = std::make_shared<mindspore::ops::Reshape>(); + if (new_reshape == nullptr) { MS_LOG(ERROR) << "primitive_c is nullptr"; return nullptr; } - ValueNodePtr value_node = NewValueNode(new_primtive_c); + ValueNodePtr value_node = NewValueNode(new_reshape); if (value_node == nullptr) { return nullptr; } - auto reshape_cnode = graph->NewCNode({value_node, preceed_cnode}); + std::vector<int> shape; + std::transform(shape_vector.begin(), shape_vector.end(), std::back_inserter(shape), + [](int64_t val) { return static_cast<int>(val); }); + auto shape_node = BuildIntVecParameterNode( + graph, shape, preceed_cnode->fullname_with_scope() + "_shape_" + std::to_string(node_name_index)); + node_name_index++; + if (shape_node == nullptr) { + MS_LOG(ERROR) << "build parameter node failed."; + return nullptr; + } + auto reshape_cnode = graph->NewCNode({value_node, preceed_cnode, shape_node}); reshape_cnode->set_abstract(abstract); + reshape_cnode->set_fullname_with_scope(preceed_cnode->fullname_with_scope() + "_reshape_" + + std::to_string(node_name_index)); + node_name_index++; ClearCNodeAbstractValue(reshape_cnode); return reshape_cnode; } bool SlicePreposePass::SiblingsAreSameSlice(const FuncGraphPtr &graph, const NodeUsedListPtr &output_node_list, - const std::vector<int32_t> &ref_shape) { + const std::vector<int64_t> &ref_shape) { MS_ASSERT(graph != nullptr); MS_ASSERT(output_node_list != nullptr); MS_ASSERT(output_node_list->size() >= 2); - std::vector<schema::SliceT *> slices; + std::vector<CNodePtr> slices; for (auto &output_node : *(output_node_list.get())) { auto cnode = output_node.first->cast<CNodePtr>(); if (cnode == nullptr) { MS_LOG(ERROR) << "cnode is 
nullptr"; return false; } - if (GetCNodeType(cnode) != schema::PrimitiveType_Slice) { + if (!CheckPrimitiveType(cnode, prim::kPrimSliceFusion)) { return false; } - schema::SliceT *slice_t = GetSliceT(cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "SliceT* is nullptr"; + auto slice_node = GetSlice(cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "Slice is nullptr"; return false; } - slices.push_back(slice_t); + slices.push_back(cnode); } - auto first_slice_t = slices.front(); - auto first_axes = first_slice_t->axes; - auto first_begin = first_slice_t->begin; - auto first_size = first_slice_t->size; + auto first_slice_cnode = slices.front(); + auto first_slice_node = GetSlice(first_slice_cnode); + auto first_axes = first_slice_node->get_axes(); + auto first_begin = GetSliceBeginAndSize(first_slice_cnode, SliceBeginIndex); + auto first_size = GetSliceBeginAndSize(first_slice_cnode, SliceSizeIndex); for (size_t i = 1; i < output_node_list->size(); ++i) { - auto slice_t = slices[i]; - auto axes = slice_t->axes; - auto begin = slice_t->begin; - auto size = slice_t->size; + auto slice = GetSlice(slices[i]); + auto axes = slice->get_axes(); + auto begin = GetSliceBeginAndSize(slices[i], SliceBeginIndex); + auto size = GetSliceBeginAndSize(slices[i], SliceSizeIndex); if (axes.size() != first_axes.size()) { return false; } @@ -447,15 +445,16 @@ bool SlicePreposePass::SiblingsAreSameSlice(const FuncGraphPtr &graph, const Nod return true; } -int SlicePreposePass::GetReshapeAbnormalAxeIn(const std::vector<int> &shape_in, const std::vector<int> &shape_out, - std::vector<int> *mapped_axe) { +int64_t SlicePreposePass::GetReshapeAbnormalAxeIn(const std::vector<int64_t> &shape_in, + const std::vector<int64_t> &shape_out, + std::vector<int64_t> *mapped_axe) { // find shape_out's correspond axe in shape_in // when there are such as 3x1x1x4 => 3x1x4, mapped_axe[1] == 2 - int32_t inner_size_in = 1; - int abnormal_axe_in = -1; + int64_t inner_size_in = 1; + int64_t abnormal_axe_in = -1; for (size_t i = 0; i < shape_in.size(); ++i) { inner_size_in *= shape_in[i]; - int32_t inner_size_out = 1; + int64_t inner_size_out = 1; size_t j; for (j = 0; j < shape_out.size(); ++j) { inner_size_out *= shape_out[j]; @@ -471,23 +470,25 @@ int SlicePreposePass::GetReshapeAbnormalAxeIn(const std::vector<int> &shape_in, return abnormal_axe_in; } -int SlicePreposePass::GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, const std::vector<int> &mapped_axe, - const std::vector<int> &shape_out, std::vector<int> *shape_out_copy, - bool *is_normal_mode, bool *support_abnormal_mode) { +int64_t SlicePreposePass::GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, + const std::vector<int64_t> &mapped_axe, + const std::vector<int64_t> &shape_out, + std::vector<int64_t> *shape_out_copy, bool *is_normal_mode, + bool *support_abnormal_mode) { MS_ASSERT(slice_cnode != nullptr); - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return false; } - auto slice_axes = slice_t->axes; - auto slice_begin = slice_t->begin; - auto slice_size = slice_t->size; - int abnormal_index_out = -1; + auto slice_axes = slice_node->get_axes(); + auto slice_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto slice_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); + int64_t abnormal_index_out = -1; for (size_t j = 0; j < shape_out.size(); ++j) { int 
index = -1; for (size_t i = 0; i < slice_axes.size(); ++i) { - if (slice_axes[i] == static_cast<int>(j)) { + if (slice_axes[i] == static_cast<int64_t>(j)) { index = i; break; } @@ -502,7 +503,8 @@ int SlicePreposePass::GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, co *support_abnormal_mode = false; } } else { // if there is matched axe sliced, not support abnormal mode - shape_out_copy->at(j) = (slice_size[index] == -1 ? shape_out[j] - slice_begin[index] : slice_size[index]); + shape_out_copy->at(j) = + (slice_size[index] == -1 ? shape_out[j] - slice_begin[index] : static_cast<int64_t>(slice_size[index])); *support_abnormal_mode = false; } } @@ -511,24 +513,24 @@ int SlicePreposePass::GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, co } bool SlicePreposePass::PreposeWithNormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, - const CNodePtr &reshape_cnode, const std::vector<int> &shape_in, - const std::vector<int> &shape_out_copy, - const std::vector<int> &mapped_axe) { + const CNodePtr &reshape_cnode, const std::vector<int64_t> &shape_in, + const std::vector<int64_t> &shape_out_copy, + const std::vector<int64_t> &mapped_axe) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(reshape_cnode != nullptr); - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return false; } - auto slice_axes = slice_t->axes; - auto slice_begin = slice_t->begin; - auto slice_size = slice_t->size; - std::vector<int32_t> new_axes(shape_in.size()); + auto slice_axes = slice_node->get_axes(); + auto slice_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto slice_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); + std::vector<int64_t> new_axes(shape_in.size()); std::iota(new_axes.begin(), new_axes.end(), 0); - std::vector<int32_t> new_begin(shape_in.size(), 0); - std::vector<int32_t> new_size(shape_in.size(), -1); + std::vector<int> new_begin(shape_in.size(), 0); + std::vector<int> new_size(shape_in.size(), -1); for (size_t i = 0; i < mapped_axe.size(); ++i) { auto axe_in = mapped_axe[i]; @@ -539,22 +541,30 @@ bool SlicePreposePass::PreposeWithNormalReshape(const FuncGraphPtr &graph, const new_size[axe_in] = slice_size[i]; } - auto reshape_t = GetReshapeT(reshape_cnode); - if (reshape_t == nullptr) { - MS_LOG(ERROR) << "reshape_t is nullptr"; + auto reshape_node = GetReshape(reshape_cnode); + if (reshape_node == nullptr) { + MS_LOG(ERROR) << "reshape is nullptr"; return false; } - reshape_t->shape = std::vector<int64_t>(shape_out_copy.begin(), shape_out_copy.end()); - auto reshape_origin_inputs = reshape_cnode->inputs(); - if (reshape_origin_inputs.size() < 2) { - MS_LOG(ERROR) << "Reshape inputs num is illegal"; + std::vector<int> new_shape_out_copy; + std::transform(shape_out_copy.begin(), shape_out_copy.end(), std::back_inserter(new_shape_out_copy), + [](int64_t val) { return static_cast<int>(val); }); + auto shape_node = BuildIntVecParameterNode( + graph, new_shape_out_copy, reshape_cnode->fullname_with_scope() + "_shape_" + std::to_string(node_name_index)); + node_name_index++; + if (shape_node == nullptr) { + MS_LOG(ERROR) << "build parameter node failed."; return false; } - reshape_cnode->set_inputs({reshape_origin_inputs[0], reshape_origin_inputs[1]}); + reshape_cnode->set_inputs({reshape_cnode->input(0), reshape_cnode->input(1), shape_node}); - slice_t->axes = 
new_axes; - slice_t->begin = new_begin; - slice_t->size = new_size; + slice_node->set_axes(new_axes); + auto new_begin_parameter = BuildIntVecParameterNode( + graph, new_begin, slice_cnode->input(SliceBeginIndex)->cast<ParameterPtr>()->fullname_with_scope()); + auto new_size_parameter = BuildIntVecParameterNode( + graph, new_size, slice_cnode->input(SliceSizeIndex)->cast<ParameterPtr>()->fullname_with_scope()); + slice_cnode->set_input(SliceBeginIndex, new_begin_parameter); + slice_cnode->set_input(SliceSizeIndex, new_size_parameter); auto status = SwapSliceWithPreceed(graph, slice_cnode, reshape_cnode, 1); if (status != RET_OK) { return false; @@ -565,28 +575,38 @@ bool SlicePreposePass::PreposeWithNormalReshape(const FuncGraphPtr &graph, const } CNodePtr SlicePreposePass::CreateSlice1ForReshapePrepose(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, - const CNodePtr &matmul_cnode, const std::vector<int> &shape_in, - const int abnormal_axe_in, const int count_sliced_axe_in, - const bool slice_at_front) { + const CNodePtr &matmul_cnode, + const std::vector<int64_t> &shape_in, + const int64_t abnormal_axe_in, + const int64_t count_sliced_axe_in, const bool slice_at_front) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(matmul_cnode != nullptr); - std::vector<int32_t> new_axes1(shape_in.size()); + std::vector<int64_t> new_axes1(shape_in.size()); std::iota(new_axes1.begin(), new_axes1.end(), 0); - std::vector<int32_t> new_begin1(shape_in.size(), 0); - std::vector<int32_t> new_size1(shape_in.size(), -1); + std::vector<int> new_begin1(shape_in.size(), 0); + std::vector<int> new_size1(shape_in.size(), -1); if (slice_at_front) { - new_begin1[abnormal_axe_in] = count_sliced_axe_in; + new_begin1[abnormal_axe_in] = static_cast<int>(count_sliced_axe_in); } else { - new_size1[abnormal_axe_in] = shape_in[abnormal_axe_in] - count_sliced_axe_in; + new_size1[abnormal_axe_in] = static_cast<int>(shape_in[abnormal_axe_in] - count_sliced_axe_in); } - auto new_slice1 = CreateSliceValueNode(graph, new_axes1, new_begin1, new_size1); + auto new_slice1 = CreateSliceValueNode(graph, new_axes1); if (new_slice1 == nullptr) { MS_LOG(ERROR) << "CreateSliceValueNode failed"; return nullptr; } - auto new_slice1_cnode = graph->NewCNode({new_slice1, matmul_cnode}); + auto begin_parameter = BuildIntVecParameterNode( + graph, new_begin1, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, new_size1, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + auto new_slice1_cnode = graph->NewCNode({new_slice1, matmul_cnode, begin_parameter, size_parameter}); new_slice1_cnode->set_abstract(slice_cnode->abstract()->Clone()); + new_slice1_cnode->set_fullname_with_scope(slice_cnode->fullname_with_scope() + "_slice_" + + std::to_string(node_name_index)); + node_name_index++; ClearCNodeAbstractValue(new_slice1_cnode); return new_slice1_cnode; } @@ -594,55 +614,66 @@ CNodePtr SlicePreposePass::CreateSlice1ForReshapePrepose(const FuncGraphPtr &gra CNodePtr SlicePreposePass::CreateSlice2ForReshapePrepose(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &new_reshape1_cnode, const std::vector<int64_t> &new_shape1, - const int abnormal_axe_in, const int count_sliced_axe_in, - const int count_sliced2, const bool slice_at_front) { + const int64_t abnormal_axe_in, + const int64_t count_sliced_axe_in, const int64_t 
count_sliced2, + const bool slice_at_front) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(matmul_cnode != nullptr); - std::vector<int32_t> new_axes2(abnormal_axe_in + 1); + std::vector<int64_t> new_axes2(abnormal_axe_in + 1); std::iota(new_axes2.begin(), new_axes2.end(), 0); - std::vector<int32_t> new_begin2(abnormal_axe_in + 1, 0); - std::vector<int32_t> new_size2(abnormal_axe_in + 1, -1); + std::vector<int> new_begin2(abnormal_axe_in + 1, 0); + std::vector<int> new_size2(abnormal_axe_in + 1, -1); if (count_sliced2 > new_shape1[abnormal_axe_in]) { MS_LOG(WARNING) << "calculation error"; return nullptr; } if (slice_at_front) { - new_begin2[abnormal_axe_in] = new_shape1[abnormal_axe_in] - count_sliced2; + new_begin2[abnormal_axe_in] = static_cast<int>(new_shape1[abnormal_axe_in] - count_sliced2); } else { - new_size2[abnormal_axe_in] = count_sliced2; + new_size2[abnormal_axe_in] = static_cast<int>(count_sliced2); } - auto new_slice2 = CreateSliceValueNode(graph, new_axes2, new_begin2, new_size2); + auto new_slice2 = CreateSliceValueNode(graph, new_axes2); if (new_slice2 == nullptr) { MS_LOG(ERROR) << "CreateSliceValueNode failed"; return nullptr; } - auto new_slice2_cnode = graph->NewCNode({new_slice2, new_reshape1_cnode}); + auto begin_parameter = BuildIntVecParameterNode( + graph, new_begin2, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, new_size2, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + auto new_slice2_cnode = graph->NewCNode({new_slice2, new_reshape1_cnode, begin_parameter, size_parameter}); new_slice2_cnode->set_abstract(slice_cnode->abstract()->Clone()); + new_slice2_cnode->set_fullname_with_scope(slice_cnode->fullname_with_scope() + "_slice_" + + std::to_string(node_name_index)); + node_name_index++; ClearCNodeAbstractValue(new_slice2_cnode); return new_slice2_cnode; } bool SlicePreposePass::PreposeWithAbnormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &reshape_cnode, const CNodePtr &matmul_cnode, - const std::vector<int> &shape_in, const std::vector<int> &shape_out, - const int abnormal_axe_in, const int abnormal_index_out) { + const std::vector<int64_t> &shape_in, + const std::vector<int64_t> &shape_out, const int64_t abnormal_axe_in, + const int64_t abnormal_index_out) { MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(reshape_cnode != nullptr); auto manager = graph->manager(); - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return false; } - auto slice_axes = slice_t->axes; - auto slice_begin = slice_t->begin; - auto slice_size = slice_t->size; + auto slice_axes = slice_node->get_axes(); + auto slice_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto slice_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); auto abnormal_axe_out = slice_axes[abnormal_index_out]; MS_ASSERT(abnormal_axe_out + 1 < shape_out.size()); - int inter_size_in = 1; - int inter_size_out = 1; + int64_t inter_size_in = 1; + int64_t inter_size_out = 1; for (auto i = 0; i < abnormal_axe_in; ++i) { inter_size_in *= shape_in[i]; } @@ -653,24 +684,24 @@ bool SlicePreposePass::PreposeWithAbnormalReshape(const FuncGraphPtr &graph, con MS_LOG(DEBUG) 
<< "not support prepose now"; return false; } - int outer_size_in = 1; - int outer_size_out = 1; + int64_t outer_size_in = 1; + int64_t outer_size_out = 1; for (auto i = abnormal_axe_in + 1; i < static_cast<int>(shape_in.size()); ++i) { outer_size_in *= shape_in[i]; } for (auto i = abnormal_axe_out + 1; i < static_cast<int>(shape_out.size()); ++i) { outer_size_out *= shape_out[i]; } - const int count_sliced_axe_front = slice_begin[abnormal_index_out]; - const int count_sliced_axe_rear = + const int64_t count_sliced_axe_front = slice_begin[abnormal_index_out]; + const int64_t count_sliced_axe_rear = slice_size[abnormal_index_out] == -1 ? 0 : (shape_out[abnormal_axe_out] - slice_size[abnormal_index_out]); if (count_sliced_axe_front * count_sliced_axe_rear > 0) { MS_LOG(DEBUG) << "not border slice at abnormal axe, prepose with reshape failed"; return false; } bool slice_at_front = count_sliced_axe_front > 0; - const int count_sliced_out = (count_sliced_axe_front + count_sliced_axe_rear) * outer_size_out; - const int count_sliced_axe_in = count_sliced_out / outer_size_in; + const int64_t count_sliced_out = (count_sliced_axe_front + count_sliced_axe_rear) * outer_size_out; + const int64_t count_sliced_axe_in = count_sliced_out / outer_size_in; if (count_sliced_axe_in <= 0 || count_sliced_axe_in > shape_in[abnormal_axe_in]) { MS_LOG(DEBUG) << "amount of sliced out tensor is illegal"; return false; @@ -692,8 +723,9 @@ bool SlicePreposePass::PreposeWithAbnormalReshape(const FuncGraphPtr &graph, con return false; } // new_slice2 - const int count_sliced_abnormal_axe = shape_out[abnormal_axe_out] - (count_sliced_axe_front + count_sliced_axe_rear); - const int count_sliced2 = count_sliced_abnormal_axe * outer_size_out; + const int64_t count_sliced_abnormal_axe = + shape_out[abnormal_axe_out] - (count_sliced_axe_front + count_sliced_axe_rear); + const int64_t count_sliced2 = count_sliced_abnormal_axe * outer_size_out; auto new_slice2_cnode = CreateSlice2ForReshapePrepose(graph, slice_cnode, new_reshape1_cnode, new_shape1, abnormal_axe_in, count_sliced_axe_in, count_sliced2, slice_at_front); @@ -716,13 +748,13 @@ bool SlicePreposePass::PreposeWithAbnormalReshape(const FuncGraphPtr &graph, con } bool SlicePreposePass::GetArithmeticInputInfo(const CNodePtr &arithmetic_cnode, std::vector<AnfNodePtr> *inputs, - std::vector<std::vector<int32_t>> *shapes, + std::vector<std::vector<int64_t>> *shapes, std::vector<bool> *is_default_params) { MS_ASSERT(arithmetic_cnode != nullptr); for (size_t i = 1; i < arithmetic_cnode->inputs().size(); ++i) { auto input = arithmetic_cnode->input(i); MS_ASSERT(input != nullptr); - std::vector<int32_t> shape; + std::vector<int64_t> shape; if (utils::isa<ParameterPtr>(input)) { auto parameter = utils::cast<ParameterPtr>(input); if (!parameter->has_default()) { // if one input is input placeholder, we can't change it @@ -754,30 +786,37 @@ bool SlicePreposePass::PreposeWithSoftmax(const FuncGraphPtr &graph, const CNode MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(softmax_cnode != nullptr); - auto softmax_t = GetSoftmaxT(softmax_cnode); - if (softmax_t == nullptr) { - MS_LOG(ERROR) << "softmax_t is nullptr"; + auto softmax_node = GetSoftmax(softmax_cnode); + if (softmax_node == nullptr) { + MS_LOG(ERROR) << "softmax is nullptr"; + return false; + } + std::vector<int64_t> softmax_axis{-1}; + if (softmax_node->GetAttr(ops::kAxis) != nullptr) { + softmax_axis = softmax_node->get_axis(); + } + if (softmax_axis.size() != 1) { + MS_LOG(ERROR) << "softmax axis 
is not a single value, which is not supported."; return false; } - auto softmax_axis = softmax_t->axis; auto shape = GetCNodeInputShape(softmax_cnode, 1); - if (softmax_axis == -1) { + if (softmax_axis.front() == -1) { if (shape.empty()) { // when softmax axis == -1, shape info is needed to determine whether slice can be preposed return false; } - softmax_axis += shape.size(); + softmax_axis[0] += shape.size(); } - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { return false; } - auto slice_axes = slice_t->axes; - auto slice_begin = slice_t->begin; - auto slice_size = slice_t->size; + auto slice_axes = slice_node->get_axes(); + auto slice_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto slice_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); for (size_t i = 0; i < slice_axes.size(); ++i) { - if (slice_axes[i] == softmax_axis) { + if (slice_axes[i] == softmax_axis.front()) { if (slice_begin[i] != 0) { return false; } @@ -829,12 +868,12 @@ bool SlicePreposePass::PreposeWithReshape(const FuncGraphPtr &graph, const CNode return false; } } - std::vector<int> mapped_axe(shape_out.size(), -1); - int abnormal_axe_in = GetReshapeAbnormalAxeIn(shape_in, shape_out, &mapped_axe); + std::vector<int64_t> mapped_axe(shape_out.size(), -1); + int64_t abnormal_axe_in = GetReshapeAbnormalAxeIn(shape_in, shape_out, &mapped_axe); bool is_normal_mode = true; // if all sliced axe can be found in input shape, normal bool support_abnormal_mode = true; // if first mismatch axe are sliced and no more other axes are sliced, abnormal - int abnormal_index_out = GetReshapeAbnormalIndexOut(slice_cnode, mapped_axe, shape_out, &shape_out_copy, - &is_normal_mode, &support_abnormal_mode); + int64_t abnormal_index_out = GetReshapeAbnormalIndexOut(slice_cnode, mapped_axe, shape_out, &shape_out_copy, + &is_normal_mode, &support_abnormal_mode); if (is_normal_mode) { return PreposeWithNormalReshape(graph, slice_cnode, reshape_cnode, shape_in, shape_out_copy, mapped_axe); } else if (support_abnormal_mode) { @@ -849,8 +888,8 @@ bool SlicePreposePass::PreposeWithReshape(const FuncGraphPtr &graph, const CNode MS_LOG(ERROR) << "matmul_cnode is nullptr"; return false; } - if (GetCNodeType(matmul_cnode) != schema::PrimitiveType_FullConnection && - GetCNodeType(matmul_cnode) != schema::PrimitiveType_MatMul) { + if (!CheckPrimitiveType(matmul_node, prim::kPrimFullConnection) && + !CheckPrimitiveType(matmul_node, prim::kPrimMatMul)) { MS_LOG(DEBUG) << "not matmul->reshape->slice pattern"; return false; } @@ -875,14 +914,14 @@ bool SlicePreposePass::PreposeWithMatmul(const FuncGraphPtr &graph, const CNodeP // if Matmul's output shape is unknown, can't do prepose, cause we can't determine last two axes return false; } - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return RET_ERROR; } - auto axes = slice_t->axes; - auto begin = slice_t->begin; - auto size = slice_t->size; + auto axes = slice_node->get_axes(); + auto begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); // matmul not support broadcast now, it makes things simpler auto manager = graph->manager(); std::shared_ptr<FuncGraphTransaction> tr = std::make_shared<FuncGraphTransaction>(manager.get()); @@ -915,12 +954,19 @@ bool 
SlicePreposePass::PreposeWithMatmul(const FuncGraphPtr &graph, const CNodeP left_size[i] = -1; } } - auto left_slice_vnode = CreateSliceValueNode(graph, left_axes, left_begin, left_size); + auto left_slice_vnode = CreateSliceValueNode(graph, left_axes); + auto begin_parameter = BuildIntVecParameterNode( + graph, left_begin, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, left_size, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; if (left_slice_vnode == nullptr) { MS_LOG(ERROR) << "CreateSliceValueNode failed"; return false; } - auto new_slice_cnode = InsertSlice(graph, left_slice_vnode, matmul_cnode, 1, tr); + const std::vector<AnfNodePtr> inputs = {left_slice_vnode, matmul_cnode->input(1), begin_parameter, size_parameter}; + auto new_slice_cnode = InsertSlice(graph, inputs, matmul_cnode, 1, tr); new_slice_cnode->set_abstract(slice_cnode->abstract()->Clone()); ClearCNodeAbstractValue(new_slice_cnode); changed = true; @@ -935,12 +981,19 @@ bool SlicePreposePass::PreposeWithMatmul(const FuncGraphPtr &graph, const CNodeP right_size[i] = -1; } } - auto right_slice_vnode = CreateSliceValueNode(graph, right_axes, right_begin, right_size); + auto begin_parameter = BuildIntVecParameterNode( + graph, right_begin, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, right_size, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + auto right_slice_vnode = CreateSliceValueNode(graph, right_axes); if (right_slice_vnode == nullptr) { MS_LOG(ERROR) << "CreateSliceValueNode failed"; return false; } - auto new_slice_cnode = InsertSlice(graph, right_slice_vnode, matmul_cnode, 2, tr); + const std::vector<AnfNodePtr> inputs = {right_slice_vnode, matmul_cnode->input(2), begin_parameter, size_parameter}; + auto new_slice_cnode = InsertSlice(graph, inputs, matmul_cnode, 2, tr); new_slice_cnode->set_abstract(slice_cnode->abstract()->Clone()); ClearCNodeAbstractValue(new_slice_cnode); changed = true; @@ -972,19 +1025,19 @@ bool SlicePreposePass::PreposeWithFullConnection(const FuncGraphPtr &graph, cons MS_LOG(DEBUG) << "FullConnection can't be preposed if input shape is unknown or output shape is illegal"; return false; } - auto fc_t = GetFcT(fc_cnode); - if (fc_t == nullptr || fc_t->useAxis) { + auto fc_node = GetFc(fc_cnode); + if (fc_node == nullptr || (fc_node->GetAttr(ops::kUseAxis) != nullptr && fc_node->get_use_axis())) { MS_LOG(DEBUG) << "prepose with fc only support useAxis == false currently"; return false; } - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { - MS_LOG(ERROR) << "slice_t is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; return RET_ERROR; } - auto axes = slice_t->axes; - auto begin = slice_t->begin; - auto size = slice_t->size; + auto axes = slice_node->get_axes(); + auto begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); for (size_t i = 0; i < axes.size(); ++i) { if (axes[i] == 1) { if (begin[i] != 0 || (size[i] != -1 && size[i] != shape_out[1])) { @@ -994,11 +1047,11 @@ bool SlicePreposePass::PreposeWithFullConnection(const FuncGraphPtr &graph, cons } } - std::vector<int> 
mapped_axe(shape_out.size(), -1); - int32_t inner_size_in = 1; + std::vector<int64_t> mapped_axe(shape_out.size(), -1); + int64_t inner_size_in = 1; for (size_t i = 0; i < shape_in.size(); ++i) { inner_size_in *= shape_in[i]; - int32_t inner_size_out = 1; + int64_t inner_size_out = 1; for (size_t j = 0; j < shape_out.size(); ++j) { inner_size_out *= shape_out[j]; if (shape_out[j] == shape_in[i] && inner_size_out == inner_size_in) { @@ -1012,13 +1065,13 @@ bool SlicePreposePass::PreposeWithFullConnection(const FuncGraphPtr &graph, cons return false; } - std::vector<int32_t> new_axes(shape_in.size()); + std::vector<int64_t> new_axes(shape_in.size()); std::iota(new_axes.begin(), new_axes.end(), 0); - std::vector<int32_t> new_begin(shape_in.size(), 0); - std::vector<int32_t> new_size(shape_in.size(), -1); + std::vector<int> new_begin(shape_in.size(), 0); + std::vector<int> new_size(shape_in.size(), -1); new_begin[mapped_axe[0]] = begin[0]; new_size[mapped_axe[0]] = size[0]; - auto new_slice_vnode = CreateSliceValueNode(graph, new_axes, new_begin, new_size); + auto new_slice_vnode = CreateSliceValueNode(graph, new_axes); if (new_slice_vnode == nullptr) { MS_LOG(ERROR) << "CreateSliceValueNode failed"; return false; @@ -1030,7 +1083,14 @@ bool SlicePreposePass::PreposeWithFullConnection(const FuncGraphPtr &graph, cons MS_LOG(ERROR) << "create FuncGraphTransaction failed"; return false; } - auto new_slice_cnode = InsertSlice(graph, new_slice_vnode, fc_cnode, 1, tr); + auto begin_parameter = BuildIntVecParameterNode( + graph, new_begin, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, new_size, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + const std::vector<AnfNodePtr> inputs = {new_slice_vnode, fc_cnode->input(1), begin_parameter, size_parameter}; + auto new_slice_cnode = InsertSlice(graph, inputs, fc_cnode, 1, tr); fc_cnode->set_abstract(slice_cnode->abstract()->Clone()); new_slice_cnode->set_abstract(slice_cnode->abstract()->Clone()); ClearCNodeAbstractValue(new_slice_cnode); @@ -1052,29 +1112,24 @@ bool SlicePreposePass::PreposeWithTranspose(const FuncGraphPtr &graph, const CNo MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(transpose_cnode != nullptr); - auto transpose_primc = GetValueNode<std::shared_ptr<PrimitiveC>>(transpose_cnode->input(0)); - if (transpose_primc == nullptr) { - MS_LOG(ERROR) << "transpose_primc is nullptr"; + if (transpose_cnode->inputs().size() != 3) { + MS_LOG(ERROR) << "transpose inputs size should be 3."; return false; } - auto transpose_primt = transpose_primc->primitiveT(); - if (transpose_primt == nullptr || transpose_primt->value.AsTranspose() == nullptr) { - MS_LOG(ERROR) << "transpose_primt is nullptr"; + auto perm = GetTransposePerm(transpose_cnode); + if (perm.empty()) { return false; } - auto transpose_attr = transpose_primt->value.AsTranspose(); - auto perm = transpose_attr->perm; - - auto slice_t = GetSliceT(slice_cnode); - if (slice_t == nullptr) { + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { MS_LOG(ERROR) << "GetSlice failed"; return false; } - auto old_axes = slice_t->axes; - auto old_begin = slice_t->begin; - auto old_size = slice_t->size; - auto &slice_begin = slice_t->begin; - auto &slice_size = slice_t->size; + auto old_axes = slice_node->get_axes(); + auto old_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); 
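+  // Note: with the unified IR, Slice's begin/size are no longer primitive
+  // attributes but const Parameter inputs of the cnode (SliceBeginIndex /
+  // SliceSizeIndex). GetSliceBeginAndSize is assumed here to read the int
+  // vector back out of that Parameter's default tensor; a minimal sketch,
+  // mirroring the ParamValueLite access pattern used by GetTransposePerm
+  // elsewhere in this patch:
+  //   std::vector<int> GetSliceBeginAndSize(const CNodePtr &cnode, int index) {
+  //     std::vector<int> content;
+  //     auto param = cnode->input(index)->cast<ParameterPtr>();
+  //     if (param == nullptr || !param->has_default()) return content;
+  //     auto value = param->default_param()->cast<ParamValueLitePtr>();
+  //     if (value == nullptr) return content;
+  //     content.resize(value->tensor_shape()[0]);
+  //     if (memcpy_s(content.data(), value->tensor_size(), value->tensor_addr(),
+  //                  value->tensor_size()) != EOK) return {};
+  //     return content;
+  //   }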
+ auto old_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); + auto slice_begin = GetSliceBeginAndSize(slice_cnode, SliceBeginIndex); + auto slice_size = GetSliceBeginAndSize(slice_cnode, SliceSizeIndex); // perm is random shuffle of [0...n-1] according to ops/transpose.cc for (size_t i = 0; i < perm.size(); ++i) { if (perm[i] != static_cast<int>(i)) { @@ -1087,6 +1142,14 @@ bool SlicePreposePass::PreposeWithTranspose(const FuncGraphPtr &graph, const CNo } } } + auto begin_parameter = BuildIntVecParameterNode( + graph, slice_begin, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, slice_size, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + slice_cnode->set_input(SliceBeginIndex, begin_parameter); + slice_cnode->set_input(SliceSizeIndex, size_parameter); auto status = SwapSliceWithPreceed(graph, slice_cnode, transpose_cnode, 1); if (status != RET_OK) { return false; @@ -1113,7 +1176,7 @@ bool SlicePreposePass::PreposeWithArithmetic(const FuncGraphPtr &graph, const CN } bool changed = false; std::vector<AnfNodePtr> inputs; - std::vector<std::vector<int32_t>> shapes; + std::vector<std::vector<int64_t>> shapes; std::vector<bool> is_default_params; if (!GetArithmeticInputInfo(arithmetic_cnode, &inputs, &shapes, &is_default_params)) { return false; @@ -1137,7 +1200,10 @@ bool SlicePreposePass::PreposeWithArithmetic(const FuncGraphPtr &graph, const CN changed = false; break; } - auto new_slice_cnode = InsertSlice(graph, new_slice_vnode, arithmetic_cnode, i, tr); + std::vector<AnfNodePtr> slice_inputs = {new_slice_vnode, arithmetic_cnode->input(i), + slice_cnode->input(SliceBeginIndex), + slice_cnode->input(SliceSizeIndex)}; + auto new_slice_cnode = InsertSlice(graph, slice_inputs, arithmetic_cnode, i, tr); new_slice_cnode->set_abstract(slice_cnode->abstract()->Clone()); ClearCNodeAbstractValue(new_slice_cnode); changed = true; @@ -1148,9 +1214,9 @@ bool SlicePreposePass::PreposeWithArithmetic(const FuncGraphPtr &graph, const CN } } else { // shape not empty if (!another_shape.empty() || IsScalarNode(another_input)) { - std::vector<int32_t> new_axes; - std::vector<int32_t> new_begin; - std::vector<int32_t> new_size; + std::vector<int64_t> new_axes; + std::vector<int> new_begin; + std::vector<int> new_size; auto status = SliceParamDeBroadcast(slice_cnode, shape, &new_axes, &new_begin, &new_size); if (status == lite::RET_NO_CHANGE) { continue; @@ -1159,12 +1225,20 @@ bool SlicePreposePass::PreposeWithArithmetic(const FuncGraphPtr &graph, const CN changed = false; break; } - auto new_slice_vnode = CreateSliceValueNode(graph, new_axes, new_begin, new_size); + auto new_slice_vnode = CreateSliceValueNode(graph, new_axes); if (new_slice_vnode == nullptr) { changed = false; break; } - auto new_slice_cnode = InsertSlice(graph, new_slice_vnode, arithmetic_cnode, i, tr); + auto begin_parameter = BuildIntVecParameterNode( + graph, new_begin, slice_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, new_size, slice_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + std::vector<AnfNodePtr> slice_inputs = {new_slice_vnode, arithmetic_cnode->input(i), begin_parameter, + size_parameter}; + auto new_slice_cnode = InsertSlice(graph, slice_inputs, arithmetic_cnode, i, tr); 
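+      // After this refactor a Slice cnode carries four inputs:
+      //   {slice_primitive_vnode, data_input, begin_parameter, size_parameter}
+      // so InsertSlice now receives the whole input vector instead of a single
+      // value node (see its new signature in slice_prepose_pass.h). A sketch of
+      // the expected wiring, an assumption inferred from the call sites here:
+      //   CNodePtr InsertSlice(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &inputs,
+      //                        const CNodePtr &preceed_cnode, const int index, const TransactionPtr &tr) {
+      //     auto slice_cnode = graph->NewCNode(inputs);      // inputs[0] is the Slice primitive
+      //     tr->SetEdge(preceed_cnode, index, slice_cnode);  // splice it in front of preceed_cnode
+      //     return slice_cnode;
+      //   }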
new_slice_cnode->set_abstract(slice_cnode->abstract()->Clone()); ClearCNodeAbstractValue(new_slice_cnode); changed = true; @@ -1190,22 +1264,22 @@ bool SlicePreposePass::PreposeWithArithmetic(const FuncGraphPtr &graph, const CN */ bool SlicePreposePass::MergeSequentialSlice(const FuncGraphPtr &graph, const CNodePtr &slice1_cnode, const CNodePtr &slice2_cnode) { - if (slice2_cnode->inputs().size() != lite::kDoubleNum) { + if (slice2_cnode->inputs().size() != kArithmeticInputNum) { MS_LOG(INFO) << "Slice read attrs from input is not supported now"; return false; } - auto slice1_t = GetSliceT(slice1_cnode); // bottom node - auto slice2_t = GetSliceT(slice2_cnode); // top node - if (slice1_t == nullptr || slice2_t == nullptr) { - MS_LOG(ERROR) << "slice_t is null"; + auto slice1_node = GetSlice(slice1_cnode); // bottom node + auto slice2_node = GetSlice(slice2_cnode); // top node + if (slice1_node == nullptr || slice2_node == nullptr) { + MS_LOG(ERROR) << "slice is null"; return false; } - auto begin_slice1 = slice1_t->begin; - auto size_slice1 = slice1_t->size; - auto axes_slice1 = slice1_t->axes; - auto begin_slice2 = slice2_t->begin; - auto size_slice2 = slice2_t->size; - auto axes_slice2 = slice2_t->axes; + auto begin_slice1 = GetSliceBeginAndSize(slice1_cnode, SliceBeginIndex); + auto size_slice1 = GetSliceBeginAndSize(slice1_cnode, SliceSizeIndex); + auto axes_slice1 = slice1_node->get_axes(); + auto begin_slice2 = GetSliceBeginAndSize(slice2_cnode, SliceBeginIndex); + auto size_slice2 = GetSliceBeginAndSize(slice2_cnode, SliceSizeIndex); + auto axes_slice2 = slice2_node->get_axes(); auto status1 = VerifySliceAttrs(slice1_cnode); auto status2 = VerifySliceAttrs(slice2_cnode); if (status1 != RET_OK || status2 != RET_OK) { @@ -1214,12 +1288,12 @@ bool SlicePreposePass::MergeSequentialSlice(const FuncGraphPtr &graph, const CNo auto manager = graph->manager(); auto node_users = manager->node_users()[slice1_cnode]; - int axe_max1 = *std::max_element(axes_slice1.begin(), axes_slice1.end()); - int axe_max2 = *std::max_element(axes_slice2.begin(), axes_slice2.end()); - int axe_max = std::max(axe_max1, axe_max2); - auto &begin_new = slice2_t->begin; - auto &size_new = slice2_t->size; - auto &axes_new = slice2_t->axes; + int64_t axe_max1 = *std::max_element(axes_slice1.begin(), axes_slice1.end()); + int64_t axe_max2 = *std::max_element(axes_slice2.begin(), axes_slice2.end()); + int64_t axe_max = std::max(axe_max1, axe_max2); + auto begin_new = begin_slice2; + auto size_new = size_slice2; + auto axes_new = slice2_node->get_axes(); axes_new.resize(axe_max + 1); std::iota(axes_new.begin(), axes_new.end(), 0); begin_new.assign(axe_max + 1, 0); @@ -1248,6 +1322,15 @@ bool SlicePreposePass::MergeSequentialSlice(const FuncGraphPtr &graph, const CNo } } } + slice2_node->set_axes(axes_new); + auto begin_parameter = BuildIntVecParameterNode( + graph, begin_new, slice2_cnode->fullname_with_scope() + "_begin_" + std::to_string(node_name_index)); + node_name_index += 1; + auto size_parameter = BuildIntVecParameterNode( + graph, size_new, slice2_cnode->fullname_with_scope() + "_size_" + std::to_string(node_name_index)); + node_name_index += 1; + slice2_cnode->set_input(SliceBeginIndex, begin_parameter); + slice2_cnode->set_input(SliceSizeIndex, size_parameter); slice2_cnode->set_abstract(slice1_cnode->abstract()->Clone()); for (auto &node_user : node_users) { manager->SetEdge(node_user.first, node_user.second, slice2_cnode); @@ -1265,7 +1348,7 @@ bool SlicePreposePass::MergeParallelSlice(const FuncGraphPtr 
&graph, const NodeU MS_ASSERT(slices->size() >= 2); auto manager = graph->manager(); auto first_slice = utils::cast<CNodePtr>(slices->at(0).first); - if (first_slice == nullptr || GetCNodeType(first_slice) != schema::PrimitiveType_Slice) { + if (first_slice == nullptr || !CheckPrimitiveType(first_slice, prim::kPrimSliceFusion)) { MS_LOG(ERROR) << "first node is not Slice"; return false; } @@ -1281,7 +1364,7 @@ bool SlicePreposePass::MergeParallelSlice(const FuncGraphPtr &graph, const NodeU } for (size_t i = 1; i < slices->size(); ++i) { auto slice = utils::cast<CNodePtr>(slices->at(i).first); - if (slice == nullptr || GetCNodeType(slice) != schema::PrimitiveType_Slice) { + if (slice == nullptr || !CheckPrimitiveType(slice, prim::kPrimSliceFusion)) { MS_LOG(ERROR) << "current node is not Slice"; return false; } @@ -1304,34 +1387,22 @@ bool SlicePreposePass::DoPrepose(const FuncGraphPtr &graph, const CNodePtr &slic MS_ASSERT(graph != nullptr); MS_ASSERT(slice_cnode != nullptr); MS_ASSERT(preceed_cnode != nullptr); - auto preceed_node_type = GetCNodeType(preceed_cnode); - switch (preceed_node_type) { - case schema::PrimitiveType_SoftMax: { - return PreposeWithSoftmax(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_Reshape: { - return PreposeWithReshape(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_MatMul: { - return PreposeWithMatmul(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_FullConnection: { - return PreposeWithFullConnection(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_Transpose: { - return PreposeWithTranspose(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_Sub: - case schema::PrimitiveType_Mul: - case schema::PrimitiveType_Add: { - return PreposeWithArithmetic(graph, slice_cnode, preceed_cnode); - } - case schema::PrimitiveType_Slice: { - return MergeSequentialSlice(graph, slice_cnode, preceed_cnode); - } - default: { - MS_LOG(DEBUG) << "Node type " << preceed_node_type << " currently not support SlicePrepose"; - } + if (CheckPrimitiveType(preceed_cnode, prim::kPrimSoftmax)) { + return PreposeWithSoftmax(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimReshape)) { + return PreposeWithReshape(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimMatMul)) { + return PreposeWithMatmul(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimFullConnection)) { + return PreposeWithFullConnection(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimTranspose)) { + return PreposeWithTranspose(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimSubFusion) || + CheckPrimitiveType(preceed_cnode, prim::kPrimMulFusion) || + CheckPrimitiveType(preceed_cnode, prim::kPrimAddFusion)) { + return PreposeWithArithmetic(graph, slice_cnode, preceed_cnode); + } else if (CheckPrimitiveType(preceed_cnode, prim::kPrimSliceFusion)) { + return MergeSequentialSlice(graph, slice_cnode, preceed_cnode); } return false; } @@ -1350,22 +1421,22 @@ bool SlicePreposePass::Run(const FuncGraphPtr &graph) { if (node->func_graph() != graph) { continue; } - if (!utils::isa<CNodePtr>(node) || GetCNodeType(node) != schema::PrimitiveType_Slice) { + if (!utils::isa<CNodePtr>(node) || !CheckPrimitiveType(node, prim::kPrimSliceFusion)) { continue; } auto slice_cnode = node->cast<CNodePtr>(); - if 
(slice_cnode->inputs().size() != lite::kDoubleNum) { // only support params from attrs now - MS_LOG(INFO) << "SlicePrepose not support more than two inputs now"; + if (!CheckIsAllInputsParam(slice_cnode)) { // only support const begin and size tensors. + MS_LOG(INFO) << "SlicePrepose does not support variable inputs now"; continue; } - auto primt = GetSliceT(slice_cnode); - if (primt == nullptr) { - MS_LOG(ERROR) << "primitive_t of slice is nullptr"; + auto slice_node = GetSlice(slice_cnode); + if (slice_node == nullptr) { + MS_LOG(ERROR) << "slice is nullptr"; continue; } auto preceed_node = slice_cnode->input(1); if (preceed_node == nullptr) { - MS_LOG(ERROR) << "preceed node is nullptr"; + MS_LOG(ERROR) << "preceding node is nullptr"; continue; } auto output_tensor_num = GetOutputTensorNum(preceed_node); diff --git a/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.h b/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.h index 52fa09d79a..4e336f3471 100644 --- a/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.h +++ b/mindspore/lite/tools/optimizer/graph/slice_prepose_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ #include "backend/optimizer/common/pass.h" #include "include/errorcode.h" #include "mindspore/core/ir/manager.h" -#include "schema/inner/model_generated.h" using mindspore::lite::converter::FmkType; namespace mindspore::opt { @@ -44,40 +43,39 @@ class SlicePreposePass : public Pass { void ClearCNodeAbstractValue(const CNodePtr &cnode); STATUS SwapSliceWithPreceed(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &preceed_cnode, const int index, const TransactionPtr &tr = nullptr); - ValueNodePtr CreateSliceValueNode(const FuncGraphPtr &graph, const std::vector<int32_t> &axes, - const std::vector<int32_t> &begin, const std::vector<int32_t> &size); + ValueNodePtr CreateSliceValueNode(const FuncGraphPtr &graph, const std::vector<int64_t> &axes); ValueNodePtr CopySliceValueNode(const FuncGraphPtr &graph, const CNodePtr &slice_cnode); - CNodePtr InsertSlice(const FuncGraphPtr &graph, const ValueNodePtr &slice_vnode, const CNodePtr &preceed_cnode, + CNodePtr InsertSlice(const FuncGraphPtr &graph, const std::vector<AnfNodePtr> &inputs, const CNodePtr &preceed_cnode, const int index, const TransactionPtr &tr); STATUS VerifySliceAttrs(const CNodePtr &slice_cnode, const int dim = -1); - STATUS SliceParamDeBroadcast(const CNodePtr &slice_cnode, const std::vector<int32_t> &ref_shape, - std::vector<int32_t> *axes, std::vector<int32_t> *begin, std::vector<int32_t> *size); + STATUS SliceParamDeBroadcast(const CNodePtr &slice_cnode, const std::vector<int64_t> &ref_shape, + std::vector<int64_t> *axes, std::vector<int> *begin, std::vector<int> *size); CNodePtr CreateReshapeCNode(const FuncGraphPtr &graph, const std::vector<int64_t> &shape, const AbstractBasePtr &abstract, const CNodePtr &preceed_cnode); bool SiblingsAreSameSlice(const FuncGraphPtr &graph, const NodeUsedListPtr &output_node_list, - const std::vector<int32_t> &ref_shape = {}); - int GetReshapeAbnormalAxeIn(const std::vector<int> &shape_in, const std::vector<int> &shape_out, - std::vector<int> *mapped_axe); - int GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, const std::vector<int> &mapped_axe, - const std::vector<int> &shape_out, std::vector<int> 
*shape_out_copy, - bool *is_normal_mode, bool *support_abnormal_mode); + const std::vector<int64_t> &ref_shape = {}); + int64_t GetReshapeAbnormalAxeIn(const std::vector<int64_t> &shape_in, const std::vector<int64_t> &shape_out, + std::vector<int64_t> *mapped_axe); + int64_t GetReshapeAbnormalIndexOut(const CNodePtr &slice_cnode, const std::vector<int64_t> &mapped_axe, + const std::vector<int64_t> &shape_out, std::vector<int64_t> *shape_out_copy, + bool *is_normal_mode, bool *support_abnormal_mode); bool PreposeWithNormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &reshape_cnode, - const std::vector<int> &shape_in, const std::vector<int> &shape_out_copy, - const std::vector<int> &mapped_axe); + const std::vector<int64_t> &shape_in, const std::vector<int64_t> &shape_out_copy, + const std::vector<int64_t> &mapped_axe); CNodePtr CreateSlice1ForReshapePrepose(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, - const CNodePtr &matmul_cnode, const std::vector<int> &shape_in, - const int abnormal_axe_in, const int count_sliced_axe_in, + const CNodePtr &matmul_cnode, const std::vector<int64_t> &shape_in, + const int64_t abnormal_axe_in, const int64_t count_sliced_axe_in, const bool slice_at_front); CNodePtr CreateSlice2ForReshapePrepose(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &new_reshape1_cnode, const std::vector<int64_t> &new_shape1, - const int abnormal_axe_in, const int count_sliced_axe_in, - const int count_sliced2, const bool slice_at_front); + const int64_t abnormal_axe_in, const int64_t count_sliced_axe_in, + const int64_t count_sliced2, const bool slice_at_front); bool PreposeWithAbnormalReshape(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &reshape_cnode, - const CNodePtr &matmul_cnode, const std::vector<int> &shape_in, - const std::vector<int> &shape_out, const int abnormal_axe_in, - const int abnormal_index_out); + const CNodePtr &matmul_cnode, const std::vector<int64_t> &shape_in, + const std::vector<int64_t> &shape_out, const int64_t abnormal_axe_in, + const int64_t abnormal_index_out); bool GetArithmeticInputInfo(const CNodePtr &arithmetic_cnode, std::vector<AnfNodePtr> *inputs, - std::vector<std::vector<int32_t>> *shapes, std::vector<bool> *is_default_params); + std::vector<std::vector<int64_t>> *shapes, std::vector<bool> *is_default_params); bool DoPrepose(const FuncGraphPtr &graph, const CNodePtr &slice_cnode, const CNodePtr &preceed_cnode); diff --git a/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.cc b/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.cc new file mode 100644 index 0000000000..c7d500f391 --- /dev/null +++ b/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.cc @@ -0,0 +1,190 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "tools/optimizer/graph/tflite_inputs_adjust_pass.h" +#include <vector> +#include <memory> +#include "ops/batch_to_space.h" +#include "ops/batch_to_space_nd.h" +#include "ops/fusion/arg_max_fusion.h" +#include "ops/fusion/arg_min_fusion.h" +#include "ops/fusion/pad_fusion.h" +#include "ops/fusion/reduce_fusion.h" +#include "ops/op_utils.h" +#include "ops/resize.h" +#include "ops/space_to_batch.h" +#include "ops/space_to_batch_nd.h" +#include "ops/space_to_depth.h" +#include "tools/converter/quant_param_holder.h" +#include "tools/converter/quantizer/quant_cast.h" + +namespace mindspore::opt { +namespace { +constexpr size_t split_inputs_size = 3; +const std::vector<std::string> single_input_ops = { + ops::kNameArgMaxFusion, ops::kNameArgMinFusion, ops::kNameBatchToSpace, ops::kNameBatchToSpaceND, + ops::kNameSpaceToBatch, ops::kNameSpaceToBatchND, ops::kNameSpaceToDepth}; + +bool CheckResize(const CNodePtr &cnode) { + if (!CheckPrimitiveType(cnode, prim::kPrimResize)) { + return false; + } + auto prim_resize = GetValueNode<std::shared_ptr<ops::Resize>>(cnode->input(0)); + if (prim_resize == nullptr || prim_resize->GetAttr(ops::kNewHeight) == nullptr || + prim_resize->GetAttr(ops::kNewWidth) == nullptr) { + return false; + } + int64_t new_height = prim_resize->get_new_height(); + int64_t new_width = prim_resize->get_new_width(); + return new_height != 0 && new_width != 0; +} + +lite::STATUS ReorderCnodeInputs(CNode *cnode, const std::vector<size_t> &perm) { + // add primitive first + std::vector<AnfNodePtr> new_inputs = {cnode->input(0)}; + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + auto input_quant_params = primitive->GetAttr("quant_params"); + auto input_quant_params_holder = input_quant_params == nullptr + ? std::make_shared<lite::QuantParamHolder>() + : input_quant_params->cast<lite::QuantParamHolderPtr>(); + auto old_quant_params = input_quant_params_holder->input_quant_params(); + auto new_input_quant_holder = std::make_shared<lite::QuantParamHolder>(); + // add inputs as perm order + for (size_t idx : perm) { + if (idx > cnode->inputs().size() - 1) { + MS_LOG(ERROR) << "Idx " << idx << " is larger than inputs size: " << cnode->inputs().size() - 1; + return lite::RET_ERROR; + } + new_inputs.emplace_back(cnode->input(idx)); + auto quant_param = idx < old_quant_params.size() ? 
old_quant_params.at(idx) : std::vector<schema::QuantParamT>(); + new_input_quant_holder->AddInputQuantParam(quant_param); + } + cnode->set_inputs(new_inputs); + primitive->set_attr("quant_params", new_input_quant_holder); + return lite::RET_OK; +} +} // namespace + +STATUS TfliteInputsAdjustPass::ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph, + const ParameterPtr &param_node) { + MS_ASSERT(func_graph != nullptr); + MS_ASSERT(param_node != nullptr); + if (param_node->abstract() == nullptr) { + MS_LOG(ERROR) << "parameter node abstract is invalid."; + return lite::RET_NULL_PTR; + } + auto abstract_tensor = param_node->abstract()->cast<abstract::AbstractTensorPtr>(); + if (abstract_tensor == nullptr) { + MS_LOG(ERROR) << "param node has no abstract tensor."; + return lite::RET_NULL_PTR; + } + if (abstract_tensor->element() == nullptr || abstract_tensor->element()->GetTypeTrack() == nullptr) { + MS_LOG(ERROR) << "get typePtr failed."; + return lite::RET_NULL_PTR; + } + if (abstract_tensor->element()->GetTypeTrack()->type_id() != kNumberTypeInt64) { + MS_LOG(DEBUG) << "no need to convert to int32."; + return lite::RET_OK; + } + auto manager = func_graph->manager(); + MS_ASSERT(manager != nullptr); + if (param_node->has_default()) { + auto default_value = param_node->default_param(); + if (default_value == nullptr) { + MS_LOG(ERROR) << "default data is nullptr."; + return lite::RET_NULL_PTR; + } + auto param_value = default_value->cast<ParamValueLitePtr>(); + if (param_value == nullptr) { + MS_LOG(ERROR) << "default data is not ParamValueLite."; + return lite::RET_NULL_PTR; + } + auto param_node_new = BuildParameterNode(func_graph, param_node, param_value); + manager->Replace(param_node, param_node_new); + } else { + // set graph input + param_node->abstract()->set_type(TypeIdToType(kNumberTypeInt32)); + } + return lite::RET_OK; +} + +STATUS TfliteInputsAdjustPass::AdjustSlice(const AnfNodePtr &node, const FuncGraphPtr &graph) { + auto cnode = node->cast<CNodePtr>(); + if (cnode->inputs().size() < 4) { + MS_LOG(ERROR) << "Slice should have 3 inputs"; + return RET_ERROR; + } + + auto begin_param_node = cnode->input(2)->cast<ParameterPtr>(); + auto size_param_node = cnode->input(3)->cast<ParameterPtr>(); + if (ReplaceInt64ParameterNode(graph, begin_param_node) == RET_OK && + ReplaceInt64ParameterNode(graph, size_param_node) == RET_OK) { + return RET_OK; + } else { + MS_LOG(ERROR) << "Adjust inputs for Slice failed"; + return RET_ERROR; + } +} + +bool TfliteInputsAdjustPass::Run(const FuncGraphPtr &graph) { + auto node_list = TopoSort(graph->get_return()); + for (auto &node : node_list) { + if (!utils::isa<CNode>(node)) { + continue; + } + auto cnode = node->cast<CNodePtr>(); + if (CheckPrimitiveType(cnode, prim::kPrimFill)) { + // dims, value => value, dims + if (RET_OK != ReorderCnodeInputs(cnode.get(), {2, 1})) { + MS_LOG(ERROR) << "Reorder fill inputs failed"; + return false; + } + continue; + } + + if (CheckPrimitiveType(cnode, prim::kPrimConv2dTransposeFusion)) { + // output_shape, weights, input => input, weight + if (RET_OK != ReorderCnodeInputs(cnode.get(), {3, 2})) { + MS_LOG(ERROR) << "Reorder deconv inputs failed"; + return false; + } + continue; + } + + if (CheckPrimitiveType(cnode, prim::kPrimSplit) && cnode->inputs().size() == split_inputs_size) { + // axis, input, ??? 
=> input, axis + if (RET_OK != ReorderCnodeInputs(cnode.get(), {2, 1})) { + MS_LOG(ERROR) << "Reorder split inputs failed"; + return false; + } + continue; + } + auto primitive = GetValueNode<PrimitivePtr>(cnode->input(0)); + if (lite::IsContain(single_input_ops, primitive->name()) || CheckResize(cnode)) { + if (ReorderCnodeInputs(cnode.get(), {1}) != lite::RET_OK) { + MS_LOG(ERROR) << "Reorder single input failed"; + return false; + } + } + if (CheckPrimitiveType(node, prim::kPrimSliceFusion)) { + if (AdjustSlice(node, graph) == RET_OK) { + continue; + } + return false; + } + } + return true; +} +} // namespace mindspore::opt diff --git a/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.h b/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.h new file mode 100644 index 0000000000..850953ccc0 --- /dev/null +++ b/mindspore/lite/tools/optimizer/graph/tflite_inputs_adjust_pass.h @@ -0,0 +1,37 @@ +/** + * Copyright 2020-2021 Huawei Technologies Co., Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef LITE_TFLITE_INPUTS_ADJUST_PASS_H +#define LITE_TFLITE_INPUTS_ADJUST_PASS_H + +#include <string> +#include "tools/converter/converter_flags.h" +#include "backend/optimizer/common/pass.h" +#include "src/param_value_lite.h" +#include "tools/optimizer/common/gllo_utils.h" + +namespace mindspore::opt { +class TfliteInputsAdjustPass : public Pass { + public: + TfliteInputsAdjustPass() : Pass("tflite_inputs_adjust_pass") {} + ~TfliteInputsAdjustPass() override = default; + + bool Run(const FuncGraphPtr &graph) override; + + STATUS ReplaceInt64ParameterNode(const FuncGraphPtr &func_graph, const ParameterPtr &param_node); + STATUS AdjustSlice(const AnfNodePtr &node, const FuncGraphPtr &func_graph); +}; +} // namespace mindspore::opt +#endif // LITE_TFLITE_INPUTS_ADJUST_PASS_H diff --git a/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.cc b/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.cc deleted file mode 100644 index abb8daaef9..0000000000 --- a/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.cc +++ /dev/null @@ -1,107 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "tools/optimizer/graph/tflite_inputs_order_exchange_pass.h" -#include <vector> -#include <memory> -#include "tools/optimizer/common/gllo_utils.h" -#include "schema/inner/model_generated.h" -#include "tools/converter/quantizer/quant_cast.h" -#include "src/common/utils.h" - -using mindspore::lite::PrimitiveC; -namespace mindspore::opt { -namespace { -constexpr size_t split_inputs_size = 3; -const std::vector<schema::PrimitiveType> single_input_ops = { - schema::PrimitiveType_Reduce, schema::PrimitiveType_ArgMin, schema::PrimitiveType_ArgMax, - schema::PrimitiveType_SpaceToBatch, schema::PrimitiveType_BatchToSpace, schema::PrimitiveType_SpaceToBatchND, - schema::PrimitiveType_BatchToSpaceND, schema::PrimitiveType_SpaceToDepth}; -} // namespace - -STATUS ReorderCnodeInputs(CNode *cnode, const std::vector<size_t> &perm) { - // add primitive first - std::vector<AnfNodePtr> new_inputs = {cnode->input(0)}; - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - auto old_quant_params = primitive_c->input_quant_params(); - std::vector<std::vector<schema::QuantParamT>> new_quant_params; - // add inputs as perm order - for (size_t idx : perm) { - if (idx > cnode->inputs().size() - 1) { - MS_LOG(ERROR) << "Idx " << idx << " is larger than inputs size: " << cnode->inputs().size() - 1; - return RET_ERROR; - } - new_inputs.emplace_back(cnode->input(idx)); - new_quant_params.emplace_back(old_quant_params.at(idx - 1)); - } - cnode->set_inputs(new_inputs); - primitive_c->set_input_quant_params(new_quant_params); - return RET_OK; -} - -bool TfliteInputsOrderExchangePass::Run(const FuncGraphPtr &graph) { - auto node_list = TopoSort(graph->get_return()); - for (auto &node : node_list) { - if (!utils::isa<CNode>(node)) { - continue; - } - auto cnode = node->cast<CNodePtr>(); - auto primitive_c = GetValueNode<std::shared_ptr<PrimitiveC>>(cnode->input(0)); - - if (opt::GetCNodeType(node) == schema::PrimitiveType_Fill) { - // dims, value => value, dims - if (RET_OK != ReorderCnodeInputs(cnode.get(), {2, 1})) { - MS_LOG(ERROR) << "Reorder fill inputs failed"; - return false; - } - continue; - } - - if (opt::GetCNodeType(node) == schema::PrimitiveType_DeConv2D) { - // output_shape, weights, input => input, weight - if (RET_OK != ReorderCnodeInputs(cnode.get(), {3, 2})) { - MS_LOG(ERROR) << "Reorder deconv inputs failed"; - return false; - } - continue; - } - - if (opt::GetCNodeType(node) == schema::PrimitiveType_Split && cnode->inputs().size() == split_inputs_size) { - // axis, input, ??? 
=> input, axis - if (RET_OK != ReorderCnodeInputs(cnode.get(), {2, 1})) { - MS_LOG(ERROR) << "Reorder split inputs failed"; - return false; - } - continue; - } - - bool is_single_input_pad = opt::GetCNodeType(node) == schema::PrimitiveType_Pad && - primitive_c->primitiveT()->value.AsPad() != nullptr && - primitive_c->primitiveT()->value.AsPad()->paddingMode == schema::PaddingMode_CONSTANT; - bool is_single_input_resize = opt::GetCNodeType(node) == schema::PrimitiveType_Resize && - primitive_c->primitiveT()->value.AsResize() != nullptr && - primitive_c->primitiveT()->value.AsResize()->newHeight != 0 && - primitive_c->primitiveT()->value.AsResize()->newWidth != 0; - if (lite::IsContain(single_input_ops, opt::GetCNodeType(node)) || is_single_input_pad || is_single_input_resize) { - if (RET_OK != ReorderCnodeInputs(cnode.get(), {1})) { - MS_LOG(ERROR) << "Reorder single input failed"; - return false; - } - continue; - } - } - return true; -} -} // namespace mindspore::opt diff --git a/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.h b/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.h deleted file mode 100644 index 566cec6090..0000000000 --- a/mindspore/lite/tools/optimizer/graph/tflite_inputs_order_exchange_pass.h +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright 2019 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef LITE_TFLITE_INPUTS_ORDER_EXCHANGE_PASS_H -#define LITE_TFLITE_INPUTS_ORDER_EXCHANGE_PASS_H - -#include <string> -#include "schema/inner/model_generated.h" -#include "tools/converter/converter_flags.h" -#include "backend/optimizer/common/pass.h" -#include "src/param_value_lite.h" - -namespace mindspore::opt { -class TfliteInputsOrderExchangePass : public Pass { - public: - TfliteInputsOrderExchangePass() : Pass("tflite_inputs_order_exchange_pass") {} - ~TfliteInputsOrderExchangePass() override = default; - bool Run(const FuncGraphPtr &graph) override; -}; -} // namespace mindspore::opt -#endif // LITE_TFLITE_INPUTS_ORDER_EXCHANGE_PASS_H diff --git a/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.cc b/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.cc index 328ab146ed..cb88fb439a 100644 --- a/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,9 +16,9 @@ #include "tools/optimizer/graph/unused_cast_node_remove_pass.h" #include "tools/optimizer/common/gllo_utils.h" #include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" namespace mindspore::opt { +constexpr size_t kCastInputNum = 3; void RemoveUnusedCastOpPass::SetFmkType(FmkType type) { this->fmk_type = type; } bool RemoveUnusedCastOpPass::Run(const FuncGraphPtr &func_graph) { @@ -34,8 +34,7 @@ bool RemoveUnusedCastOpPass::Run(const FuncGraphPtr &func_graph) { if (!utils::isa<CNodePtr>(node)) { continue; } - auto type = opt::GetCNodeType(node); - if (type != schema::PrimitiveType_Cast) { + if (!CheckPrimitiveType(node, prim::kPrimCast)) { continue; } auto cast_cnode = node->cast<CNodePtr>(); @@ -54,7 +53,7 @@ bool RemoveUnusedCastOpPass::Run(const FuncGraphPtr &func_graph) { MS_ASSERT(input_type != nullptr); auto input_type_value = input_type->type_id(); - if (cast_cnode->inputs().size() != lite::kTripleNum || !utils::isa<ValueNodePtr>(cast_cnode->input(2))) { + if (cast_cnode->inputs().size() != kCastInputNum || !utils::isa<ValueNodePtr>(cast_cnode->input(2))) { MS_LOG(ERROR) << "Second input of cast should be a ValueNode"; return RET_ERROR; } diff --git a/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.h b/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.h index 4264e0d3d0..4536e0f06c 100644 --- a/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.h +++ b/mindspore/lite/tools/optimizer/graph/unused_cast_node_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc index ffa5c40894..78b98c5b9a 100644 --- a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -16,16 +16,46 @@ #include "tools/optimizer/graph/unused_transpose_node_remove_pass.h" #include <vector> #include <memory> +#include "ops/transpose.h" #include "tools/optimizer/common/gllo_utils.h" -#include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" +#include "include/errorcode.h" namespace mindspore::opt { static constexpr size_t kTransposeInput = 1; +constexpr size_t kTransposeInputNum = 3; const std::vector<int> kPermNCHW{0, 3, 1, 2}; const std::vector<int> kPermNHWC{0, 2, 3, 1}; void RemoveUnusedTransposeOpPass::SetFmkType(FmkType type) { this->fmk_type = type; } +std::vector<int> GetTransposePerm(const CNodePtr &node) { + MS_ASSERT(node != nullptr); + std::vector<int> perm; + if (!CheckPrimitiveType(node, prim::kPrimTranspose)) { + return perm; + } + if (node->inputs().size() != kTransposeInputNum) { + return perm; + } + auto perm_node = node->input(2); + if (!utils::isa<ParameterPtr>(perm_node)) { + return perm; + } + auto perm_param = perm_node->cast<ParameterPtr>(); + if (!perm_param->has_default() || perm_param->default_param() == nullptr) { + return perm; + } + auto perm_value = perm_param->default_param()->cast<ParamValueLitePtr>(); + if (perm_value == nullptr) { + return perm; + } + perm.resize(perm_value->tensor_shape()[0]); + if (memcpy_s(perm.data(), perm_value->tensor_size(), perm_value->tensor_addr(), perm_value->tensor_size()) != EOK) { + MS_LOG(ERROR) << "memcpy failed."; + return {}; + } + return perm; +} + bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) { if (this->fmk_type != lite::converter::FmkType_ONNX) { MS_LOG(ERROR) << "The framework type of model should be onnx."; @@ -39,48 +69,26 @@ bool RemoveUnusedTransposeOpPass::Run(const FuncGraphPtr &func_graph) { if (!utils::isa<CNodePtr>(node)) { continue; } - auto type = opt::GetCNodeType(node); - if (type == schema::PrimitiveType_Transpose) { + if (CheckPrimitiveType(node, prim::kPrimTranspose)) { auto transpose_cnode = node->cast<CNodePtr>(); - auto typeInput = opt::GetCNodeType(transpose_cnode->input(kTransposeInput)); - if (typeInput != schema::PrimitiveType_Conv2D) { + if (!CheckPrimitiveType(transpose_cnode->input(kTransposeInput), prim::kPrimConv2DFusion)) { continue; } - auto primPtr = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(transpose_cnode->input(0)); - if (primPtr == nullptr) { - MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveC"; - return RET_ERROR; - } - auto primT = primPtr->primitiveT(); - if (primT == nullptr) { - MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveC"; - return RET_ERROR; + if (transpose_cnode->inputs().size() != kTransposeInputNum) { + MS_LOG(ERROR) << "transpose node should have 2 inputs."; + return false; } - MS_ASSERT(primT->value != nullptr); - MS_ASSERT(primT->value.AsTranspose() != nullptr); - std::vector<int32_t> perm = primT->value.AsTranspose()->perm; + auto perm = GetTransposePerm(transpose_cnode); if (perm == kPermNCHW) { manager->Replace(transpose_cnode, transpose_cnode->input(1)); } - } else if (type == schema::PrimitiveType_Conv2D) { + } else if (CheckPrimitiveType(node, prim::kPrimConv2DFusion)) { auto conv_node = node->cast<CNodePtr>(); - auto typeInput = opt::GetCNodeType(conv_node->input(kTransposeInput)); - if (typeInput != schema::PrimitiveType_Transpose) { + if (!CheckPrimitiveType(conv_node->input(kTransposeInput), prim::kPrimTranspose)) { continue; } auto transpose_cnode = conv_node->input(kTransposeInput)->cast<CNodePtr>(); - auto primPtr = 
GetValueNode<std::shared_ptr<lite::PrimitiveC>>(transpose_cnode->input(0)); - if (primPtr == nullptr) { - MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveC"; - return RET_ERROR; - } - auto primT = primPtr->primitiveT(); - if (primT == nullptr) { - MS_LOG(ERROR) << "Transpose node of onnx need to removed which has not primitiveT"; - return RET_ERROR; - } - MS_ASSERT(primT->value.AsTranspose() != nullptr); - std::vector<int32_t> perm = primT->value.AsTranspose()->perm; + auto perm = GetTransposePerm(transpose_cnode); if (perm == kPermNHWC) { manager->Replace(transpose_cnode, transpose_cnode->input(1)); } diff --git a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h index 7353b78a70..9725ed4813 100644 --- a/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h +++ b/mindspore/lite/tools/optimizer/graph/unused_transpose_node_remove_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.cc b/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.cc index e5c4185189..54854b62eb 100644 --- a/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,84 +15,99 @@ */ #include "tools/optimizer/graph/update_conv2d_param_pass.h" #include <memory> +#include <vector> +#include "ops/fusion/conv2d_fusion.h" #include "mindspore/lite/include/errorcode.h" -#include "src/ops/primitive_c.h" namespace mindspore::opt { +namespace { +constexpr int kAnfPopulaterInputNumTwo = 2; +} + +lite::STATUS UpdateConv2DParamPass::UpdateCommonConv2D(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + if (fmk_type_ != lite::converter::FmkType_TF) { + return lite::RET_OK; + } + auto conv = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(cnode->input(0)); + if (conv == nullptr) { + MS_LOG(DEBUG) << "cnode is invalid."; + return lite::RET_ERROR; + } + if (conv->GetAttr(ops::kFormat) == nullptr || conv->get_format() != mindspore::NHWC) { + return lite::RET_OK; + } + auto weight_node = cnode->input(kAnfPopulaterInputNumTwo); + if (weight_node == nullptr) { + MS_LOG(DEBUG) << "Conv2D weight node is nullptr."; + return lite::RET_ERROR; + } + if (!weight_node->isa<Parameter>()) { + MS_LOG(DEBUG) << "Conv2D weight node is not parameter."; + return lite::RET_NO_CHANGE; + } + auto weight_param = weight_node->cast<ParameterPtr>(); + if (!weight_param->has_default()) { + MS_LOG(DEBUG) << "Conv2D weight parameter has no default value."; + return lite::RET_NO_CHANGE; + } + auto default_param = weight_param->default_param(); + auto weight_tensor = std::dynamic_pointer_cast<ParamValueLite>(default_param); + auto weight_shape = weight_tensor->tensor_shape(); + std::vector<int64_t> kernel_size = {weight_shape[0], weight_shape[1]}; + conv->set_kernel_size(kernel_size); + conv->set_in_channel(weight_shape[2]); + conv->set_out_channel(weight_shape[3]); + return lite::RET_OK; +} + +lite::STATUS UpdateConv2DParamPass::UpdateDepthWiseConv2D(const CNodePtr &cnode) { + MS_ASSERT(cnode != nullptr); + auto conv = GetValueNode<std::shared_ptr<ops::Conv2DFusion>>(cnode->input(0)); + if (conv == nullptr) { + MS_LOG(ERROR) << "cnode is invalid."; + return lite::RET_ERROR; + } + int64_t channel_in = conv->GetAttr(ops::kInChannel) != nullptr ? 
conv->get_in_channel() : -1; + if (channel_in == -1) { + auto input_node = cnode->input(kAnfPopulaterInputNumTwo); + MS_ASSERT(input_node != nullptr); + if (input_node->isa<Parameter>()) { + auto param_node = input_node->cast<ParameterPtr>(); + auto param = param_node->default_param(); + auto weight = std::dynamic_pointer_cast<ParamValueLite>(param); + conv->set_in_channel(static_cast<int64_t>(weight->tensor_shape().at(0))); + } + } + return lite::RET_OK; +} + bool UpdateConv2DParamPass::Run(const FuncGraphPtr &func_graph) { MS_ASSERT(func_graph != nullptr); auto manager = func_graph->manager(); MS_ASSERT(manager != nullptr); auto node_list = TopoSort(func_graph->get_return()); - int status = RET_OK; + int status = lite::RET_OK; for (auto &node : node_list) { if (!utils::isa<CNodePtr>(node)) { continue; } - auto type = opt::GetCNodeType(node); - if (type == schema::PrimitiveType_DepthwiseConv2D) { - auto dwconv2d_cnode = node->cast<CNodePtr>(); - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(dwconv2d_cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(ERROR) << "Depthwise conv2D node has no primitiveC."; - return RET_ERROR; - } - auto primT = primitive_c->primitiveT(); - if (primT == nullptr) { - MS_LOG(ERROR) << "Depthwise conv2D node has no primitiveT."; - return RET_ERROR; - } - int channel_in = primT->value.AsDepthwiseConv2D()->channelIn; - if (channel_in == -1) { - auto input_node = node->cast<CNodePtr>()->input(lite::kAnfPopulaterInputNumTwo); - MS_ASSERT(input_node != nullptr); - if (input_node->isa<Parameter>()) { - auto param_node = input_node->cast<ParameterPtr>(); - auto param = param_node->default_param(); - auto weight = std::dynamic_pointer_cast<ParamValueLite>(param); - primT->value.AsDepthwiseConv2D()->channelIn = weight->tensor_shape().at(0); - } - } - } else if (type == schema::PrimitiveType_Conv2D) { - auto conv2d_cnode = node->cast<CNodePtr>(); - auto primitive_c = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(conv2d_cnode->input(0)); - if (primitive_c == nullptr) { - MS_LOG(DEBUG) << "Conv2D node has no primitiveC."; - continue; - } - auto primT = primitive_c->primitiveT(); - if (primT == nullptr) { - MS_LOG(DEBUG) << "Conv2D node has no primitiveT."; - continue; - } - auto conv2d_primt = primT->value.AsConv2D(); - auto weight_node = conv2d_cnode->input(lite::kAnfPopulaterInputNumTwo); - if (weight_node == nullptr) { - MS_LOG(DEBUG) << "Conv2D weight node is nullptr."; - continue; - } - if (!weight_node->isa<Parameter>()) { - MS_LOG(DEBUG) << "Conv2D weight node is not parameter."; - continue; - } - auto weight_param = weight_node->cast<ParameterPtr>(); - if (!weight_param->has_default()) { - MS_LOG(DEBUG) << "Conv2D weight node is not parameter."; - continue; - } - auto default_param = weight_param->default_param(); - auto weight_tensor = std::dynamic_pointer_cast<ParamValueLite>(default_param); - auto weight_shape = weight_tensor->tensor_shape(); - if (fmk_type == lite::converter::FmkType_TF && conv2d_primt->format == schema::Format_NHWC) { - conv2d_primt->kernelH = weight_shape[0]; - conv2d_primt->kernelW = weight_shape[1]; - conv2d_primt->channelIn = weight_shape[2]; - conv2d_primt->channelOut = weight_shape[3]; - } + if (!CheckPrimitiveType(node, prim::kPrimConv2DFusion)) { + continue; + } + auto cnode = node->cast<CNodePtr>(); + auto conv = GetValueNode<std::shared_ptr<mindspore::ops::Conv2DFusion>>(cnode->input(0)); + if (conv == nullptr) { + MS_LOG(ERROR) << "conv2d node has no primitive."; + return RET_ERROR; + } + if 
(conv->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(conv->GetAttr(ops::kIsDepthWise))) { + status = UpdateDepthWiseConv2D(cnode); + } else { + status = UpdateCommonConv2D(cnode); } if (status != lite::RET_OK && status != lite::RET_NO_CHANGE) { - MS_LOG(ERROR) << "remove identity pass is failed."; + MS_LOG(ERROR) << "update conv2d failed."; return false; } } diff --git a/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.h b/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.h index 30894d9904..797424a1a3 100644 --- a/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.h +++ b/mindspore/lite/tools/optimizer/graph/update_conv2d_param_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,11 +27,13 @@ class UpdateConv2DParamPass : public Pass { public: UpdateConv2DParamPass() : Pass("update_conv2d_param_pass") {} ~UpdateConv2DParamPass() override = default; + lite::STATUS UpdateCommonConv2D(const CNodePtr &cnode); + lite::STATUS UpdateDepthWiseConv2D(const CNodePtr &cnode); bool Run(const FuncGraphPtr &graph) override; - void SetFmkType(FmkType fmk_type) { this->fmk_type = fmk_type; } + void SetFmkType(FmkType fmk_type) { this->fmk_type_ = fmk_type; } private: - FmkType fmk_type = lite::converter::FmkType_ONNX; + FmkType fmk_type_ = lite::converter::FmkType_ONNX; }; } // namespace mindspore::opt #endif // MINDSPORE_LITE_SRC_PASS_UPDATE_CONV2D_PARAM_PASS_H_ diff --git a/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.cc b/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.cc index 79952ee49f..80f2137fd7 100644 --- a/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -15,6 +15,8 @@ */ #include "tools/optimizer/graph/weight_format_hardcode_pass.h" #include <memory> +#include "ops/fusion/conv2d_fusion.h" +#include "ops/fusion/conv2d_backprop_input_fusion.h" #include "tools/optimizer/common/gllo_utils.h" using mindspore::lite::converter::FmkType_CAFFE; @@ -29,10 +31,11 @@ using mindspore::schema::QuantType_WeightQuant; namespace mindspore::opt { namespace { constexpr size_t kConvWeightIndex = 2; +const PrimitivePtr kPrimConv2DBackpropInputFusion = std::make_shared<Primitive>(ops::kNameConv2DBackpropInputFusion); } // namespace void WeightFormatHardCodePass::SetQuantType(QuantType type) { this->quant_type = type; } void WeightFormatHardCodePass::SetFmkType(FmkType type) { this->fmk_type = type; } -lite::STATUS WeightFormatHardCodePass::HardCodeCAFFE(const AnfNodePtr &conv_node, +lite::STATUS WeightFormatHardCodePass::HardCodeCAFFE(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const { MS_ASSERT(conv_cnode != nullptr); MS_ASSERT(param_value != nullptr); @@ -51,23 +54,30 @@ lite::STATUS WeightFormatHardCodePass::HardCodeCAFFE(const AnfNodePtr &conv_node return lite::RET_OK; } -lite::STATUS WeightFormatHardCodePass::HardCodeONNX(const AnfNodePtr &conv_node, +lite::STATUS WeightFormatHardCodePass::HardCodeONNX(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const { MS_ASSERT(conv_cnode != nullptr); MS_ASSERT(param_value != nullptr); - auto op_type = GetCNodeType(conv_node); + auto prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + if (prim == nullptr) { + MS_LOG(ERROR) << "Invalid anfnode, which has no primitive."; + return lite::RET_ERROR; + } + bool is_depth_wise = prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise)); + int64_t format = prim->GetAttr(ops::kFormat) != nullptr ? 
GetValue<int64_t>(prim->GetAttr(ops::kFormat)) : 0; switch (this->quant_type) { case QuantType_AwareTraining: { // sum up from current onnx quant models - if (op_type == schema::PrimitiveType_Conv2D) { - param_value->set_format(schema::Format::Format_KHWC); - } else if (op_type == schema::PrimitiveType_DepthwiseConv2D) { - param_value->set_format(schema::Format::Format_CHWK); - } else if (op_type == schema::PrimitiveType_DeConv2D) { + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + if (!is_depth_wise) { + param_value->set_format(schema::Format::Format_KHWC); + } else { + param_value->set_format(schema::Format::Format_CHWK); + } + } else if (CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion) && !is_depth_wise) { param_value->set_format(schema::Format::Format_KCHW); } else { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); + MS_LOG(ERROR) << "Unsupported op: " << conv_node->fullname_with_scope(); return lite::RET_ERROR; } } break; @@ -78,17 +88,13 @@ lite::STATUS WeightFormatHardCodePass::HardCodeONNX(const AnfNodePtr &conv_node, // depth (K x C/group x kH x kW) group = channelOut ==> (K, multiplier, H, W) // deconv (C x K/group x kH x kW) group = 1 // dedepth (C x K/group x kH x kW) group = channelIn ==> (C, multiplier, H, W) - if (op_type == schema::PrimitiveType_Conv2D || op_type == schema::PrimitiveType_DepthwiseConv2D || - op_type == schema::PrimitiveType_DeConv2D || op_type == schema::PrimitiveType_DeDepthwiseConv2D) { - if (param_value->format() == schema::Format::Format_NHWC) { + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion) || + CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion)) { + if (format == schema::Format::Format_NHWC) { param_value->set_format(schema::Format::Format_KHWC); } else { param_value->set_format(schema::Format::Format_KCHW); } - } else { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); - return lite::RET_ERROR; - } } } break; default: { @@ -100,46 +106,23 @@ lite::STATUS WeightFormatHardCodePass::HardCodeONNX(const AnfNodePtr &conv_node, return lite::RET_OK; } -lite::STATUS WeightFormatHardCodePass::HardCodeMS(const AnfNodePtr &conv_node, +lite::STATUS WeightFormatHardCodePass::HardCodeMS(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const { MS_ASSERT(conv_cnode != nullptr); MS_ASSERT(param_value != nullptr); - auto weight_node = conv_node->cast<CNodePtr>()->input(kConvWeightIndex); - auto op_type = GetCNodeType(conv_node); + auto prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + if (prim == nullptr) { + MS_LOG(ERROR) << "Invalid anfnode, which doesn't have a primitive."; + return lite::RET_ERROR; + } + auto weight_node = conv_node->input(kConvWeightIndex); switch (this->quant_type) { - case QuantType_AwareTraining: { - if (op_type == schema::PrimitiveType_Conv2D) { - param_value->set_format(schema::Format::Format_KCHW); - } else if (op_type == schema::PrimitiveType_DepthwiseConv2D) { - param_value->set_format(schema::Format::Format_CKHW); - } else { - param_value->set_format(schema::Format::Format_KCHW); - } - } break; + case QuantType_AwareTraining: case QuantType_PostTraining: case QuantType_WeightQuant: case QuantType_QUANT_NONE: { // sum up from current ms quant models - if (op_type == schema::PrimitiveType_Conv2D) { - param_value->set_format(schema::Format::Format_KCHW); - } else if (op_type ==
schema::PrimitiveType_DepthwiseConv2D) { - // the format should be set to KCHW while the weight is output of constfolding . - if (weight_node->fullname_with_scope().find("constfold") == weight_node->fullname_with_scope().npos) { - param_value->set_format(schema::Format::Format_CKHW); - } - } else if (op_type == schema::PrimitiveType_DeDepthwiseConv2D) { - param_value->set_format(schema::Format::Format_CKHW); - } else if (op_type == schema::PrimitiveType_DeConv2D) { - param_value->set_format(schema::Format::Format_KCHW); - } else if (op_type == schema::PrimitiveType_Conv2DGradInput) { - param_value->set_format(schema::Format::Format_KCHW); - } else if (op_type == schema::PrimitiveType_GroupConv2DGradInput) { - param_value->set_format(schema::Format::Format_CKHW); - } else { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); - return lite::RET_ERROR; - } + param_value->set_format(schema::Format::Format_KCHW); } break; default: { MS_LOG(ERROR) << "Unsupported quantType: " << EnumNameQuantType(quant_type) @@ -150,54 +133,60 @@ lite::STATUS WeightFormatHardCodePass::HardCodeMS(const AnfNodePtr &conv_node, return lite::RET_OK; } -lite::STATUS WeightFormatHardCodePass::HardCodeTFLITE(const AnfNodePtr &conv_node, +lite::STATUS WeightFormatHardCodePass::HardCodeTFLITE(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const { MS_ASSERT(conv_cnode != nullptr); MS_ASSERT(param_value != nullptr); - auto op_type = GetCNodeType(conv_node); + auto prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + if (prim == nullptr) { + MS_LOG(ERROR) << "Invalid anfnode, which doesn't have a primitive."; + return lite::RET_ERROR; + } + bool is_depth_wise = prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise)); switch (this->quant_type) { case QuantType_AwareTraining: case QuantType_PostTraining: case QuantType_WeightQuant: case QuantType_QUANT_NONE: { - if (op_type == schema::PrimitiveType_Conv2D) { - param_value->set_format(schema::Format::Format_KHWC); - } else if (op_type == schema::PrimitiveType_DepthwiseConv2D) { - param_value->set_format(schema::Format::Format_CHWK); - } else if (op_type == schema::PrimitiveType_DeConv2D) { + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + if (!is_depth_wise) { + param_value->set_format(schema::Format::Format_KHWC); + } else { + param_value->set_format(schema::Format::Format_CHWK); + } + } else if (CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion) && !is_depth_wise) { param_value->set_format(schema::Format::Format_CHWK); - } else { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); - return lite::RET_ERROR; - } } } break; default: { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); + MS_LOG(ERROR) << "Unsupported op: " << conv_node->fullname_with_scope(); return lite::RET_ERROR; } } return lite::RET_OK; } -lite::STATUS WeightFormatHardCodePass::HardCodeTF(const AnfNodePtr &conv_node, +lite::STATUS WeightFormatHardCodePass::HardCodeTF(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const { MS_ASSERT(conv_cnode != nullptr); MS_ASSERT(param_value != nullptr); - auto op_type = GetCNodeType(conv_node); - - if (op_type == schema::PrimitiveType_Conv2D) { - param_value->set_format(schema::Format::Format_HWCK); - } else if (op_type ==
schema::PrimitiveType_DepthwiseConv2D) { - param_value->set_format(schema::Format::Format_HWKC); - } else if (op_type == schema::PrimitiveType_DeConv2D) { - param_value->set_format(schema::Format::Format_HWCK); - } else { - MS_LOG(ERROR) << "Unsupported opType: " << EnumNamePrimitiveType(op_type) - << ", node: " << conv_node->fullname_with_scope(); + auto prim = GetValueNode<PrimitivePtr>(conv_node->input(0)); + if (prim == nullptr) { + MS_LOG(ERROR) << "Invalid anfnode, which doesn't have a primitive."; return lite::RET_ERROR; } + bool is_depth_wise = prim->GetAttr(ops::kIsDepthWise) != nullptr && GetValue<bool>(prim->GetAttr(ops::kIsDepthWise)); + if (CheckPrimitiveType(conv_node, prim::kPrimConv2DFusion)) { + { + if (!is_depth_wise) { + param_value->set_format(schema::Format::Format_HWCK); + } else { + param_value->set_format(schema::Format::Format_HWKC); + } + } + } else if (CheckPrimitiveType(conv_node, prim::kPrimConv2dTransposeFusion) && !is_depth_wise) { + param_value->set_format(schema::Format::Format_HWCK); + } return lite::RET_OK; } @@ -209,11 +198,9 @@ bool WeightFormatHardCodePass::Run(const FuncGraphPtr &graph) { continue; } auto conv_cnode = node->cast<CNodePtr>(); - auto type = opt::GetCNodeType(node); - if (type != schema::PrimitiveType_Conv2D && type != schema::PrimitiveType_DepthwiseConv2D && - ((type != schema::PrimitiveType_Conv2DGradInput) || (fmk_type != FmkType_MS)) && - ((type != schema::PrimitiveType_GroupConv2DGradInput) || (fmk_type != FmkType_MS)) && - type != schema::PrimitiveType_DeConv2D && type != schema::PrimitiveType_DeDepthwiseConv2D) { + if (!CheckPrimitiveType(node, prim::kPrimConv2DFusion) && + (!CheckPrimitiveType(node, kPrimConv2DBackpropInputFusion) || (fmk_type != FmkType_MS)) && + !CheckPrimitiveType(node, prim::kPrimConv2dTransposeFusion)) { continue; } MS_ASSERT(conv_cnode->inputs().size() > kConvWeightIndex); @@ -227,19 +214,19 @@ bool WeightFormatHardCodePass::Run(const FuncGraphPtr &graph) { lite::STATUS status; switch (fmk_type) { case FmkType_CAFFE: - status = HardCodeCAFFE(node, param_value); + status = HardCodeCAFFE(conv_cnode, param_value); break; case FmkType_TFLITE: - status = HardCodeTFLITE(node, param_value); + status = HardCodeTFLITE(conv_cnode, param_value); break; case FmkType_TF: - status = HardCodeTF(node, param_value); + status = HardCodeTF(conv_cnode, param_value); break; case FmkType_ONNX: - status = HardCodeONNX(node, param_value); + status = HardCodeONNX(conv_cnode, param_value); break; case FmkType_MS: - status = HardCodeMS(node, param_value); + status = HardCodeMS(conv_cnode, param_value); break; default: MS_LOG(ERROR) << "Unsupported fmkType: " << fmk_type << ", node: " << node->fullname_with_scope(); diff --git a/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.h b/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.h index a46f6ab0d1..a78b02371b 100644 --- a/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.h +++ b/mindspore/lite/tools/optimizer/graph/weight_format_hardcode_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
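Throughout this patch, switch ladders over schema::PrimitiveType enums give way to CheckPrimitiveType(node, prim) calls against unified-IR primitives. CheckPrimitiveType itself lives in gllo_utils and is not shown in this diff; the sketch below is only a plausible standalone rendition of its contract, with simplified stand-in types (in the real IR the primitive is held by the CNode's input(0) value node):

  #include <iostream>
  #include <memory>
  #include <string>

  // Stand-ins for mindspore Primitive / CNode; the real classes live in core IR.
  struct Primitive { std::string name; };
  using PrimitivePtr = std::shared_ptr<Primitive>;

  struct CNode { PrimitivePtr prim; };  // input(0) holds the primitive in the real IR
  using CNodePtr = std::shared_ptr<CNode>;

  // Assumed contract: a node matches when the primitive attached to it carries
  // the same name as the expected primitive.
  bool CheckPrimitiveType(const CNodePtr &node, const PrimitivePtr &expected) {
    return node != nullptr && node->prim != nullptr && expected != nullptr &&
           node->prim->name == expected->name;
  }

  int main() {
    auto conv2d_fusion = std::make_shared<Primitive>(Primitive{"Conv2DFusion"});
    auto node = std::make_shared<CNode>(CNode{conv2d_fusion});
    std::cout << CheckPrimitiveType(node, conv2d_fusion) << '\n';  // 1
  }

That the pass builds kPrimConv2DBackpropInputFusion locally as a bare Primitive from ops::kNameConv2DBackpropInputFusion suggests the comparison is indeed by name: any Primitive carrying the right name is enough to match.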
@@ -34,11 +34,11 @@ class WeightFormatHardCodePass : public Pass { bool Run(const FuncGraphPtr &graph) override; private: - lite::STATUS HardCodeCAFFE(const AnfNodePtr &node, const ParamValueLitePtr &param_value) const; - lite::STATUS HardCodeONNX(const AnfNodePtr &node, const ParamValueLitePtr &param_value) const; - lite::STATUS HardCodeMS(const AnfNodePtr &node, const ParamValueLitePtr &param_value) const; - lite::STATUS HardCodeTFLITE(const AnfNodePtr &node, const ParamValueLitePtr &param_value) const; - lite::STATUS HardCodeTF(const AnfNodePtr &conv_node, const ParamValueLitePtr &param_value) const; + lite::STATUS HardCodeCAFFE(const CNodePtr &node, const ParamValueLitePtr &param_value) const; + lite::STATUS HardCodeONNX(const CNodePtr &node, const ParamValueLitePtr &param_value) const; + lite::STATUS HardCodeMS(const CNodePtr &node, const ParamValueLitePtr &param_value) const; + lite::STATUS HardCodeTFLITE(const CNodePtr &node, const ParamValueLitePtr &param_value) const; + lite::STATUS HardCodeTF(const CNodePtr &conv_node, const ParamValueLitePtr &param_value) const; private: QuantType quant_type = schema::QuantType_QUANT_NONE; diff --git a/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.cc b/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.cc index 1b1733c101..7f7032db17 100644 --- a/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,8 @@ #include <memory> #include <algorithm> #include <vector> +#include "ops/fusion/conv2d_backprop_input_fusion.h" +#include "ops/transpose.h" #include "tools/optimizer/common/gllo_utils.h" using mindspore::lite::converter::FmkType_CAFFE; @@ -30,11 +32,96 @@ using mindspore::schema::QuantType_WeightQuant; namespace mindspore::opt { namespace { +constexpr size_t kFirstInputIndex = 1; constexpr size_t kConvWeightIndex = 2; +const PrimitivePtr kPrimConv2DBackpropInputFusion = std::make_shared<Primitive>(ops::kNameConv2DBackpropInputFusion); +lite::STATUS GetTransposePerm(schema::Format src_format, schema::Format dst_format, std::vector<int> *perm) { + MS_ASSERT(perm != nullptr); + auto src_format_str = std::string(schema::EnumNameFormat(src_format)); + auto dst_format_str = std::string(schema::EnumNameFormat(dst_format)); + if (src_format_str.empty() || dst_format_str.empty() || src_format_str.size() != dst_format_str.size()) { + MS_LOG(ERROR) << "src_format or dst_format is invalid."; + return lite::RET_ERROR; + } + for (size_t i = 0; i < src_format_str.size(); ++i) { + auto pos = dst_format_str.find(src_format_str[i]); + if (pos == std::string::npos) { + MS_LOG(ERROR) << "src_format and dst_format don't match."; + return lite::RET_ERROR; + } + perm->push_back(static_cast<int>(pos)); + } + return lite::RET_OK; +} } // namespace + void WeightFormatTransformPass::SetQuantType(QuantType type) { this->quant_type = type; } void WeightFormatTransformPass::SetFmkType(FmkType type) { this->fmk_type = type; } void WeightFormatTransformPass::SetDstFormat(schema::Format format) { this->dst_format = format; } +lite::STATUS WeightFormatTransformPass::TransposeInsertForWeightSharing(const FuncGraphPtr &graph, + const ParameterPtr &weight_node, + std::vector<int> perm) { +
MS_ASSERT(graph != nullptr); + MS_ASSERT(weight_node != nullptr); + auto node_list = TopoSort(graph->get_return()); + std::vector<CNodePtr> adjust_nodes; + for (auto &node : node_list) { + if (!utils::isa<CNode>(node)) { + continue; + } + if (CheckPrimitiveType(node, prim::kPrimConv2DFusion) || CheckPrimitiveType(node, kPrimConv2DBackpropInputFusion) || + CheckPrimitiveType(node, prim::kPrimConv2dTransposeFusion)) { + continue; + } + auto cnode = node->cast<CNodePtr>(); + auto inputs = cnode->inputs(); + if (std::any_of(inputs.begin(), inputs.end(), + [&weight_node](const AnfNodePtr &anf_node) { return weight_node == anf_node; })) { + adjust_nodes.push_back(cnode); + } + } + if (adjust_nodes.empty()) { + MS_LOG(DEBUG) << "no nodes need to be adjusted."; + return lite::RET_OK; + } + auto perm_node = BuildIntVecParameterNode(graph, perm, weight_node->fullname_with_scope() + "_perm"); + auto prim = std::make_shared<ops::Transpose>(); + auto transpose_node = graph->NewCNode(prim, {weight_node, perm_node}); + auto type_ptr = TypeIdToType(kTypeUnknown); + std::vector<int64_t> shape_vector; + auto abstract = std::make_shared<abstract::AbstractTensor>(type_ptr, shape_vector); + transpose_node->set_abstract(abstract); + transpose_node->set_fullname_with_scope(weight_node->fullname_with_scope() + "_post"); + for (auto &adjust_node : adjust_nodes) { + auto inputs = adjust_node->inputs(); + std::replace_if( + inputs.begin(), inputs.end(), [&weight_node](const AnfNodePtr &anf_node) { return weight_node == anf_node; }, + transpose_node); + adjust_node->set_inputs(inputs); + } + return lite::RET_OK; +} + +lite::STATUS WeightFormatTransformPass::HandleWeightSharing(const FuncGraphPtr &graph, const ParameterPtr &weight_node, + schema::Format src_format, schema::Format dst_format) { + MS_ASSERT(graph != nullptr); + MS_ASSERT(weight_node != nullptr); + if (src_format == dst_format) { + return lite::RET_OK; + } + std::vector<int> perm; + auto status = GetTransposePerm(src_format, dst_format, &perm); + if (status != lite::RET_OK) { + MS_LOG(ERROR) << "get perm failed."; + return status; + } + status = TransposeInsertForWeightSharing(graph, weight_node, perm); + if (status != lite::RET_OK) { + MS_LOG(ERROR) << "transpose insert failed."; + } + return status; +} + lite::STATUS WeightFormatTransformPass::ConvWeightFormatTrans(const FuncGraphPtr &graph) { MS_ASSERT(graph != nullptr); auto node_list = TopoSort(graph->get_return()); @@ -42,10 +129,9 @@ lite::STATUS WeightFormatTransformPass::ConvWeightFormatTrans(const FuncGraphPtr if (!utils::isa<CNodePtr>(node)) { continue; } - auto type = opt::GetCNodeType(node); - if (type != schema::PrimitiveType_Conv2D && type != schema::PrimitiveType_DepthwiseConv2D && - type != schema::PrimitiveType_Conv2DGradInput && type != schema::PrimitiveType_GroupConv2DGradInput && - type != schema::PrimitiveType_DeConv2D && type != schema::PrimitiveType_DeDepthwiseConv2D) { + if (!CheckPrimitiveType(node, prim::kPrimConv2DFusion) && + !CheckPrimitiveType(node, kPrimConv2DBackpropInputFusion) && + !CheckPrimitiveType(node, prim::kPrimConv2dTransposeFusion)) { continue; } auto conv_cnode = node->cast<CNodePtr>(); @@ -60,6 +146,7 @@ lite::STATUS WeightFormatTransformPass::ConvWeightFormatTrans(const FuncGraphPtr MS_ASSERT(weight_value->tensor_type() == TypeId::kNumberTypeFloat32 || weight_value->tensor_type() == TypeId::kNumberTypeUInt8); lite::STATUS status; + schema::Format src_format = static_cast<schema::Format>(weight_value->format()); schema::Format weight_dst_format =
schema::Format::Format_KHWC; if (dst_format != schema::Format::Format_NUM_OF_FORMAT) { weight_dst_format = dst_format; @@ -73,6 +160,11 @@ lite::STATUS WeightFormatTransformPass::ConvWeightFormatTrans(const FuncGraphPtr << "quant type:" << quant_type; return ERROR; } + status = HandleWeightSharing(graph, weight_node->cast<ParameterPtr>(), src_format, weight_dst_format); + if (status != lite::RET_OK) { + MS_LOG(ERROR) << "handle weight-sharing failed."; + return status; + } auto type_id = static_cast<TypeId>(weight_value->tensor_type()); auto type_ptr = TypeIdToType(type_id); auto shape = weight_value->tensor_shape(); diff --git a/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.h b/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.h index 78992f4ddd..83715ec30b 100644 --- a/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.h +++ b/mindspore/lite/tools/optimizer/graph/weight_format_transform_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ #ifndef MINDSPORE_LITE_SRC_PASS_FUSION_WEIGHT_FORMAT_TRANSFORM_PASS_H_ #define MINDSPORE_LITE_SRC_PASS_FUSION_WEIGHT_FORMAT_TRANSFORM_PASS_H_ #include <string> +#include <vector> #include "schema/inner/model_generated.h" #include "tools/converter/converter_flags.h" #include "backend/optimizer/common/pass.h" @@ -35,6 +36,10 @@ class WeightFormatTransformPass : public Pass { private: lite::STATUS ConvWeightFormatTrans(const FuncGraphPtr &graph); + lite::STATUS TransposeInsertForWeightSharing(const FuncGraphPtr &graph, const ParameterPtr &weight_node, + std::vector<int> perm); + lite::STATUS HandleWeightSharing(const FuncGraphPtr &graph, const ParameterPtr &weight_node, + schema::Format src_format, schema::Format dst_format); private: QuantType quant_type = schema::QuantType_QUANT_NONE; diff --git a/mindspore/lite/tools/optimizer/graph/while_pass.cc b/mindspore/lite/tools/optimizer/graph/while_pass.cc index 486f77f7bc..bd7f84dfd4 100644 --- a/mindspore/lite/tools/optimizer/graph/while_pass.cc +++ b/mindspore/lite/tools/optimizer/graph/while_pass.cc @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License.
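The weight-sharing handling added above derives its transpose permutation purely from the format names: GetTransposePerm records, for each axis letter of the source layout, that letter's position in the destination layout. A self-contained rendition of the same computation (TransposePerm is a hypothetical helper name; the pass's version reports failures through MS_LOG and lite::STATUS rather than an empty result):

  #include <iostream>
  #include <string>
  #include <vector>

  // perm[i] = position of source axis i's letter in the destination layout.
  // An empty result signals layouts of different rank or mismatched axis sets.
  std::vector<int> TransposePerm(const std::string &src, const std::string &dst) {
    std::vector<int> perm;
    if (src.empty() || src.size() != dst.size()) return {};
    for (char axis : src) {
      auto pos = dst.find(axis);
      if (pos == std::string::npos) return {};
      perm.push_back(static_cast<int>(pos));
    }
    return perm;
  }

  int main() {
    for (int p : TransposePerm("KCHW", "KHWC")) std::cout << p << ' ';
    std::cout << '\n';  // prints: 0 3 1 2
  }

Under the usual out[j] = in[perm[j]] transpose semantics, applying this perm to a weight already rewritten into the destination layout recovers the source layout, which is what the inserted Transpose node gives the non-conv users of a shared weight.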
@@ -16,36 +16,16 @@ #include "tools/optimizer/graph/while_pass.h" #include <vector> #include <memory> -#include <algorithm> -#include "mindspore/lite/include/errorcode.h" -#include "mindspore/lite/src/ops/primitive_c.h" -#include "tools/anf_importer/import_from_meta_graphT.h" +#include "ops/switch.h" +#include "include/errorcode.h" #include "tools/optimizer/common/gllo_utils.h" -#include "src/ops/primitive_c.h" -#include "schema/inner/model_generated.h" -#include "src/tensor.h" #include "src/common/log_adapter.h" -#include "src/ops/switch.h" -#include "src/ops/partial.h" namespace mindspore::opt { ValueNodePtr WhilePass::GetSwitchAnfPrim() { - auto switch_primitiveT = new (std::nothrow) schema::PrimitiveT; - if (switch_primitiveT == nullptr) { - MS_LOG(ERROR) << "new switch_primitiveT failed"; - return nullptr; - } - switch_primitiveT->value.type = schema::PrimitiveType_Switch; - switch_primitiveT->value.value = new (std::nothrow) schema::SwitchT; - if (switch_primitiveT->value.value == nullptr) { - MS_LOG(ERROR) << "new MakeTupleT failed"; - delete (switch_primitiveT); - return nullptr; - } - - auto partial_prim = std::make_shared<lite::Switch>(switch_primitiveT); - ValueNodePtr partial_anf_prim = NewValueNode(partial_prim); + auto switch_prim = std::make_shared<mindspore::ops::Switch>(); + ValueNodePtr partial_anf_prim = NewValueNode(switch_prim); return partial_anf_prim; } @@ -56,7 +36,7 @@ bool WhilePass::Run(const FuncGraphPtr &graph) { if (!utils::isa<CNodePtr>(node)) { continue; } - if (opt::GetCNodeType(node) != schema::PrimitiveType_While) { + if (!CheckPrimitiveType(node, prim::kPrimWhile)) { continue; } auto while_cnode = node->cast<CNodePtr>(); @@ -93,7 +73,7 @@ bool WhilePass::Run(const FuncGraphPtr &graph) { // concat body_fg output to cond_fg input auto body_output = body_fg->output(); auto body_output_cnode = utils::cast<CNodePtr>(body_output); - auto prim = GetValueNode<std::shared_ptr<lite::PrimitiveC>>(body_output_cnode->input(0)); + auto prim = GetValueNode<PrimitiveCPtr>(body_output_cnode->input(0)); if (prim == nullptr) { MS_LOG(ERROR) << "Get PrimitiveC of node:" << body_output_cnode->fullname_with_scope() << " failed."; return false; @@ -101,7 +81,7 @@ bool WhilePass::Run(const FuncGraphPtr &graph) { // concat body to cond std::vector<AnfNodePtr> body_to_cond_inputs{cond_vnode}; - if ((schema::PrimitiveType)(prim->Type()) == schema::PrimitiveType_MakeTuple) { + if (CheckPrimitiveType(body_output_cnode, kPrimMakeTuple)) { for (size_t i = 1; i < body_output_cnode->inputs().size(); ++i) { body_to_cond_inputs.emplace_back(body_output_cnode->input(i)); } diff --git a/mindspore/lite/tools/optimizer/graph/while_pass.h b/mindspore/lite/tools/optimizer/graph/while_pass.h index 21ce33d855..595bb41df3 100644 --- a/mindspore/lite/tools/optimizer/graph/while_pass.h +++ b/mindspore/lite/tools/optimizer/graph/while_pass.h @@ -1,5 +1,5 @@ /** - * Copyright 2020 Huawei Technologies Co., Ltd + * Copyright 2020-2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
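The surviving body-to-cond wiring in while_pass.cc flattens a MakeTuple output into individual arguments for the cond graph call. A plausible standalone sketch of that step with stand-in node types (the non-tuple branch is an assumption, since the hunk above ends inside the tuple case):

  #include <memory>
  #include <string>
  #include <vector>

  // Stand-in node: a primitive name plus operands, where inputs[0] is the
  // primitive slot as in the real IR.
  struct Node {
    std::string name;
    std::vector<std::shared_ptr<Node>> inputs;
  };
  using NodePtr = std::shared_ptr<Node>;

  // Flatten a MakeTuple body output into per-element cond arguments; pass any
  // other output through whole (assumed behaviour for the branch not shown).
  std::vector<NodePtr> BodyToCondInputs(const NodePtr &cond_vnode, const NodePtr &body_output) {
    std::vector<NodePtr> args{cond_vnode};
    if (body_output->name == "MakeTuple") {
      for (size_t i = 1; i < body_output->inputs.size(); ++i) {
        args.push_back(body_output->inputs[i]);
      }
    } else {
      args.push_back(body_output);
    }
    return args;
  }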
diff --git a/mindspore/lite/tools/schema_gen/CMakeLists.txt b/mindspore/lite/tools/schema_gen/CMakeLists.txt index 5f0832705a..b1aa5a870c 100644 --- a/mindspore/lite/tools/schema_gen/CMakeLists.txt +++ b/mindspore/lite/tools/schema_gen/CMakeLists.txt @@ -6,11 +6,10 @@ set(COMMON_SRC ${CMAKE_CURRENT_SOURCE_DIR}/../../src/common/file_utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/common/utils.cc ${CMAKE_CURRENT_SOURCE_DIR}/../../src/common/log_adapter.cc + ${CMAKE_CURRENT_SOURCE_DIR}/../../src/ops/ops_def.cc ) add_executable(schema_gen ${CMAKE_CURRENT_SOURCE_DIR}/main.cc ${CMAKE_CURRENT_SOURCE_DIR}/schema_gen.cc - ${CMAKE_CURRENT_SOURCE_DIR}/schema_type_def.cc - ${CMAKE_CURRENT_SOURCE_DIR}/../../src/ops/ops_def.cc ${COMMON_SRC}) target_link_libraries(schema_gen mindspore-lite pthread) diff --git a/mindspore/lite/tools/schema_gen/schema_gen.cc b/mindspore/lite/tools/schema_gen/schema_gen.cc index 6469935638..21513865f7 100644 --- a/mindspore/lite/tools/schema_gen/schema_gen.cc +++ b/mindspore/lite/tools/schema_gen/schema_gen.cc @@ -47,13 +47,24 @@ int SchemaGen::Init() { MS_LOG(ERROR) << "Can not open file: " << path; return RET_ERROR; } - std::string ns = "namespace mindspore.schema;\n\n"; + std::string ns = + "/**\n * Copyright 2019-2021 Huawei Technologies Co., Ltd\n *\n" + " * Licensed under the Apache License, Version 2.0 (the \"License\");\n" + " * you may not use this file except in compliance with the License.\n" + " * You may obtain a copy of the License at\n" + " *\n" + " * http://www.apache.org/licenses/LICENSE-2.0\n" + " *\n" + " * Unless required by applicable law or agreed to in writing, software\n" + " * distributed under the License is distributed on an \"AS IS\" BASIS,\n" + " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + " * See the License for the specific language governing permissions and\n" + " * limitations under the License.\n" + " */\n" + "include \"ops_types.fbs\";\n\nnamespace mindspore.schema;\n\n"; output.write(ns.c_str(), ns.length()); - for (auto &&func : instance->GetAllTypeDefCreateFuncs()) { - std::string &&str = func(); - output.write(str.c_str(), str.length()); - } - + std::string prim_type = instance->GetPrimTypeGenFunc()(); + output.write(prim_type.c_str(), prim_type.length()); for (auto &&func : instance->GetAllOpDefCreateFuncs()) { std::string &&str = func(); output.write(str.c_str(), str.length()); diff --git a/mindspore/lite/tools/schema_gen/schema_type_def.cc b/mindspore/lite/tools/schema_gen/schema_type_def.cc deleted file mode 100644 index 2c9007693e..0000000000 --- a/mindspore/lite/tools/schema_gen/schema_type_def.cc +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#include "tools/schema_gen/schema_type_def.h" - -SCHEMA_ENUM_DEF(ResizeMethod, byte) -SCHEMA_ENUM_ATTR_WITH_VALUE(UNKNOW, -1) -SCHEMA_ENUM_ATTR_WITH_VALUE(BILINEAR, 0) -SCHEMA_ENUM_ATTR_WITH_VALUE(NEAREST_NEIGHBOR, 1) -OP_SCHEMA_DEF_END(ResizeMethod) - -SCHEMA_ENUM_DEF(Format, int) -SCHEMA_ENUM_ATTR_WITH_VALUE(NCHW, 0) -SCHEMA_ENUM_ATTR(NHWC) -SCHEMA_ENUM_ATTR(NHWC4) -SCHEMA_ENUM_ATTR(HWKC) -SCHEMA_ENUM_ATTR(HWCK) -SCHEMA_ENUM_ATTR(KCHW) -SCHEMA_ENUM_ATTR(CKHW) -SCHEMA_ENUM_ATTR(KHWC) -SCHEMA_ENUM_ATTR(CHWK) -SCHEMA_ENUM_ATTR(HW) -SCHEMA_ENUM_ATTR(HW4) -SCHEMA_ENUM_ATTR(NC) -SCHEMA_ENUM_ATTR(NC4) -SCHEMA_ENUM_ATTR_WITH_VALUE(NC4HW4, 100) -SCHEMA_ENUM_ATTR(NUM_OF_FORMAT) -OP_SCHEMA_DEF_END(Format) - -SCHEMA_ENUM_DEF(ActivationType, byte) -SCHEMA_ENUM_ATTR_WITH_VALUE(NO_ACTIVATION, 0) -SCHEMA_ENUM_ATTR_WITH_VALUE(RELU, 1) -SCHEMA_ENUM_ATTR_WITH_VALUE(SIGMOID, 2) -SCHEMA_ENUM_ATTR_WITH_VALUE(RELU6, 3) -SCHEMA_ENUM_ATTR_WITH_VALUE(ELU, 4) -SCHEMA_ENUM_ATTR_WITH_VALUE(LEAKY_RELU, 5) -SCHEMA_ENUM_ATTR_WITH_VALUE(ABS, 6) -SCHEMA_ENUM_ATTR_WITH_VALUE(RELU1, 7) -SCHEMA_ENUM_ATTR_WITH_VALUE(SOFTSIGN, 8) -SCHEMA_ENUM_ATTR_WITH_VALUE(SOFTPLUS, 9) -SCHEMA_ENUM_ATTR_WITH_VALUE(TANH, 10) -SCHEMA_ENUM_ATTR_WITH_VALUE(SELU, 11) -SCHEMA_ENUM_ATTR_WITH_VALUE(HSWISH, 12) -SCHEMA_ENUM_ATTR_WITH_VALUE(HSIGMOID, 13) -SCHEMA_ENUM_ATTR_WITH_VALUE(THRESHOLDRELU, 14) -SCHEMA_ENUM_ATTR_WITH_VALUE(LINEAR, 15) -SCHEMA_ENUM_ATTR_WITH_VALUE(HARD_TANH, 16) -SCHEMA_ENUM_ATTR_WITH_VALUE(SIGN, 17) -SCHEMA_ENUM_ATTR_WITH_VALUE(UNKNOW, 18) -OP_SCHEMA_DEF_END(ActivationType) diff --git a/mindspore/lite/tools/schema_gen/schema_type_def.h b/mindspore/lite/tools/schema_gen/schema_type_def.h deleted file mode 100644 index a8a26138b1..0000000000 --- a/mindspore/lite/tools/schema_gen/schema_type_def.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -#ifndef MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_DEF_H_ -#define MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_DEF_H_ - -#include <string> -#include "tools/schema_gen/schema_type_register.h" - -#define SCHEMA_ENUM_DEF(T, B) \ - namespace mindspore::lite::ops { \ - std::string GenEnumDef##T() { \ - std::string def = "enum "; \ - def.append(#T); \ - def.append(" : "); \ - def.append(#B); \ - def.append(" {\n"); - -#define SCHEMA_ENUM_ATTR_WITH_VALUE(key, value) def.append(#key).append(" = ").append(#value).append(",\n"); - -#define SCHEMA_ENUM_ATTR(key) def.append(#key).append(",\n"); - -#define OP_SCHEMA_DEF_END(T) \ - def.append("}\n\n"); \ - return def; \ - } \ - SchemaTypeRegister g_schema_enum_##T(GenEnumDef##T); \ - } // namespace mindspore::lite::ops - -#endif // MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_DEF_H_ diff --git a/mindspore/lite/tools/schema_gen/schema_type_register.h b/mindspore/lite/tools/schema_gen/schema_type_register.h deleted file mode 100644 index d9c69e95d7..0000000000 --- a/mindspore/lite/tools/schema_gen/schema_type_register.h +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright 2020 Huawei Technologies Co., Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -#ifndef MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_REGISTER_H_ -#define MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_REGISTER_H_ -#include <utility> - -#include "src/ops/schema_register.h" - -namespace mindspore::lite::ops { -class SchemaTypeRegister { - public: - explicit SchemaTypeRegister(GetSchemaDef func) { SchemaRegisterImpl::Instance()->TypePush(std::move(func)); } - ~SchemaTypeRegister() = default; -}; -} // namespace mindspore::lite::ops - -#endif // MINDSPORE_LITE_TOOLS_SCHEMA_GEN_SCHEMA_TYPE_REGISTER_H_
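For reference, each SCHEMA_ENUM_DEF block in the deleted schema_type_def.cc expanded, via the macros in the deleted header, into a registered generator function that builds a FlatBuffers enum definition by plain string concatenation. Tracing those macros, the ResizeMethod block, for example, produced exactly:

  enum ResizeMethod : byte {
  UNKNOW = -1,
  BILINEAR = 0,
  NEAREST_NEIGHBOR = 1,
  }

After this commit, schema_gen links src/ops/ops_def.cc directly and emits the primitive-type enum through GetPrimTypeGenFunc, while the shared type definitions appear to move into the included ops_types.fbs rather than being generated by these hand-written macros.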